paredeyes commited on
Commit
a9f38b4
·
verified ·
1 Parent(s): a586b2f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. miniCUDA124/bin/nvjpeg64_12.dll +3 -0
  3. miniCUDA124/include/thrust/system/cuda/detail/async/copy.h +545 -0
  4. miniCUDA124/include/thrust/system/cuda/detail/async/customization.h +135 -0
  5. miniCUDA124/include/thrust/system/cuda/detail/async/exclusive_scan.h +209 -0
  6. miniCUDA124/include/thrust/system/cuda/detail/async/for_each.h +165 -0
  7. miniCUDA124/include/thrust/system/cuda/detail/async/inclusive_scan.h +202 -0
  8. miniCUDA124/include/thrust/system/cuda/detail/async/reduce.h +351 -0
  9. miniCUDA124/include/thrust/system/cuda/detail/async/scan.h +43 -0
  10. miniCUDA124/include/thrust/system/cuda/detail/async/sort.h +525 -0
  11. miniCUDA124/include/thrust/system/cuda/detail/async/transform.h +169 -0
  12. miniCUDA124/include/thrust/system/cuda/detail/core/agent_launcher.h +1172 -0
  13. miniCUDA124/include/thrust/system/cuda/detail/core/alignment.h +258 -0
  14. miniCUDA124/include/thrust/system/cuda/detail/core/triple_chevron_launch.h +160 -0
  15. miniCUDA124/include/thrust/system/cuda/detail/core/util.h +811 -0
  16. miniCUDA124/include/thrust/system/cuda/detail/internal/copy_cross_system.h +252 -0
  17. miniCUDA124/include/thrust/system/cuda/detail/internal/copy_device_to_device.h +114 -0
  18. miniCUDA124/include/thrust/system/cuda/detail/reduce_by_key.h +1217 -0
  19. miniCUDA124/include/thrust/system/cuda/detail/remove.h +142 -0
  20. miniCUDA124/include/thrust/system/cuda/detail/replace.h +221 -0
  21. miniCUDA124/include/thrust/system/cuda/detail/reverse.h +105 -0
  22. miniCUDA124/include/thrust/system/cuda/detail/scan.h +363 -0
  23. miniCUDA124/include/thrust/system/cuda/detail/scan_by_key.h +500 -0
  24. miniCUDA124/include/thrust/system/cuda/detail/scatter.h +114 -0
  25. miniCUDA124/include/thrust/system/cuda/detail/sequence.h +30 -0
  26. miniCUDA124/include/thrust/system/cuda/detail/set_operations.h +1947 -0
  27. miniCUDA124/include/thrust/system/cuda/detail/sort.h +636 -0
  28. miniCUDA124/include/thrust/system/cuda/detail/swap_ranges.h +110 -0
  29. miniCUDA124/include/thrust/system/cuda/detail/tabulate.h +91 -0
  30. miniCUDA124/include/thrust/system/cuda/detail/temporary_buffer.h +30 -0
  31. miniCUDA124/include/thrust/system/cuda/detail/terminate.h +70 -0
  32. miniCUDA124/include/thrust/system/cuda/detail/transform.h +424 -0
  33. miniCUDA124/include/thrust/system/cuda/detail/transform_reduce.h +76 -0
  34. miniCUDA124/include/thrust/system/cuda/detail/transform_scan.h +117 -0
  35. miniCUDA124/include/thrust/system/cuda/detail/uninitialized_copy.h +119 -0
  36. miniCUDA124/include/thrust/system/cuda/detail/uninitialized_fill.h +117 -0
  37. miniCUDA124/include/thrust/system/cuda/detail/unique.h +829 -0
  38. miniCUDA124/include/thrust/system/cuda/detail/unique_by_key.h +927 -0
  39. miniCUDA124/include/thrust/system/cuda/detail/util.h +650 -0
  40. miniCUDA124/include/thrust/system/detail/adl/adjacent_difference.h +52 -0
  41. miniCUDA124/include/thrust/system/detail/adl/assign_value.h +52 -0
  42. miniCUDA124/include/thrust/system/detail/adl/binary_search.h +52 -0
  43. miniCUDA124/include/thrust/system/detail/adl/copy.h +52 -0
  44. miniCUDA124/include/thrust/system/detail/adl/copy_if.h +52 -0
  45. miniCUDA124/include/thrust/system/detail/adl/count.h +52 -0
  46. miniCUDA124/include/thrust/system/detail/adl/equal.h +52 -0
  47. miniCUDA124/include/thrust/system/detail/adl/extrema.h +52 -0
  48. miniCUDA124/include/thrust/system/detail/adl/fill.h +52 -0
  49. miniCUDA124/include/thrust/system/detail/adl/find.h +52 -0
  50. miniCUDA124/include/thrust/system/detail/adl/for_each.h +52 -0
.gitattributes CHANGED
@@ -95,3 +95,4 @@ miniCUDA124/bin/nvprof.exe filter=lfs diff=lfs merge=lfs -text
95
  miniCUDA124/bin/npps64_12.dll filter=lfs diff=lfs merge=lfs -text
96
  miniCUDA124/bin/nvrtc64_120_0.dll filter=lfs diff=lfs merge=lfs -text
97
  miniCUDA124/bin/nvrtc-builtins64_124.dll filter=lfs diff=lfs merge=lfs -text
 
 
95
  miniCUDA124/bin/npps64_12.dll filter=lfs diff=lfs merge=lfs -text
96
  miniCUDA124/bin/nvrtc64_120_0.dll filter=lfs diff=lfs merge=lfs -text
97
  miniCUDA124/bin/nvrtc-builtins64_124.dll filter=lfs diff=lfs merge=lfs -text
98
+ miniCUDA124/bin/nvjpeg64_12.dll filter=lfs diff=lfs merge=lfs -text
miniCUDA124/bin/nvjpeg64_12.dll ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae60b58ff90f87e2b5002c6ddc3b7eef0538e0537a9011042303e646175ce7af
3
+ size 4913152
miniCUDA124/include/thrust/system/cuda/detail/async/copy.h ADDED
@@ -0,0 +1,545 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ // TODO: Move into system::cuda
29
+
30
+ #pragma once
31
+
32
+ #include <thrust/detail/config.h>
33
+
34
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
35
+ # pragma GCC system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
37
+ # pragma clang system_header
38
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
39
+ # pragma system_header
40
+ #endif // no system header
41
+ #include <thrust/detail/cpp14_required.h>
42
+
43
+ #if THRUST_CPP_DIALECT >= 2014
44
+
45
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
46
+
47
+ #include <thrust/system/cuda/config.h>
48
+
49
+ #include <thrust/system/cuda/detail/async/customization.h>
50
+ #include <thrust/system/cuda/detail/async/transform.h>
51
+ #include <thrust/system/cuda/detail/cross_system.h>
52
+ #include <thrust/system/cuda/future.h>
53
+ #include <thrust/iterator/iterator_traits.h>
54
+ #include <thrust/type_traits/logical_metafunctions.h>
55
+ #include <thrust/detail/static_assert.h>
56
+ #include <thrust/type_traits/is_trivially_relocatable.h>
57
+ #include <thrust/type_traits/is_contiguous_iterator.h>
58
+ #include <thrust/distance.h>
59
+ #include <thrust/advance.h>
60
+ #include <thrust/uninitialized_copy.h>
61
+
62
+ #include <type_traits>
63
+
64
+ THRUST_NAMESPACE_BEGIN
65
+
66
+ namespace system { namespace cuda { namespace detail
67
+ {
68
+
69
+ // ContiguousIterator input and output iterators
70
+ // TriviallyCopyable elements
71
+ // Host to device, device to host, device to device
72
+ template <
73
+ typename FromPolicy, typename ToPolicy
74
+ , typename ForwardIt, typename OutputIt, typename Size
75
+ >
76
+ auto async_copy_n(
77
+ FromPolicy& from_exec
78
+ , ToPolicy& to_exec
79
+ , ForwardIt first
80
+ , Size n
81
+ , OutputIt output
82
+ ) ->
83
+ typename std::enable_if<
84
+ is_indirectly_trivially_relocatable_to<ForwardIt, OutputIt>::value
85
+ , unique_eager_event
86
+ >::type
87
+ {
88
+ using T = typename iterator_traits<ForwardIt>::value_type;
89
+
90
+ auto const device_alloc = get_async_device_allocator(
91
+ select_device_system(from_exec, to_exec)
92
+ );
93
+
94
+ using pointer
95
+ = typename thrust::detail::allocator_traits<decltype(device_alloc)>::
96
+ template rebind_traits<void>::pointer;
97
+
98
+ unique_eager_event e;
99
+
100
+ // Set up stream with dependencies.
101
+
102
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(
103
+ select_device_system(from_exec, to_exec)
104
+ );
105
+
106
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
107
+ {
108
+ e = make_dependent_event(
109
+ std::tuple_cat(
110
+ std::make_tuple(
111
+ unique_stream(nonowning, user_raw_stream)
112
+ )
113
+ , extract_dependencies(
114
+ std::move(thrust::detail::derived_cast(from_exec))
115
+ )
116
+ , extract_dependencies(
117
+ std::move(thrust::detail::derived_cast(to_exec))
118
+ )
119
+ )
120
+ );
121
+ }
122
+ else
123
+ {
124
+ e = make_dependent_event(
125
+ std::tuple_cat(
126
+ extract_dependencies(
127
+ std::move(thrust::detail::derived_cast(from_exec))
128
+ )
129
+ , extract_dependencies(
130
+ std::move(thrust::detail::derived_cast(to_exec))
131
+ )
132
+ )
133
+ );
134
+ }
135
+
136
+ // Run copy.
137
+
138
+ thrust::cuda_cub::throw_on_error(
139
+ cudaMemcpyAsync(
140
+ thrust::raw_pointer_cast(&*output)
141
+ , thrust::raw_pointer_cast(&*first)
142
+ , sizeof(T) * n
143
+ , direction_of_copy(from_exec, to_exec)
144
+ , e.stream().native_handle()
145
+ )
146
+ , "after copy launch"
147
+ );
148
+
149
+ return e;
150
+ }
151
+
152
+ // Non-ContiguousIterator input or output, or non-TriviallyRelocatable value type
153
+ // Device to device
154
+ template <
155
+ typename FromPolicy, typename ToPolicy
156
+ , typename ForwardIt, typename OutputIt, typename Size
157
+ >
158
+ auto async_copy_n(
159
+ thrust::cuda::execution_policy<FromPolicy>& from_exec
160
+ , thrust::cuda::execution_policy<ToPolicy>& to_exec
161
+ , ForwardIt first
162
+ , Size n
163
+ , OutputIt output
164
+ ) ->
165
+ typename std::enable_if<
166
+ conjunction<
167
+ negation<
168
+ is_indirectly_trivially_relocatable_to<ForwardIt, OutputIt>
169
+ >
170
+ , decltype(is_device_to_device_copy(from_exec, to_exec))
171
+ >::value
172
+ , unique_eager_event
173
+ >::type
174
+ {
175
+ using T = typename iterator_traits<ForwardIt>::value_type;
176
+
177
+ return async_transform_n(
178
+ select_device_system(from_exec, to_exec)
179
+ , first, n, output, thrust::identity<T>()
180
+ );
181
+ }
182
+
183
+ template <typename OutputIt>
184
+ void async_copy_n_compile_failure_no_cuda_to_non_contiguous_output()
185
+ {
186
+ THRUST_STATIC_ASSERT_MSG(
187
+ (negation<is_contiguous_iterator<OutputIt>>::value)
188
+ , "copying to non-ContiguousIterators in another system from the CUDA system "
189
+ "is not supported; use `THRUST_PROCLAIM_CONTIGUOUS_ITERATOR(Iterator)` to "
190
+ "indicate that an iterator points to elements that are contiguous in memory."
191
+ );
192
+ }
193
+
194
+ // Non-ContiguousIterator output iterator
195
+ // TriviallyRelocatable value type
196
+ // Device to host, host to device
197
+ template <
198
+ typename FromPolicy, typename ToPolicy
199
+ , typename ForwardIt, typename OutputIt, typename Size
200
+ >
201
+ auto async_copy_n(
202
+ FromPolicy& from_exec
203
+ , ToPolicy& to_exec
204
+ , ForwardIt first
205
+ , Size n
206
+ , OutputIt output
207
+ ) ->
208
+ typename std::enable_if<
209
+ conjunction<
210
+ negation<is_contiguous_iterator<OutputIt>>
211
+ , is_trivially_relocatable_to<
212
+ typename iterator_traits<ForwardIt>::value_type
213
+ , typename iterator_traits<OutputIt>::value_type
214
+ >
215
+ , disjunction<
216
+ decltype(is_host_to_device_copy(from_exec, to_exec))
217
+ , decltype(is_device_to_host_copy(from_exec, to_exec))
218
+ >
219
+ >::value
220
+ , unique_eager_event
221
+ >::type
222
+ {
223
+ async_copy_n_compile_failure_no_cuda_to_non_contiguous_output<OutputIt>();
224
+
225
+ return {};
226
+ }
227
+
228
+ // Workaround for MSVC's lack of expression SFINAE and also for an NVCC bug.
229
+ // In NVCC, when two SFINAE-enabled overloads are only distinguishable by a
230
+ // part of a SFINAE condition that is in a `decltype`, NVCC thinks they are the
231
+ // same overload and emits an error.
232
+ template <
233
+ typename FromPolicy, typename ToPolicy
234
+ , typename ForwardIt, typename OutputIt
235
+ // MSVC2015 WAR: doesn't like decltype(...)::value in superclass definition
236
+ , typename IsH2DCopy = decltype(is_host_to_device_copy(
237
+ std::declval<FromPolicy const&>()
238
+ , std::declval<ToPolicy const&>()))
239
+ >
240
+ struct is_buffered_trivially_relocatable_host_to_device_copy
241
+ : thrust::integral_constant<
242
+ bool
243
+ , !is_contiguous_iterator<ForwardIt>::value
244
+ && is_contiguous_iterator<OutputIt>::value
245
+ && is_trivially_relocatable_to<
246
+ typename iterator_traits<ForwardIt>::value_type
247
+ , typename iterator_traits<OutputIt>::value_type
248
+ >::value
249
+ && IsH2DCopy::value
250
+ >
251
+ {};
252
+
253
+ // Non-ContiguousIterator input iterator, ContiguousIterator output iterator
254
+ // TriviallyRelocatable value type
255
+ // Host to device
256
+ template <
257
+ typename FromPolicy, typename ToPolicy
258
+ , typename ForwardIt, typename OutputIt, typename Size
259
+ >
260
+ auto async_copy_n(
261
+ FromPolicy& from_exec
262
+ , thrust::cuda::execution_policy<ToPolicy>& to_exec
263
+ , ForwardIt first
264
+ , Size n
265
+ , OutputIt output
266
+ ) ->
267
+ typename std::enable_if<
268
+ is_buffered_trivially_relocatable_host_to_device_copy<
269
+ FromPolicy
270
+ , thrust::cuda::execution_policy<ToPolicy>
271
+ , ForwardIt, OutputIt
272
+ >::value
273
+ , unique_eager_event
274
+ >::type
275
+ {
276
+ using T = typename iterator_traits<ForwardIt>::value_type;
277
+
278
+ auto const host_alloc = get_async_host_allocator(
279
+ from_exec
280
+ );
281
+
282
+ // Create host-side buffer.
283
+
284
+ auto buffer = uninitialized_allocate_unique_n<T>(host_alloc, n);
285
+
286
+ auto const buffer_ptr = buffer.get();
287
+
288
+ // Copy into host-side buffer.
289
+
290
+ // TODO: Switch to an async call once we have async interfaces for host
291
+ // systems and support for cross system dependencies.
292
+ uninitialized_copy_n(from_exec, first, n, buffer_ptr);
293
+
294
+ // Run device-side copy.
295
+
296
+ auto new_to_exec = thrust::detail::derived_cast(to_exec).rebind_after(
297
+ std::tuple_cat(
298
+ std::make_tuple(
299
+ std::move(buffer)
300
+ )
301
+ , extract_dependencies(
302
+ std::move(thrust::detail::derived_cast(from_exec))
303
+ )
304
+ , extract_dependencies(
305
+ std::move(thrust::detail::derived_cast(to_exec))
306
+ )
307
+ )
308
+ );
309
+
310
+ THRUST_STATIC_ASSERT((
311
+ std::tuple_size<decltype(
312
+ extract_dependencies(to_exec)
313
+ )>::value + 1
314
+ <=
315
+ std::tuple_size<decltype(
316
+ extract_dependencies(new_to_exec)
317
+ )>::value
318
+ ));
319
+
320
+ return async_copy_n(
321
+ from_exec
322
+ // TODO: We have to cast back to the right execution_policy class. Ideally,
323
+ // we should be moving here.
324
+ , new_to_exec
325
+ , buffer_ptr
326
+ , n
327
+ , output
328
+ );
329
+ }
330
+
331
+ // Workaround for MSVC's lack of expression SFINAE and also for an NVCC bug.
332
+ // In NVCC, when two SFINAE-enabled overloads are only distinguishable by a
333
+ // part of a SFINAE condition that is in a `decltype`, NVCC thinks they are the
334
+ // same overload and emits an error.
335
+ template <
336
+ typename FromPolicy, typename ToPolicy
337
+ , typename ForwardIt, typename OutputIt
338
+ // MSVC2015 WAR: doesn't like decltype(...)::value in superclass definition
339
+ , typename IsD2HCopy = decltype(is_device_to_host_copy(
340
+ std::declval<FromPolicy const&>()
341
+ , std::declval<ToPolicy const&>()))
342
+ >
343
+ struct is_buffered_trivially_relocatable_device_to_host_copy
344
+ : thrust::integral_constant<
345
+ bool
346
+ , !is_contiguous_iterator<ForwardIt>::value
347
+ && is_contiguous_iterator<OutputIt>::value
348
+ && is_trivially_relocatable_to<
349
+ typename iterator_traits<ForwardIt>::value_type
350
+ , typename iterator_traits<OutputIt>::value_type
351
+ >::value
352
+ && IsD2HCopy::value
353
+ >
354
+ {};
355
+
356
+ // Non-ContiguousIterator input iterator, ContiguousIterator output iterator
357
+ // TriviallyRelocatable value type
358
+ // Device to host
359
+ template <
360
+ typename FromPolicy, typename ToPolicy
361
+ , typename ForwardIt, typename OutputIt, typename Size
362
+ >
363
+ auto async_copy_n(
364
+ thrust::cuda::execution_policy<FromPolicy>& from_exec
365
+ , ToPolicy& to_exec
366
+ , ForwardIt first
367
+ , Size n
368
+ , OutputIt output
369
+ ) ->
370
+ typename std::enable_if<
371
+ is_buffered_trivially_relocatable_device_to_host_copy<
372
+ thrust::cuda::execution_policy<FromPolicy>
373
+ , ToPolicy
374
+ , ForwardIt, OutputIt
375
+ >::value
376
+ , unique_eager_event
377
+ >::type
378
+ {
379
+ using T = typename iterator_traits<ForwardIt>::value_type;
380
+
381
+ auto const device_alloc = get_async_device_allocator(
382
+ from_exec
383
+ );
384
+
385
+ // Create device-side buffer.
386
+
387
+ auto buffer = uninitialized_allocate_unique_n<T>(device_alloc, n);
388
+
389
+ auto const buffer_ptr = buffer.get();
390
+
391
+ // Run device-side copy.
392
+
393
+ auto f0 = async_copy_n(
394
+ from_exec
395
+ , from_exec
396
+ , first
397
+ , n
398
+ , buffer_ptr
399
+ );
400
+
401
+ // Run copy back to host.
402
+
403
+ auto new_from_exec = thrust::detail::derived_cast(from_exec).rebind_after(
404
+ std::move(buffer)
405
+ , std::move(f0)
406
+ );
407
+
408
+ THRUST_STATIC_ASSERT((
409
+ std::tuple_size<decltype(
410
+ extract_dependencies(from_exec)
411
+ )>::value + 1
412
+ <=
413
+ std::tuple_size<decltype(
414
+ extract_dependencies(new_from_exec)
415
+ )>::value
416
+ ));
417
+
418
+ return async_copy_n(
419
+ new_from_exec
420
+ , to_exec
421
+ , buffer_ptr
422
+ , n
423
+ , output
424
+ );
425
+ }
426
+
427
+ template <typename InputType, typename OutputType>
428
+ void async_copy_n_compile_failure_non_trivially_relocatable_elements()
429
+ {
430
+ THRUST_STATIC_ASSERT_MSG(
431
+ (is_trivially_relocatable_to<OutputType, InputType>::value)
432
+ , "only sequences of TriviallyRelocatable elements can be copied to and from "
433
+ "the CUDA system; use `THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(T)` to "
434
+ "indicate that a type can be copied by bitwise (e.g. by `memcpy`)"
435
+ );
436
+ }
437
+
438
+ // Non-TriviallyRelocatable value type
439
+ // Host to device, device to host
440
+ template <
441
+ typename FromPolicy, typename ToPolicy
442
+ , typename ForwardIt, typename OutputIt, typename Size
443
+ >
444
+ auto async_copy_n(
445
+ FromPolicy& from_exec
446
+ , ToPolicy& to_exec
447
+ , ForwardIt first
448
+ , Size n
449
+ , OutputIt output
450
+ ) ->
451
+ typename std::enable_if<
452
+ conjunction<
453
+ negation<
454
+ is_trivially_relocatable_to<
455
+ typename iterator_traits<ForwardIt>::value_type
456
+ , typename iterator_traits<OutputIt>::value_type
457
+ >
458
+ >
459
+ , disjunction<
460
+ decltype(is_host_to_device_copy(from_exec, to_exec))
461
+ , decltype(is_device_to_host_copy(from_exec, to_exec))
462
+ >
463
+ >::value
464
+ , unique_eager_event
465
+ >::type
466
+ {
467
+ // TODO: We could do more here with cudaHostRegister.
468
+
469
+ async_copy_n_compile_failure_non_trivially_relocatable_elements<
470
+ typename thrust::iterator_traits<ForwardIt>::value_type
471
+ , typename std::add_lvalue_reference<
472
+ typename thrust::iterator_traits<OutputIt>::value_type
473
+ >::type
474
+ >();
475
+
476
+ return {};
477
+ }
478
+
479
+ }}} // namespace system::cuda::detail
480
+
481
+ namespace cuda_cub
482
+ {
483
+
484
+ // ADL entry point.
485
+ template <
486
+ typename FromPolicy, typename ToPolicy
487
+ , typename ForwardIt, typename Sentinel, typename OutputIt
488
+ >
489
+ auto async_copy(
490
+ thrust::cuda::execution_policy<FromPolicy>& from_exec
491
+ , thrust::cpp::execution_policy<ToPolicy>& to_exec
492
+ , ForwardIt first
493
+ , Sentinel last
494
+ , OutputIt output
495
+ )
496
+ THRUST_RETURNS(
497
+ thrust::system::cuda::detail::async_copy_n(
498
+ from_exec, to_exec, first, distance(first, last), output
499
+ )
500
+ )
501
+
502
+ // ADL entry point.
503
+ template <
504
+ typename FromPolicy, typename ToPolicy
505
+ , typename ForwardIt, typename Sentinel, typename OutputIt
506
+ >
507
+ auto async_copy(
508
+ thrust::cpp::execution_policy<FromPolicy>& from_exec
509
+ , thrust::cuda::execution_policy<ToPolicy>& to_exec
510
+ , ForwardIt first
511
+ , Sentinel last
512
+ , OutputIt output
513
+ )
514
+ THRUST_RETURNS(
515
+ thrust::system::cuda::detail::async_copy_n(
516
+ from_exec, to_exec, first, distance(first, last), output
517
+ )
518
+ )
519
+
520
+ // ADL entry point.
521
+ template <
522
+ typename FromPolicy, typename ToPolicy
523
+ , typename ForwardIt, typename Sentinel, typename OutputIt
524
+ >
525
+ auto async_copy(
526
+ thrust::cuda::execution_policy<FromPolicy>& from_exec
527
+ , thrust::cuda::execution_policy<ToPolicy>& to_exec
528
+ , ForwardIt first
529
+ , Sentinel last
530
+ , OutputIt output
531
+ )
532
+ THRUST_RETURNS(
533
+ thrust::system::cuda::detail::async_copy_n(
534
+ from_exec, to_exec, first, distance(first, last), output
535
+ )
536
+ )
537
+
538
+ } // cuda_cub
539
+
540
+ THRUST_NAMESPACE_END
541
+
542
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
543
+
544
+ #endif
545
+
miniCUDA124/include/thrust/system/cuda/detail/async/customization.h ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ // TODO: Move into system::cuda
29
+
30
+ #pragma once
31
+
32
+ #include <thrust/detail/config.h>
33
+
34
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
35
+ # pragma GCC system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
37
+ # pragma clang system_header
38
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
39
+ # pragma system_header
40
+ #endif // no system header
41
+ #include <thrust/detail/cpp14_required.h>
42
+
43
+ #if THRUST_CPP_DIALECT >= 2014
44
+
45
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
46
+
47
+ #include <thrust/system/cuda/config.h>
48
+
49
+ #include <thrust/detail/type_deduction.h>
50
+ #include <thrust/detail/cstdint.h>
51
+ #include <thrust/detail/execute_with_allocator.h>
52
+ #include <thrust/system/cuda/memory_resource.h>
53
+ #include <thrust/mr/host_memory_resource.h>
54
+ #include <thrust/mr/allocator.h>
55
+ #include <thrust/mr/disjoint_sync_pool.h>
56
+ #include <thrust/mr/sync_pool.h>
57
+ #include <thrust/per_device_resource.h>
58
+
59
+ THRUST_NAMESPACE_BEGIN
60
+
61
+ namespace system { namespace cuda { namespace detail
62
+ {
63
+
64
+ using default_async_host_resource =
65
+ thrust::mr::synchronized_pool_resource<
66
+ thrust::host_memory_resource
67
+ >;
68
+
69
+ template <typename DerivedPolicy>
70
+ auto get_async_host_allocator(
71
+ thrust::detail::execution_policy_base<DerivedPolicy>&
72
+ )
73
+ THRUST_RETURNS(
74
+ thrust::mr::stateless_resource_allocator<
75
+ thrust::detail::uint8_t, default_async_host_resource
76
+ >{}
77
+ )
78
+
79
+ ///////////////////////////////////////////////////////////////////////////////
80
+
81
+ using default_async_device_resource =
82
+ thrust::mr::disjoint_synchronized_pool_resource<
83
+ thrust::system::cuda::memory_resource
84
+ , thrust::mr::new_delete_resource
85
+ >;
86
+
87
+ template <typename DerivedPolicy>
88
+ auto get_async_device_allocator(
89
+ thrust::detail::execution_policy_base<DerivedPolicy>&
90
+ )
91
+ THRUST_RETURNS(
92
+ thrust::per_device_allocator<
93
+ thrust::detail::uint8_t, default_async_device_resource, par_t
94
+ >{}
95
+ )
96
+
97
+ template <typename Allocator, template <typename> class BaseSystem>
98
+ auto get_async_device_allocator(
99
+ thrust::detail::execute_with_allocator<Allocator, BaseSystem>& exec
100
+ )
101
+ THRUST_RETURNS(exec.get_allocator())
102
+
103
+ template <typename Allocator, template <typename> class BaseSystem>
104
+ auto get_async_device_allocator(
105
+ thrust::detail::execute_with_allocator_and_dependencies<
106
+ Allocator, BaseSystem
107
+ >& exec
108
+ )
109
+ THRUST_RETURNS(exec.get_allocator())
110
+
111
+ ///////////////////////////////////////////////////////////////////////////////
112
+
113
+ using default_async_universal_host_pinned_resource =
114
+ thrust::mr::synchronized_pool_resource<
115
+ thrust::system::cuda::universal_host_pinned_memory_resource
116
+ >;
117
+
118
+ template <typename DerivedPolicy>
119
+ auto get_async_universal_host_pinned_allocator(
120
+ thrust::detail::execution_policy_base<DerivedPolicy>&
121
+ )
122
+ THRUST_RETURNS(
123
+ thrust::mr::stateless_resource_allocator<
124
+ thrust::detail::uint8_t, default_async_universal_host_pinned_resource
125
+ >{}
126
+ )
127
+
128
+ }}} // namespace system::cuda::detail
129
+
130
+ THRUST_NAMESPACE_END
131
+
132
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
133
+
134
+ #endif
135
+
miniCUDA124/include/thrust/system/cuda/detail/async/exclusive_scan.h ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ #pragma once
29
+
30
+ #include <thrust/detail/config.h>
31
+
32
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
33
+ # pragma GCC system_header
34
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
35
+ # pragma clang system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
37
+ # pragma system_header
38
+ #endif // no system header
39
+ #include <thrust/detail/cpp14_required.h>
40
+
41
+ #if THRUST_CPP_DIALECT >= 2014
42
+
43
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
44
+
45
+ #include <thrust/iterator/iterator_traits.h>
46
+
47
+ #include <thrust/system/cuda/config.h>
48
+ #include <thrust/system/cuda/detail/async/customization.h>
49
+ #include <thrust/system/cuda/detail/util.h>
50
+ #include <thrust/system/cuda/future.h>
51
+
52
+ #include <thrust/type_traits/remove_cvref.h>
53
+
54
+ #include <thrust/distance.h>
55
+
56
+ #include <type_traits>
57
+
58
+ // TODO specialize for thrust::plus to use e.g. ExclusiveSum instead of ExcScan
59
+ // - Note that thrust::plus<> is transparent, cub::Sum is not. This should be
60
+ // fixed in CUB first).
61
+ // - Need to check if CUB actually optimizes for sums before putting in effort
62
+
63
+ THRUST_NAMESPACE_BEGIN
64
+ namespace system
65
+ {
66
+ namespace cuda
67
+ {
68
+ namespace detail
69
+ {
70
+
71
+ template <typename DerivedPolicy,
72
+ typename ForwardIt,
73
+ typename Size,
74
+ typename OutputIt,
75
+ typename InitialValueType,
76
+ typename BinaryOp>
77
+ unique_eager_event
78
+ async_exclusive_scan_n(execution_policy<DerivedPolicy>& policy,
79
+ ForwardIt first,
80
+ Size n,
81
+ OutputIt out,
82
+ InitialValueType init,
83
+ BinaryOp op)
84
+ {
85
+ using InputValueT = cub::detail::InputValue<InitialValueType>;
86
+ using Dispatch32 = cub::DispatchScan<ForwardIt,
87
+ OutputIt,
88
+ BinaryOp,
89
+ InputValueT,
90
+ thrust::detail::int32_t,
91
+ InitialValueType>;
92
+ using Dispatch64 = cub::DispatchScan<ForwardIt,
93
+ OutputIt,
94
+ BinaryOp,
95
+ InputValueT,
96
+ thrust::detail::int64_t,
97
+ InitialValueType>;
98
+
99
+ InputValueT init_value(init);
100
+
101
+ auto const device_alloc = get_async_device_allocator(policy);
102
+ unique_eager_event ev;
103
+
104
+ // Determine temporary device storage requirements.
105
+ cudaError_t status;
106
+ size_t tmp_size = 0;
107
+ {
108
+ THRUST_INDEX_TYPE_DISPATCH2(status,
109
+ Dispatch32::Dispatch,
110
+ Dispatch64::Dispatch,
111
+ n,
112
+ (nullptr,
113
+ tmp_size,
114
+ first,
115
+ out,
116
+ op,
117
+ init_value,
118
+ n_fixed,
119
+ nullptr));
120
+ thrust::cuda_cub::throw_on_error(status,
121
+ "after determining tmp storage "
122
+ "requirements for exclusive_scan");
123
+ }
124
+
125
+ // Allocate temporary storage.
126
+ auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
127
+ device_alloc, tmp_size
128
+ );
129
+ void* const tmp_ptr = raw_pointer_cast(content.get());
130
+
131
+ // Set up stream with dependencies.
132
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
133
+
134
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
135
+ {
136
+ ev = make_dependent_event(
137
+ std::tuple_cat(
138
+ std::make_tuple(
139
+ std::move(content),
140
+ unique_stream(nonowning, user_raw_stream)
141
+ ),
142
+ extract_dependencies(std::move(thrust::detail::derived_cast(policy)))));
143
+ }
144
+ else
145
+ {
146
+ ev = make_dependent_event(
147
+ std::tuple_cat(
148
+ std::make_tuple(std::move(content)),
149
+ extract_dependencies(std::move(thrust::detail::derived_cast(policy)))));
150
+ }
151
+
152
+ // Run scan.
153
+ {
154
+ THRUST_INDEX_TYPE_DISPATCH2(status,
155
+ Dispatch32::Dispatch,
156
+ Dispatch64::Dispatch,
157
+ n,
158
+ (tmp_ptr,
159
+ tmp_size,
160
+ first,
161
+ out,
162
+ op,
163
+ init_value,
164
+ n_fixed,
165
+ user_raw_stream));
166
+ thrust::cuda_cub::throw_on_error(status,
167
+ "after dispatching exclusive_scan kernel");
168
+ }
169
+
170
+ return ev;
171
+ }
172
+
173
+ }}} // namespace system::cuda::detail
174
+
175
+ namespace cuda_cub
176
+ {
177
+
178
+ // ADL entry point.
179
+ template <typename DerivedPolicy,
180
+ typename ForwardIt,
181
+ typename Sentinel,
182
+ typename OutputIt,
183
+ typename InitialValueType,
184
+ typename BinaryOp>
185
+ auto async_exclusive_scan(execution_policy<DerivedPolicy>& policy,
186
+ ForwardIt first,
187
+ Sentinel&& last,
188
+ OutputIt&& out,
189
+ InitialValueType &&init,
190
+ BinaryOp&& op)
191
+ THRUST_RETURNS(
192
+ thrust::system::cuda::detail::async_exclusive_scan_n(
193
+ policy,
194
+ first,
195
+ distance(first, THRUST_FWD(last)),
196
+ THRUST_FWD(out),
197
+ THRUST_FWD(init),
198
+ THRUST_FWD(op)
199
+ )
200
+ )
201
+
202
+ } // namespace cuda_cub
203
+
204
+ THRUST_NAMESPACE_END
205
+
206
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
207
+
208
+ #endif // C++14
209
+
miniCUDA124/include/thrust/system/cuda/detail/async/for_each.h ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /******************************************************************************
3
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of the NVIDIA CORPORATION nor the
13
+ * names of its contributors may be used to endorse or promote products
14
+ * derived from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
20
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
+ *
27
+ ******************************************************************************/
28
+
29
+ // TODO: Move into system::cuda
30
+
31
+ #pragma once
32
+
33
+ #include <thrust/detail/config.h>
34
+
35
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
36
+ # pragma GCC system_header
37
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
38
+ # pragma clang system_header
39
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
40
+ # pragma system_header
41
+ #endif // no system header
42
+ #include <thrust/detail/cpp14_required.h>
43
+
44
+ #if THRUST_CPP_DIALECT >= 2014
45
+
46
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
47
+
48
+ #include <thrust/system/cuda/config.h>
49
+
50
+ #include <thrust/system/cuda/detail/async/customization.h>
51
+ #include <thrust/system/cuda/detail/parallel_for.h>
52
+ #include <thrust/system/cuda/future.h>
53
+ #include <thrust/iterator/iterator_traits.h>
54
+ #include <thrust/distance.h>
55
+
56
+ #include <type_traits>
57
+
58
+ THRUST_NAMESPACE_BEGIN
59
+
60
+ namespace system { namespace cuda { namespace detail
61
+ {
62
+
63
+ template <typename ForwardIt, typename UnaryFunction>
64
+ struct async_for_each_fn
65
+ {
66
+ ForwardIt first;
67
+ UnaryFunction f;
68
+
69
+ __host__ __device__
70
+ async_for_each_fn(ForwardIt&& first_, UnaryFunction&& f_)
71
+ : first(std::move(first_)), f(std::move(f_))
72
+ {}
73
+
74
+ template <typename Index>
75
+ __host__ __device__
76
+ void operator()(Index idx)
77
+ {
78
+ f(thrust::raw_reference_cast(first[idx]));
79
+ }
80
+ };
81
+
82
+ template <
83
+ typename DerivedPolicy
84
+ , typename ForwardIt, typename Size, typename UnaryFunction
85
+ >
86
+ unique_eager_event async_for_each_n(
87
+ execution_policy<DerivedPolicy>& policy,
88
+ ForwardIt first,
89
+ Size n,
90
+ UnaryFunction func
91
+ ) {
92
+ unique_eager_event e;
93
+
94
+ // Set up stream with dependencies.
95
+
96
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
97
+
98
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
99
+ {
100
+ e = make_dependent_event(
101
+ std::tuple_cat(
102
+ std::make_tuple(
103
+ unique_stream(nonowning, user_raw_stream)
104
+ )
105
+ , extract_dependencies(
106
+ std::move(thrust::detail::derived_cast(policy))
107
+ )
108
+ )
109
+ );
110
+ }
111
+ else
112
+ {
113
+ e = make_dependent_event(
114
+ extract_dependencies(
115
+ std::move(thrust::detail::derived_cast(policy))
116
+ )
117
+ );
118
+ }
119
+
120
+ // Run for_each.
121
+
122
+ async_for_each_fn<ForwardIt, UnaryFunction> wrapped(
123
+ std::move(first), std::move(func)
124
+ );
125
+
126
+ thrust::cuda_cub::throw_on_error(
127
+ thrust::cuda_cub::__parallel_for::parallel_for(
128
+ n, std::move(wrapped), e.stream().native_handle()
129
+ )
130
+ , "after for_each launch"
131
+ );
132
+
133
+ return e;
134
+ }
135
+
136
+ }}} // namespace system::cuda::detail
137
+
138
+ namespace cuda_cub
139
+ {
140
+
141
+ // ADL entry point.
142
+ template <
143
+ typename DerivedPolicy
144
+ , typename ForwardIt, typename Sentinel, typename UnaryFunction
145
+ >
146
+ auto async_for_each(
147
+ execution_policy<DerivedPolicy>& policy,
148
+ ForwardIt first,
149
+ Sentinel last,
150
+ UnaryFunction&& func
151
+ )
152
+ THRUST_RETURNS(
153
+ thrust::system::cuda::detail::async_for_each_n(
154
+ policy, first, distance(first, last), THRUST_FWD(func)
155
+ )
156
+ );
157
+
158
+ } // cuda_cub
159
+
160
+ THRUST_NAMESPACE_END
161
+
162
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
163
+
164
+ #endif
165
+
miniCUDA124/include/thrust/system/cuda/detail/async/inclusive_scan.h ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ #pragma once
29
+
30
+ #include <thrust/detail/config.h>
31
+
32
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
33
+ # pragma GCC system_header
34
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
35
+ # pragma clang system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
37
+ # pragma system_header
38
+ #endif // no system header
39
+ #include <thrust/detail/cpp14_required.h>
40
+
41
+ #if THRUST_CPP_DIALECT >= 2014
42
+
43
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
44
+
45
+ #include <thrust/iterator/iterator_traits.h>
46
+
47
+ #include <thrust/system/cuda/config.h>
48
+ #include <thrust/system/cuda/detail/async/customization.h>
49
+ #include <thrust/system/cuda/detail/util.h>
50
+ #include <thrust/system/cuda/future.h>
51
+
52
+ #include <thrust/type_traits/remove_cvref.h>
53
+
54
+ #include <thrust/distance.h>
55
+
56
+ #include <type_traits>
57
+
58
+ // TODO specialize for thrust::plus to use e.g. InclusiveSum instead of IncScan
59
+ // - Note that thrust::plus<> is transparent, cub::Sum is not. This should be
60
+ // fixed in CUB first).
61
+ // - Need to check if CUB actually optimizes for sums before putting in effort
62
+
63
+ THRUST_NAMESPACE_BEGIN
64
+ namespace system
65
+ {
66
+ namespace cuda
67
+ {
68
+ namespace detail
69
+ {
70
+
71
+ template <typename DerivedPolicy,
72
+ typename ForwardIt,
73
+ typename Size,
74
+ typename OutputIt,
75
+ typename BinaryOp>
76
+ unique_eager_event
77
+ async_inclusive_scan_n(execution_policy<DerivedPolicy>& policy,
78
+ ForwardIt first,
79
+ Size n,
80
+ OutputIt out,
81
+ BinaryOp op)
82
+ {
83
+ using AccumT = typename thrust::iterator_traits<ForwardIt>::value_type;
84
+ using Dispatch32 = cub::DispatchScan<ForwardIt,
85
+ OutputIt,
86
+ BinaryOp,
87
+ cub::NullType,
88
+ thrust::detail::int32_t,
89
+ AccumT>;
90
+ using Dispatch64 = cub::DispatchScan<ForwardIt,
91
+ OutputIt,
92
+ BinaryOp,
93
+ cub::NullType,
94
+ thrust::detail::int64_t,
95
+ AccumT>;
96
+
97
+ auto const device_alloc = get_async_device_allocator(policy);
98
+ unique_eager_event ev;
99
+
100
+ // Determine temporary device storage requirements.
101
+ cudaError_t status;
102
+ size_t tmp_size = 0;
103
+ {
104
+ THRUST_INDEX_TYPE_DISPATCH2(status,
105
+ Dispatch32::Dispatch,
106
+ Dispatch64::Dispatch,
107
+ n,
108
+ (nullptr,
109
+ tmp_size,
110
+ first,
111
+ out,
112
+ op,
113
+ cub::NullType{},
114
+ n_fixed,
115
+ nullptr));
116
+ thrust::cuda_cub::throw_on_error(status,
117
+ "after determining tmp storage "
118
+ "requirements for inclusive_scan");
119
+ }
120
+
121
+ // Allocate temporary storage.
122
+ auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
123
+ device_alloc, tmp_size
124
+ );
125
+ void* const tmp_ptr = raw_pointer_cast(content.get());
126
+
127
+ // Set up stream with dependencies.
128
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
129
+
130
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
131
+ {
132
+ ev = make_dependent_event(
133
+ std::tuple_cat(
134
+ std::make_tuple(
135
+ std::move(content),
136
+ unique_stream(nonowning, user_raw_stream)
137
+ ),
138
+ extract_dependencies(std::move(thrust::detail::derived_cast(policy)))));
139
+ }
140
+ else
141
+ {
142
+ ev = make_dependent_event(
143
+ std::tuple_cat(
144
+ std::make_tuple(std::move(content)),
145
+ extract_dependencies(std::move(thrust::detail::derived_cast(policy)))));
146
+ }
147
+
148
+ // Run scan.
149
+ {
150
+ THRUST_INDEX_TYPE_DISPATCH2(status,
151
+ Dispatch32::Dispatch,
152
+ Dispatch64::Dispatch,
153
+ n,
154
+ (tmp_ptr,
155
+ tmp_size,
156
+ first,
157
+ out,
158
+ op,
159
+ cub::NullType{},
160
+ n_fixed,
161
+ user_raw_stream));
162
+ thrust::cuda_cub::throw_on_error(status,
163
+ "after dispatching inclusive_scan kernel");
164
+ }
165
+
166
+ return ev;
167
+ }
168
+
169
+ }}} // namespace system::cuda::detail
170
+
171
+ namespace cuda_cub
172
+ {
173
+
174
+ // ADL entry point.
175
+ template <typename DerivedPolicy,
176
+ typename ForwardIt,
177
+ typename Sentinel,
178
+ typename OutputIt,
179
+ typename BinaryOp>
180
+ auto async_inclusive_scan(execution_policy<DerivedPolicy>& policy,
181
+ ForwardIt first,
182
+ Sentinel&& last,
183
+ OutputIt&& out,
184
+ BinaryOp&& op)
185
+ THRUST_RETURNS(
186
+ thrust::system::cuda::detail::async_inclusive_scan_n(
187
+ policy,
188
+ first,
189
+ distance(first, THRUST_FWD(last)),
190
+ THRUST_FWD(out),
191
+ THRUST_FWD(op)
192
+ )
193
+ )
194
+
195
+ } // namespace cuda_cub
196
+
197
+ THRUST_NAMESPACE_END
198
+
199
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
200
+
201
+ #endif // C++14
202
+
miniCUDA124/include/thrust/system/cuda/detail/async/reduce.h ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ // TODO: Optimize for thrust::plus
29
+
30
+ // TODO: Move into system::cuda
31
+
32
+ #pragma once
33
+
34
+ #include <thrust/detail/config.h>
35
+
36
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
37
+ # pragma GCC system_header
38
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
39
+ # pragma clang system_header
40
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
41
+ # pragma system_header
42
+ #endif // no system header
43
+ #include <thrust/detail/cpp14_required.h>
44
+
45
+ #if THRUST_CPP_DIALECT >= 2014
46
+
47
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
48
+
49
+ #include <thrust/system/cuda/config.h>
50
+
51
+ #include <thrust/system/cuda/detail/async/customization.h>
52
+ #include <thrust/system/cuda/detail/reduce.h>
53
+ #include <thrust/system/cuda/future.h>
54
+ #include <thrust/type_traits/remove_cvref.h>
55
+ #include <thrust/iterator/iterator_traits.h>
56
+ #include <thrust/distance.h>
57
+
58
+ #include <type_traits>
59
+
60
+ THRUST_NAMESPACE_BEGIN
61
+
62
+ namespace system { namespace cuda { namespace detail
63
+ {
64
+
65
+ template <
66
+ typename DerivedPolicy
67
+ , typename ForwardIt, typename Size, typename T, typename BinaryOp
68
+ >
69
+ unique_eager_future<remove_cvref_t<T>> async_reduce_n(
70
+ execution_policy<DerivedPolicy>& policy
71
+ , ForwardIt first
72
+ , Size n
73
+ , T init
74
+ , BinaryOp op
75
+ ) {
76
+ using U = remove_cvref_t<T>;
77
+
78
+ auto const device_alloc = get_async_device_allocator(policy);
79
+
80
+ using pointer
81
+ = typename thrust::detail::allocator_traits<decltype(device_alloc)>::
82
+ template rebind_traits<U>::pointer;
83
+
84
+ unique_eager_future_promise_pair<U, pointer> fp;
85
+
86
+ // Determine temporary device storage requirements.
87
+
88
+ size_t tmp_size = 0;
89
+ thrust::cuda_cub::throw_on_error(
90
+ cub::DeviceReduce::Reduce(
91
+ nullptr
92
+ , tmp_size
93
+ , first
94
+ , static_cast<U*>(nullptr)
95
+ , n
96
+ , op
97
+ , init
98
+ , nullptr // Null stream, just for sizing.
99
+ )
100
+ , "after reduction sizing"
101
+ );
102
+
103
+ // Allocate temporary storage.
104
+
105
+ auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
106
+ device_alloc, sizeof(U) + tmp_size
107
+ );
108
+
109
+ // The array was dynamically allocated, so we assume that it's suitably
110
+ // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator`
111
+ // make this guarantee.
112
+ auto const content_ptr = content.get();
113
+ U* const ret_ptr = thrust::detail::aligned_reinterpret_cast<U*>(
114
+ raw_pointer_cast(content_ptr)
115
+ );
116
+ void* const tmp_ptr = static_cast<void*>(
117
+ raw_pointer_cast(content_ptr + sizeof(U))
118
+ );
119
+
120
+ // Set up stream with dependencies.
121
+
122
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
123
+
124
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
125
+ {
126
+ fp = make_dependent_future<U, pointer>(
127
+ [] (decltype(content) const& c)
128
+ {
129
+ return pointer(
130
+ thrust::detail::aligned_reinterpret_cast<U*>(
131
+ raw_pointer_cast(c.get())
132
+ )
133
+ );
134
+ }
135
+ , std::tuple_cat(
136
+ std::make_tuple(
137
+ std::move(content)
138
+ , unique_stream(nonowning, user_raw_stream)
139
+ )
140
+ , extract_dependencies(
141
+ std::move(thrust::detail::derived_cast(policy))
142
+ )
143
+ )
144
+ );
145
+ }
146
+ else
147
+ {
148
+ fp = make_dependent_future<U, pointer>(
149
+ [] (decltype(content) const& c)
150
+ {
151
+ return pointer(
152
+ thrust::detail::aligned_reinterpret_cast<U*>(
153
+ raw_pointer_cast(c.get())
154
+ )
155
+ );
156
+ }
157
+ , std::tuple_cat(
158
+ std::make_tuple(
159
+ std::move(content)
160
+ )
161
+ , extract_dependencies(
162
+ std::move(thrust::detail::derived_cast(policy))
163
+ )
164
+ )
165
+ );
166
+ }
167
+
168
+ // Run reduction.
169
+
170
+ thrust::cuda_cub::throw_on_error(
171
+ cub::DeviceReduce::Reduce(
172
+ tmp_ptr
173
+ , tmp_size
174
+ , first
175
+ , ret_ptr
176
+ , n
177
+ , op
178
+ , init
179
+ , fp.future.stream().native_handle()
180
+ )
181
+ , "after reduction launch"
182
+ );
183
+
184
+ return std::move(fp.future);
185
+ }
186
+
187
+ }}} // namespace system::cuda::detail
188
+
189
+ namespace cuda_cub
190
+ {
191
+
192
+ // ADL entry point.
193
+ template <
194
+ typename DerivedPolicy
195
+ , typename ForwardIt, typename Sentinel, typename T, typename BinaryOp
196
+ >
197
+ auto async_reduce(
198
+ execution_policy<DerivedPolicy>& policy
199
+ , ForwardIt first
200
+ , Sentinel last
201
+ , T init
202
+ , BinaryOp op
203
+ )
204
+ THRUST_RETURNS(
205
+ thrust::system::cuda::detail::async_reduce_n(
206
+ policy, first, distance(first, last), init, op
207
+ )
208
+ )
209
+
210
+ } // cuda_cub
211
+
212
+ ///////////////////////////////////////////////////////////////////////////////
213
+
214
+ namespace system { namespace cuda { namespace detail
215
+ {
216
+
217
+ template <
218
+ typename DerivedPolicy
219
+ , typename ForwardIt, typename Size, typename OutputIt
220
+ , typename T, typename BinaryOp
221
+ >
222
+ unique_eager_event async_reduce_into_n(
223
+ execution_policy<DerivedPolicy>& policy
224
+ , ForwardIt first
225
+ , Size n
226
+ , OutputIt output
227
+ , T init
228
+ , BinaryOp op
229
+ ) {
230
+ using U = remove_cvref_t<T>;
231
+
232
+ auto const device_alloc = get_async_device_allocator(policy);
233
+
234
+ unique_eager_event e;
235
+
236
+ // Determine temporary device storage requirements.
237
+
238
+ size_t tmp_size = 0;
239
+ thrust::cuda_cub::throw_on_error(
240
+ cub::DeviceReduce::Reduce(
241
+ nullptr
242
+ , tmp_size
243
+ , first
244
+ , static_cast<U*>(nullptr)
245
+ , n
246
+ , op
247
+ , init
248
+ , nullptr // Null stream, just for sizing.
249
+ )
250
+ , "after reduction sizing"
251
+ );
252
+
253
+ // Allocate temporary storage.
254
+
255
+ auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
256
+ device_alloc, tmp_size
257
+ );
258
+
259
+ // The array was dynamically allocated, so we assume that it's suitably
260
+ // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator`
261
+ // make this guarantee.
262
+ auto const content_ptr = content.get();
263
+
264
+ void* const tmp_ptr = static_cast<void*>(
265
+ raw_pointer_cast(content_ptr)
266
+ );
267
+
268
+ // Set up stream with dependencies.
269
+
270
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
271
+
272
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
273
+ {
274
+ e = make_dependent_event(
275
+ std::tuple_cat(
276
+ std::make_tuple(
277
+ std::move(content)
278
+ , unique_stream(nonowning, user_raw_stream)
279
+ )
280
+ , extract_dependencies(
281
+ std::move(thrust::detail::derived_cast(policy))
282
+ )
283
+ )
284
+ );
285
+ }
286
+ else
287
+ {
288
+ e = make_dependent_event(
289
+ std::tuple_cat(
290
+ std::make_tuple(
291
+ std::move(content)
292
+ )
293
+ , extract_dependencies(
294
+ std::move(thrust::detail::derived_cast(policy))
295
+ )
296
+ )
297
+ );
298
+ }
299
+
300
+ // Run reduction.
301
+
302
+ thrust::cuda_cub::throw_on_error(
303
+ cub::DeviceReduce::Reduce(
304
+ tmp_ptr
305
+ , tmp_size
306
+ , first
307
+ , output
308
+ , n
309
+ , op
310
+ , init
311
+ , e.stream().native_handle()
312
+ )
313
+ , "after reduction launch"
314
+ );
315
+
316
+ return e;
317
+ }
318
+
319
+ }}} // namespace system::cuda::detail
320
+
321
+ namespace cuda_cub
322
+ {
323
+
324
+ // ADL entry point.
325
+ template <
326
+ typename DerivedPolicy
327
+ , typename ForwardIt, typename Sentinel, typename OutputIt
328
+ , typename T, typename BinaryOp
329
+ >
330
+ auto async_reduce_into(
331
+ execution_policy<DerivedPolicy>& policy
332
+ , ForwardIt first
333
+ , Sentinel last
334
+ , OutputIt output
335
+ , T init
336
+ , BinaryOp op
337
+ )
338
+ THRUST_RETURNS(
339
+ thrust::system::cuda::detail::async_reduce_into_n(
340
+ policy, first, distance(first, last), output, init, op
341
+ )
342
+ )
343
+
344
+ } // cuda_cub
345
+
346
+ THRUST_NAMESPACE_END
347
+
348
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
349
+
350
+ #endif
351
+
miniCUDA124/include/thrust/system/cuda/detail/async/scan.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ #pragma once
29
+
30
+ #include <thrust/detail/config.h>
31
+
32
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
33
+ # pragma GCC system_header
34
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
35
+ # pragma clang system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
37
+ # pragma system_header
38
+ #endif // no system header
39
+
40
+ #include <thrust/detail/cpp14_required.h>
41
+
42
+ #include <thrust/system/cuda/detail/async/exclusive_scan.h>
43
+ #include <thrust/system/cuda/detail/async/inclusive_scan.h>
miniCUDA124/include/thrust/system/cuda/detail/async/sort.h ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ // TODO: Move into system::cuda
29
+
30
+ #pragma once
31
+
32
+ #include <thrust/detail/config.h>
33
+
34
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
35
+ # pragma GCC system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
37
+ # pragma clang system_header
38
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
39
+ # pragma system_header
40
+ #endif // no system header
41
+ #include <thrust/detail/cpp14_required.h>
42
+
43
+ #if THRUST_CPP_DIALECT >= 2014
44
+
45
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
46
+
47
+ #include <thrust/system/cuda/config.h>
48
+
49
+ #include <thrust/system/cuda/detail/async/customization.h>
50
+ #include <thrust/system/cuda/detail/async/copy.h>
51
+ #include <thrust/system/cuda/detail/sort.h>
52
+ #include <thrust/detail/alignment.h>
53
+ #include <thrust/system/cuda/future.h>
54
+ #include <thrust/type_traits/is_trivially_relocatable.h>
55
+ #include <thrust/type_traits/is_contiguous_iterator.h>
56
+ #include <thrust/type_traits/is_operator_less_or_greater_function_object.h>
57
+ #include <thrust/type_traits/logical_metafunctions.h>
58
+ #include <thrust/iterator/iterator_traits.h>
59
+ #include <thrust/detail/static_assert.h>
60
+ #include <thrust/distance.h>
61
+
62
+ #include <type_traits>
63
+
64
+ THRUST_NAMESPACE_BEGIN
65
+
66
+ namespace system { namespace cuda { namespace detail
67
+ {
68
+
69
+ // Non-ContiguousIterator input and output iterators
70
+ template <
71
+ typename DerivedPolicy
72
+ , typename ForwardIt, typename Size, typename StrictWeakOrdering
73
+ >
74
+ auto async_stable_sort_n(
75
+ execution_policy<DerivedPolicy>& policy,
76
+ ForwardIt first,
77
+ Size n,
78
+ StrictWeakOrdering comp
79
+ ) ->
80
+ typename std::enable_if<
81
+ negation<is_contiguous_iterator<ForwardIt>>::value
82
+ , unique_eager_event
83
+ >::type
84
+ {
85
+ using T = typename iterator_traits<ForwardIt>::value_type;
86
+
87
+ auto const device_alloc = get_async_device_allocator(policy);
88
+
89
+ // Create device-side buffer.
90
+
91
+ // FIXME: Combine this temporary allocation with the main one for CUB.
92
+ auto device_buffer = uninitialized_allocate_unique_n<T>(device_alloc, n);
93
+
94
+ auto const device_buffer_ptr = device_buffer.get();
95
+
96
+ // Synthesize a suitable new execution policy, because we don't want to
97
+ // try and extract twice from the one we were passed.
98
+ typename remove_cvref_t<decltype(policy)>::tag_type tag_policy{};
99
+
100
+ // Copy from the input into the buffer.
101
+
102
+ auto new_policy0 = thrust::detail::derived_cast(policy).rebind_after(
103
+ std::move(device_buffer)
104
+ );
105
+
106
+ THRUST_STATIC_ASSERT((
107
+ std::tuple_size<decltype(
108
+ extract_dependencies(policy)
109
+ )>::value + 1
110
+ <=
111
+ std::tuple_size<decltype(
112
+ extract_dependencies(new_policy0)
113
+ )>::value
114
+ ));
115
+
116
+ auto f0 = async_copy_n(
117
+ new_policy0
118
+ , tag_policy
119
+ , first
120
+ , n
121
+ , device_buffer_ptr
122
+ );
123
+
124
+ // Sort the buffer.
125
+
126
+ auto new_policy1 = thrust::detail::derived_cast(policy).rebind_after(
127
+ std::move(f0)
128
+ );
129
+
130
+ THRUST_STATIC_ASSERT((
131
+ std::tuple_size<decltype(
132
+ extract_dependencies(policy)
133
+ )>::value + 1
134
+ <=
135
+ std::tuple_size<decltype(
136
+ extract_dependencies(new_policy1)
137
+ )>::value
138
+ ));
139
+
140
+ auto f1 = async_sort_n(
141
+ new_policy1
142
+ , tag_policy
143
+ , device_buffer_ptr
144
+ , n
145
+ , comp
146
+ );
147
+
148
+ // Copy from the buffer into the input.
149
+ // FIXME: Combine this with the potential memcpy at the end of the main sort
150
+ // routine.
151
+
152
+ auto new_policy2 = thrust::detail::derived_cast(policy).rebind_after(
153
+ std::move(f1)
154
+ );
155
+
156
+ THRUST_STATIC_ASSERT((
157
+ std::tuple_size<decltype(
158
+ extract_dependencies(policy)
159
+ )>::value + 1
160
+ <=
161
+ std::tuple_size<decltype(
162
+ extract_dependencies(new_policy2)
163
+ )>::value
164
+ ));
165
+
166
+ return async_copy_n(
167
+ new_policy2
168
+ , tag_policy
169
+ , device_buffer_ptr
170
+ , n
171
+ , first
172
+ );
173
+ }
174
+
175
+ // ContiguousIterator iterators
176
+ // Non-Scalar value type or user-defined StrictWeakOrdering
177
+ template <
178
+ typename DerivedPolicy
179
+ , typename ForwardIt, typename Size, typename StrictWeakOrdering
180
+ >
181
+ auto async_stable_sort_n(
182
+ execution_policy<DerivedPolicy>& policy,
183
+ ForwardIt first,
184
+ Size n,
185
+ StrictWeakOrdering comp
186
+ ) ->
187
+ typename std::enable_if<
188
+ conjunction<
189
+ is_contiguous_iterator<ForwardIt>
190
+ , disjunction<
191
+ negation<
192
+ std::is_scalar<
193
+ typename iterator_traits<ForwardIt>::value_type
194
+ >
195
+ >
196
+ , negation<
197
+ is_operator_less_or_greater_function_object<StrictWeakOrdering>
198
+ >
199
+ >
200
+ >::value
201
+ , unique_eager_event
202
+ >::type
203
+ {
204
+ auto const device_alloc = get_async_device_allocator(policy);
205
+
206
+ unique_eager_event e;
207
+
208
+ // Determine temporary device storage requirements.
209
+
210
+ size_t tmp_size = 0;
211
+ thrust::cuda_cub::throw_on_error(
212
+ thrust::cuda_cub::__merge_sort::doit_step<
213
+ /* Sort items? */ std::false_type, /* Stable? */ std::true_type
214
+ >(
215
+ nullptr
216
+ , tmp_size
217
+ , first
218
+ , static_cast<thrust::detail::uint8_t*>(nullptr) // Items.
219
+ , n
220
+ , comp
221
+ , nullptr // Null stream, just for sizing.
222
+ )
223
+ , "after merge sort sizing"
224
+ );
225
+
226
+ // Allocate temporary storage.
227
+
228
+ auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
229
+ device_alloc, tmp_size
230
+ );
231
+
232
+ // The array was dynamically allocated, so we assume that it's suitably
233
+ // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator`
234
+ // make this guarantee.
235
+ auto const content_ptr = content.get();
236
+
237
+ void* const tmp_ptr = static_cast<void*>(
238
+ raw_pointer_cast(content_ptr)
239
+ );
240
+
241
+ // Set up stream with dependencies.
242
+
243
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
244
+
245
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
246
+ {
247
+ e = make_dependent_event(
248
+ std::tuple_cat(
249
+ std::make_tuple(
250
+ std::move(content)
251
+ , unique_stream(nonowning, user_raw_stream)
252
+ )
253
+ , extract_dependencies(
254
+ std::move(thrust::detail::derived_cast(policy))
255
+ )
256
+ )
257
+ );
258
+ }
259
+ else
260
+ {
261
+ e = make_dependent_event(
262
+ std::tuple_cat(
263
+ std::make_tuple(
264
+ std::move(content)
265
+ )
266
+ , extract_dependencies(
267
+ std::move(thrust::detail::derived_cast(policy))
268
+ )
269
+ )
270
+ );
271
+ }
272
+
273
+ // Run merge sort.
274
+
275
+ thrust::cuda_cub::throw_on_error(
276
+ thrust::cuda_cub::__merge_sort::doit_step<
277
+ /* Sort items? */ std::false_type, /* Stable? */ std::true_type
278
+ >(
279
+ tmp_ptr
280
+ , tmp_size
281
+ , first
282
+ , static_cast<thrust::detail::uint8_t*>(nullptr) // Items.
283
+ , n
284
+ , comp
285
+ , e.stream().native_handle()
286
+ )
287
+ , "after merge sort sizing"
288
+ );
289
+
290
+ return e;
291
+ }
292
+
293
+ template <typename T, typename Size, typename StrictWeakOrdering>
294
+ typename std::enable_if<
295
+ is_operator_less_function_object<StrictWeakOrdering>::value
296
+ , cudaError_t
297
+ >::type
298
+ invoke_radix_sort(
299
+ cudaStream_t stream
300
+ , void* tmp_ptr
301
+ , std::size_t& tmp_size
302
+ , cub::DoubleBuffer<T>& keys
303
+ , Size& n
304
+ , StrictWeakOrdering
305
+ )
306
+ {
307
+ return cub::DeviceRadixSort::SortKeys(
308
+ tmp_ptr
309
+ , tmp_size
310
+ , keys
311
+ , n
312
+ , 0
313
+ , sizeof(T) * 8
314
+ , stream
315
+ );
316
+ }
317
+
318
+ template <typename T, typename Size, typename StrictWeakOrdering>
319
+ typename std::enable_if<
320
+ is_operator_greater_function_object<StrictWeakOrdering>::value
321
+ , cudaError_t
322
+ >::type
323
+ invoke_radix_sort(
324
+ cudaStream_t stream
325
+ , void* tmp_ptr
326
+ , std::size_t& tmp_size
327
+ , cub::DoubleBuffer<T>& keys
328
+ , Size& n
329
+ , StrictWeakOrdering
330
+ )
331
+ {
332
+ return cub::DeviceRadixSort::SortKeysDescending(
333
+ tmp_ptr
334
+ , tmp_size
335
+ , keys
336
+ , n
337
+ , 0
338
+ , sizeof(T) * 8
339
+ , stream
340
+ );
341
+ }
342
+
343
+ // ContiguousIterator iterators
344
+ // Scalar value type
345
+ // operator< or operator>
346
+ template <
347
+ typename DerivedPolicy
348
+ , typename ForwardIt, typename Size, typename StrictWeakOrdering
349
+ >
350
+ auto async_stable_sort_n(
351
+ execution_policy<DerivedPolicy>& policy
352
+ , ForwardIt first
353
+ , Size n
354
+ , StrictWeakOrdering comp
355
+ ) ->
356
+ typename std::enable_if<
357
+ conjunction<
358
+ is_contiguous_iterator<ForwardIt>
359
+ , std::is_scalar<
360
+ typename iterator_traits<ForwardIt>::value_type
361
+ >
362
+ , is_operator_less_or_greater_function_object<StrictWeakOrdering>
363
+ >::value
364
+ , unique_eager_event
365
+ >::type
366
+ {
367
+ using T = typename iterator_traits<ForwardIt>::value_type;
368
+
369
+ auto const device_alloc = get_async_device_allocator(policy);
370
+
371
+ unique_eager_event e;
372
+
373
+ cub::DoubleBuffer<T> keys(
374
+ raw_pointer_cast(&*first), nullptr
375
+ );
376
+
377
+ // Determine temporary device storage requirements.
378
+
379
+ size_t tmp_size = 0;
380
+ thrust::cuda_cub::throw_on_error(
381
+ invoke_radix_sort(
382
+ nullptr // Null stream, just for sizing.
383
+ , nullptr
384
+ , tmp_size
385
+ , keys
386
+ , n
387
+ , comp
388
+ )
389
+ , "after radix sort sizing"
390
+ );
391
+
392
+ // Allocate temporary storage.
393
+
394
+ size_t keys_temp_storage = thrust::detail::aligned_storage_size(
395
+ sizeof(T) * n, 128
396
+ );
397
+
398
+ auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
399
+ device_alloc, keys_temp_storage + tmp_size
400
+ );
401
+
402
+ // The array was dynamically allocated, so we assume that it's suitably
403
+ // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator`
404
+ // make this guarantee.
405
+ auto const content_ptr = content.get();
406
+
407
+ keys.d_buffers[1] = thrust::detail::aligned_reinterpret_cast<T*>(
408
+ raw_pointer_cast(content_ptr)
409
+ );
410
+
411
+ void* const tmp_ptr = static_cast<void*>(
412
+ raw_pointer_cast(content_ptr + keys_temp_storage)
413
+ );
414
+
415
+ // Set up stream with dependencies.
416
+
417
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
418
+
419
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
420
+ {
421
+ e = make_dependent_event(
422
+ std::tuple_cat(
423
+ std::make_tuple(
424
+ std::move(content)
425
+ , unique_stream(nonowning, user_raw_stream)
426
+ )
427
+ , extract_dependencies(
428
+ std::move(thrust::detail::derived_cast(policy))
429
+ )
430
+ )
431
+ );
432
+ }
433
+ else
434
+ {
435
+ e = make_dependent_event(
436
+ std::tuple_cat(
437
+ std::make_tuple(
438
+ std::move(content)
439
+ )
440
+ , extract_dependencies(
441
+ std::move(thrust::detail::derived_cast(policy))
442
+ )
443
+ )
444
+ );
445
+ }
446
+
447
+ // Run radix sort.
448
+
449
+ thrust::cuda_cub::throw_on_error(
450
+ invoke_radix_sort(
451
+ e.stream().native_handle()
452
+ , tmp_ptr
453
+ , tmp_size
454
+ , keys
455
+ , n
456
+ , comp
457
+ )
458
+ , "after radix sort launch"
459
+ );
460
+
461
+ if (0 != keys.selector)
462
+ {
463
+ auto new_policy0 = thrust::detail::derived_cast(policy).rebind_after(
464
+ std::move(e)
465
+ );
466
+
467
+ THRUST_STATIC_ASSERT((
468
+ std::tuple_size<decltype(
469
+ extract_dependencies(policy)
470
+ )>::value + 1
471
+ <=
472
+ std::tuple_size<decltype(
473
+ extract_dependencies(new_policy0)
474
+ )>::value
475
+ ));
476
+
477
+ // Synthesize a suitable new execution policy, because we don't want to
478
+ // try and extract twice from the one we were passed.
479
+ typename remove_cvref_t<decltype(policy)>::tag_type tag_policy{};
480
+
481
+ using return_future = decltype(e);
482
+ return return_future(async_copy_n(
483
+ new_policy0
484
+ , tag_policy
485
+ , keys.d_buffers[1]
486
+ , n
487
+ , keys.d_buffers[0]
488
+ ));
489
+ }
490
+ else
491
+ return e;
492
+ }
493
+
494
+ }}} // namespace system::cuda::detail
495
+
496
+ namespace cuda_cub
497
+ {
498
+
499
+ // ADL entry point.
500
+ template <
501
+ typename DerivedPolicy
502
+ , typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
503
+ >
504
+ auto async_stable_sort(
505
+ execution_policy<DerivedPolicy>& policy,
506
+ ForwardIt first,
507
+ Sentinel last,
508
+ StrictWeakOrdering comp
509
+ )
510
+ // A GCC 5 bug requires an explicit trailing return type here, so stick with
511
+ // THRUST_DECLTYPE_RETURNS for now.
512
+ THRUST_DECLTYPE_RETURNS(
513
+ thrust::system::cuda::detail::async_stable_sort_n(
514
+ policy, first, distance(first, last), comp
515
+ )
516
+ )
517
+
518
+ } // cuda_cub
519
+
520
+ THRUST_NAMESPACE_END
521
+
522
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
523
+
524
+ #endif
525
+
miniCUDA124/include/thrust/system/cuda/detail/async/transform.h ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ // TODO: Move into system::cuda
29
+
30
+ #pragma once
31
+
32
+ #include <thrust/detail/config.h>
33
+
34
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
35
+ # pragma GCC system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
37
+ # pragma clang system_header
38
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
39
+ # pragma system_header
40
+ #endif // no system header
41
+ #include <thrust/detail/cpp14_required.h>
42
+
43
+ #if THRUST_CPP_DIALECT >= 2014
44
+
45
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
46
+
47
+ #include <thrust/system/cuda/config.h>
48
+
49
+ #include <thrust/system/cuda/detail/async/customization.h>
50
+ #include <thrust/system/cuda/detail/parallel_for.h>
51
+ #include <thrust/system/cuda/future.h>
52
+ #include <thrust/iterator/iterator_traits.h>
53
+ #include <thrust/distance.h>
54
+ #include <thrust/advance.h>
55
+
56
+ #include <type_traits>
57
+
58
+ THRUST_NAMESPACE_BEGIN
59
+
60
+ namespace system { namespace cuda { namespace detail
61
+ {
62
+
63
+ template <typename ForwardIt, typename OutputIt, typename UnaryOperation>
64
+ struct async_transform_fn
65
+ {
66
+ ForwardIt first_;
67
+ OutputIt output_;
68
+ UnaryOperation op_;
69
+
70
+ __host__ __device__
71
+ async_transform_fn(ForwardIt&& first, OutputIt&& output, UnaryOperation&& op)
72
+ : first_(std::move(first)), output_(std::move(output)), op_(std::move(op))
73
+ {}
74
+
75
+ template <typename Index>
76
+ __host__ __device__
77
+ void operator()(Index idx)
78
+ {
79
+ output_[idx] = op_(thrust::raw_reference_cast(first_[idx]));
80
+ }
81
+ };
82
+
83
+ template <
84
+ typename DerivedPolicy
85
+ , typename ForwardIt, typename Size, typename OutputIt, typename UnaryOperation
86
+ >
87
+ unique_eager_event async_transform_n(
88
+ execution_policy<DerivedPolicy>& policy,
89
+ ForwardIt first,
90
+ Size n,
91
+ OutputIt output,
92
+ UnaryOperation op
93
+ ) {
94
+ unique_eager_event e;
95
+
96
+ // Set up stream with dependencies.
97
+
98
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
99
+
100
+ if (thrust::cuda_cub::default_stream() != user_raw_stream)
101
+ {
102
+ e = make_dependent_event(
103
+ std::tuple_cat(
104
+ std::make_tuple(
105
+ unique_stream(nonowning, user_raw_stream)
106
+ )
107
+ , extract_dependencies(
108
+ std::move(thrust::detail::derived_cast(policy))
109
+ )
110
+ )
111
+ );
112
+ }
113
+ else
114
+ {
115
+ e = make_dependent_event(
116
+ extract_dependencies(
117
+ std::move(thrust::detail::derived_cast(policy))
118
+ )
119
+ );
120
+ }
121
+
122
+ // Run transform.
123
+
124
+ async_transform_fn<ForwardIt, OutputIt, UnaryOperation> wrapped(
125
+ std::move(first), std::move(output), std::move(op)
126
+ );
127
+
128
+ thrust::cuda_cub::throw_on_error(
129
+ thrust::cuda_cub::__parallel_for::parallel_for(
130
+ n, std::move(wrapped), e.stream().native_handle()
131
+ )
132
+ , "after transform launch"
133
+ );
134
+
135
+ return e;
136
+ }
137
+
138
+ }}} // namespace system::cuda::detail
139
+
140
+ namespace cuda_cub
141
+ {
142
+
143
+ // ADL entry point.
144
+ template <
145
+ typename DerivedPolicy
146
+ , typename ForwardIt, typename Sentinel, typename OutputIt
147
+ , typename UnaryOperation
148
+ >
149
+ auto async_transform(
150
+ execution_policy<DerivedPolicy>& policy,
151
+ ForwardIt first,
152
+ Sentinel last,
153
+ OutputIt output,
154
+ UnaryOperation&& op
155
+ )
156
+ THRUST_RETURNS(
157
+ thrust::system::cuda::detail::async_transform_n(
158
+ policy, first, distance(first, last), output, THRUST_FWD(op)
159
+ )
160
+ );
161
+
162
+ } // cuda_cub
163
+
164
+ THRUST_NAMESPACE_END
165
+
166
+ #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
167
+
168
+ #endif
169
+
miniCUDA124/include/thrust/system/cuda/detail/core/agent_launcher.h ADDED
@@ -0,0 +1,1172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #include <cub/detail/device_synchronize.cuh>
40
+
41
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
42
+ #include <thrust/system/cuda/detail/core/triple_chevron_launch.h>
43
+ #include <thrust/system/cuda/detail/core/util.h>
44
+ #include <thrust/system/cuda/detail/guarded_cuda_runtime_api.h>
45
+
46
+ #include <cassert>
47
+
48
+ #include <nv/target>
49
+
50
+ /**
51
+ * @def THRUST_DISABLE_KERNEL_VISIBILITY_WARNING_SUPPRESSION
52
+ * If defined, the default suppression of kernel visibility attribute warning is disabled.
53
+ */
54
+ #if !defined(THRUST_DISABLE_KERNEL_VISIBILITY_WARNING_SUPPRESSION)
55
+ _CCCL_DIAG_SUPPRESS_GCC("-Wattributes")
56
+ _CCCL_DIAG_SUPPRESS_CLANG("-Wattributes")
57
+ #if !defined(_CCCL_CUDA_COMPILER_NVHPC)
58
+ _CCCL_DIAG_SUPPRESS_NVHPC(attribute_requires_external_linkage)
59
+ #endif // !_LIBCUDACXX_COMPILER_NVHPC_CUDA
60
+ #endif // !THRUST_DISABLE_KERNEL_VISIBILITY_WARNING_SUPPRESSION
61
+
62
+ THRUST_NAMESPACE_BEGIN
63
+
64
+ namespace cuda_cub {
65
+ namespace core {
66
+
67
+ #ifndef THRUST_DETAIL_KERNEL_ATTRIBUTES
68
+ #define THRUST_DETAIL_KERNEL_ATTRIBUTES CCCL_DETAIL_KERNEL_ATTRIBUTES
69
+ #endif
70
+
71
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
72
+ #if 0
73
+ template <class Agent, class... Args>
74
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void
75
+ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
76
+ _kernel_agent(Args... args)
77
+ {
78
+ extern __shared__ char shmem[];
79
+ Agent::entry(args..., shmem);
80
+ }
81
+ #else
82
+ template <class Agent, class _0>
83
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
84
+ _kernel_agent(_0 x0)
85
+ {
86
+ extern __shared__ char shmem[];
87
+ Agent::entry(x0, shmem);
88
+ }
89
+ template <class Agent, class _0, class _1>
90
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
91
+ _kernel_agent(_0 x0, _1 x1)
92
+ {
93
+ extern __shared__ char shmem[];
94
+ Agent::entry(x0, x1, shmem);
95
+ }
96
+ template <class Agent, class _0, class _1, class _2>
97
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
98
+ _kernel_agent(_0 x0, _1 x1, _2 x2)
99
+ {
100
+ extern __shared__ char shmem[];
101
+ Agent::entry(x0, x1, x2, shmem);
102
+ }
103
+ template <class Agent, class _0, class _1, class _2, class _3>
104
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
105
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3)
106
+ {
107
+ extern __shared__ char shmem[];
108
+ Agent::entry(x0, x1, x2, x3, shmem);
109
+ }
110
+ template <class Agent, class _0, class _1, class _2, class _3, class _4>
111
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
112
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4)
113
+ {
114
+ extern __shared__ char shmem[];
115
+ Agent::entry(x0, x1, x2, x3, x4, shmem);
116
+ }
117
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5>
118
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
119
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5)
120
+ {
121
+ extern __shared__ char shmem[];
122
+ Agent::entry(x0, x1, x2, x3, x4, x5, shmem);
123
+ }
124
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
125
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
126
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6)
127
+ {
128
+ extern __shared__ char shmem[];
129
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, shmem);
130
+ }
131
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
132
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
133
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7)
134
+ {
135
+ extern __shared__ char shmem[];
136
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, shmem);
137
+ }
138
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
139
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
140
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8)
141
+ {
142
+ extern __shared__ char shmem[];
143
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, shmem);
144
+ }
145
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
146
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
147
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9)
148
+ {
149
+ extern __shared__ char shmem[];
150
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, shmem);
151
+ }
152
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
153
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
154
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA)
155
+ {
156
+ extern __shared__ char shmem[];
157
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, shmem);
158
+ }
159
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
160
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
161
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB)
162
+ {
163
+ extern __shared__ char shmem[];
164
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, shmem);
165
+ }
166
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
167
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
168
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC)
169
+ {
170
+ extern __shared__ char shmem[];
171
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, shmem);
172
+ }
173
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
174
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
175
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD)
176
+ {
177
+ extern __shared__ char shmem[];
178
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, shmem);
179
+ }
180
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
181
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
182
+ _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE)
183
+ {
184
+ extern __shared__ char shmem[];
185
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, shmem);
186
+ }
187
+ #endif
188
+
189
+ ////////////////////////////////////////////////////////////
190
+
191
+
192
+ #if 0
193
+ template <class Agent, class... Args>
194
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void
195
+ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
196
+ _kernel_agent_vshmem(char* vshmem, Args... args)
197
+ {
198
+ extern __shared__ char shmem[];
199
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
200
+ Agent::entry(args..., vshmem);
201
+ }
202
+ #else
203
+ template <class Agent, class _0>
204
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
205
+ _kernel_agent_vshmem(char* vshmem, _0 x0)
206
+ {
207
+ extern __shared__ char shmem[];
208
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
209
+ Agent::entry(x0, vshmem);
210
+ }
211
+ template <class Agent, class _0, class _1>
212
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
213
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1)
214
+ {
215
+ extern __shared__ char shmem[];
216
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
217
+ Agent::entry(x0, x1, vshmem);
218
+ }
219
+ template <class Agent, class _0, class _1, class _2>
220
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
221
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2)
222
+ {
223
+ extern __shared__ char shmem[];
224
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
225
+ Agent::entry(x0, x1, x2, vshmem);
226
+ }
227
+ template <class Agent, class _0, class _1, class _2, class _3>
228
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
229
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3)
230
+ {
231
+ extern __shared__ char shmem[];
232
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
233
+ Agent::entry(x0, x1, x2, x3, vshmem);
234
+ }
235
+ template <class Agent, class _0, class _1, class _2, class _3, class _4>
236
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
237
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4)
238
+ {
239
+ extern __shared__ char shmem[];
240
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
241
+ Agent::entry(x0, x1, x2, x3, x4, vshmem);
242
+ }
243
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5>
244
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
245
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5)
246
+ {
247
+ extern __shared__ char shmem[];
248
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
249
+ Agent::entry(x0, x1, x2, x3, x4, x5, vshmem);
250
+ }
251
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
252
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
253
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6)
254
+ {
255
+ extern __shared__ char shmem[];
256
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
257
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, vshmem);
258
+ }
259
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
260
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
261
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7)
262
+ {
263
+ extern __shared__ char shmem[];
264
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
265
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, vshmem);
266
+ }
267
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
268
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
269
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8)
270
+ {
271
+ extern __shared__ char shmem[];
272
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
273
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, vshmem);
274
+ }
275
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
276
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
277
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9)
278
+ {
279
+ extern __shared__ char shmem[];
280
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
281
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, vshmem);
282
+ }
283
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
284
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
285
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA)
286
+ {
287
+ extern __shared__ char shmem[];
288
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
289
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, vshmem);
290
+ }
291
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
292
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
293
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB)
294
+ {
295
+ extern __shared__ char shmem[];
296
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
297
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, vshmem);
298
+ }
299
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
300
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
301
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC)
302
+ {
303
+ extern __shared__ char shmem[];
304
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
305
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, vshmem);
306
+ }
307
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
308
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
309
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD)
310
+ {
311
+ extern __shared__ char shmem[];
312
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
313
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, vshmem);
314
+ }
315
+ template <class Agent, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
316
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS)
317
+ _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE)
318
+ {
319
+ extern __shared__ char shmem[];
320
+ vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size<typename Agent::ptx_plan>::value;
321
+ Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, vshmem);
322
+ }
323
+ #endif
324
+ #else
325
+ #if 0
326
+ template <class , class... Args >
327
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(Args... args) {}
328
+ template <class , class... Args >
329
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*, Args... args) {}
330
+ #else
331
+ template <class, class _0>
332
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0) {}
333
+ template <class, class _0, class _1>
334
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1) {}
335
+ template <class, class _0, class _1, class _2>
336
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2) {}
337
+ template <class, class _0, class _1, class _2, class _3>
338
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3) {}
339
+ template <class, class _0, class _1, class _2, class _3, class _4>
340
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4) {}
341
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5>
342
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5) {}
343
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
344
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5, _6) {}
345
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
346
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5, _6, _7) {}
347
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
348
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5, _6, _7, _8) {}
349
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
350
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) {}
351
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
352
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) {}
353
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
354
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) {}
355
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
356
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC) {}
357
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
358
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC, _xD) {}
359
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
360
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC, _xD, _xE) {}
361
+ ////////////////////////////////////////////////////////////
362
+ template <class, class _0>
363
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0) {}
364
+ template <class, class _0, class _1>
365
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1) {}
366
+ template <class, class _0, class _1, class _2>
367
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2) {}
368
+ template <class, class _0, class _1, class _2, class _3>
369
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3) {}
370
+ template <class, class _0, class _1, class _2, class _3, class _4>
371
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4) {}
372
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5>
373
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5) {}
374
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6>
375
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6) {}
376
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
377
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6, _7) {}
378
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
379
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6, _7, _8) {}
380
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
381
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) {}
382
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
383
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) {}
384
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
385
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) {}
386
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
387
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC) {}
388
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
389
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD) {}
390
+ template <class, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
391
+ THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD, _xE) {}
392
+ #endif
393
+ #endif
394
+
395
+
396
+ template<class Agent>
397
+ struct AgentLauncher : Agent
398
+ {
399
+ core::AgentPlan plan;
400
+ size_t count;
401
+ cudaStream_t stream;
402
+ char const* name;
403
+ unsigned int grid;
404
+ char* vshmem;
405
+ bool has_shmem;
406
+ size_t shmem_size;
407
+
408
+ enum
409
+ {
410
+ MAX_SHMEM_PER_BLOCK = 48 * 1024,
411
+ };
412
+ typedef
413
+ typename has_enough_shmem<Agent,
414
+ MAX_SHMEM_PER_BLOCK>::type has_enough_shmem_t;
415
+ typedef
416
+ has_enough_shmem<Agent,
417
+ MAX_SHMEM_PER_BLOCK> shm1;
418
+
419
+ template <class Size>
420
+ THRUST_RUNTIME_FUNCTION
421
+ AgentLauncher(AgentPlan plan_,
422
+ Size count_,
423
+ cudaStream_t stream_,
424
+ char const* name_)
425
+ : plan(plan_),
426
+ count((size_t)count_),
427
+ stream(stream_),
428
+ name(name_),
429
+ grid(static_cast<unsigned int>((count + plan.items_per_tile - 1) / plan.items_per_tile)),
430
+ vshmem(NULL),
431
+ has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size),
432
+ shmem_size(has_shmem ? plan.shared_memory_size : 0)
433
+ {
434
+ assert(count > 0);
435
+ }
436
+
437
+ template <class Size>
438
+ THRUST_RUNTIME_FUNCTION
439
+ AgentLauncher(AgentPlan plan_,
440
+ Size count_,
441
+ cudaStream_t stream_,
442
+ char* vshmem,
443
+ char const* name_)
444
+ : plan(plan_),
445
+ count((size_t)count_),
446
+ stream(stream_),
447
+ name(name_),
448
+ grid(static_cast<unsigned int>((count + plan.items_per_tile - 1) / plan.items_per_tile)),
449
+ vshmem(vshmem),
450
+ has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size),
451
+ shmem_size(has_shmem ? plan.shared_memory_size : 0)
452
+ {
453
+ assert(count > 0);
454
+ }
455
+
456
+ THRUST_RUNTIME_FUNCTION
457
+ AgentLauncher(AgentPlan plan_,
458
+ cudaStream_t stream_,
459
+ char const* name_)
460
+ : plan(plan_),
461
+ count(0),
462
+ stream(stream_),
463
+ name(name_),
464
+ grid(plan.grid_size),
465
+ vshmem(NULL),
466
+ has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size),
467
+ shmem_size(has_shmem ? plan.shared_memory_size : 0)
468
+ {
469
+ assert(plan.grid_size > 0);
470
+ }
471
+
472
+ THRUST_RUNTIME_FUNCTION
473
+ AgentLauncher(AgentPlan plan_,
474
+ cudaStream_t stream_,
475
+ char* vshmem,
476
+ char const* name_)
477
+ : plan(plan_),
478
+ count(0),
479
+ stream(stream_),
480
+ name(name_),
481
+ grid(plan.grid_size),
482
+ vshmem(vshmem),
483
+ has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size),
484
+ shmem_size(has_shmem ? plan.shared_memory_size : 0)
485
+ {
486
+ assert(plan.grid_size > 0);
487
+ }
488
+
489
+ #if 0
490
+ THRUST_RUNTIME_FUNCTION
491
+ AgentPlan static get_plan(cudaStream_t s, void* d_ptr = 0)
492
+ {
493
+ // in separable compilation mode, we have no choice
494
+ // but to call kernel to get agent_plan
495
+ // otherwise the risk is something may fail
496
+ // if user mix & match ptx versions in a separably compiled function
497
+ // http://nvbugs/1772071
498
+ // XXX may be it is too string of a requirements, consider relaxing it in
499
+ // the future
500
+ #ifdef __CUDACC_RDC__
501
+ return core::get_agent_plan<Agent>(s, d_ptr);
502
+ #else
503
+ return get_agent_plan<Agent>(core::get_ptx_version());
504
+ #endif
505
+ }
506
+ THRUST_RUNTIME_FUNCTION
507
+ AgentPlan static get_plan_default()
508
+ {
509
+ return get_agent_plan<Agent>(sm_arch<0>::type::ver);
510
+ }
511
+ #endif
512
+
513
+ THRUST_RUNTIME_FUNCTION
514
+ typename core::get_plan<Agent>::type static get_plan(cudaStream_t , void* d_ptr = 0)
515
+ {
516
+ THRUST_UNUSED_VAR(d_ptr);
517
+ return get_agent_plan<Agent>(core::get_ptx_version());
518
+ }
519
+
520
+ THRUST_RUNTIME_FUNCTION
521
+ typename core::get_plan<Agent>::type static get_plan()
522
+ {
523
+ return get_agent_plan<Agent>(lowest_supported_sm_arch::ver);
524
+ }
525
+
526
+ THRUST_RUNTIME_FUNCTION void sync() const
527
+ {
528
+ CubDebug(cub::detail::DebugSyncStream(stream));
529
+ }
530
+
531
+ template<class K>
532
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
533
+ max_blocks_per_sm_impl(K k, int block_threads)
534
+ {
535
+ int occ;
536
+ cudaError_t status = cub::MaxSmOccupancy(occ, k, block_threads);
537
+ return cuda_optional<int>(status == cudaSuccess ? occ : -1, status);
538
+ }
539
+
540
+ template <class K>
541
+ cuda_optional<int> THRUST_RUNTIME_FUNCTION
542
+ max_sm_occupancy(K k) const
543
+ {
544
+ return max_blocks_per_sm_impl(k, plan.block_threads);
545
+ }
546
+
547
+ template<class K>
548
+ THRUST_RUNTIME_FUNCTION
549
+ void print_info(K k) const
550
+ {
551
+ #if THRUST_DEBUG_SYNC_FLAG
552
+ cuda_optional<int> occ = max_sm_occupancy(k);
553
+ const int ptx_version = core::get_ptx_version();
554
+ if (count > 0)
555
+ {
556
+ _CubLog("Invoking %s<<<%u, %d, %d, %lld>>>(), %llu items total, %d items per thread, %d SM occupancy, %d vshmem size, %d ptx_version \n",
557
+ name,
558
+ grid,
559
+ plan.block_threads,
560
+ (has_shmem ? (int)plan.shared_memory_size : 0),
561
+ (long long)stream,
562
+ (long long)count,
563
+ plan.items_per_thread,
564
+ (int)occ,
565
+ (!has_shmem ? (int)plan.shared_memory_size : 0),
566
+ (int)ptx_version);
567
+ }
568
+ else
569
+ {
570
+ _CubLog("Invoking %s<<<%u, %d, %d, %lld>>>(), %d items per thread, %d SM occupancy, %d vshmem size, %d ptx_version\n",
571
+ name,
572
+ grid,
573
+ plan.block_threads,
574
+ (has_shmem ? (int)plan.shared_memory_size : 0),
575
+ (long long)stream,
576
+ plan.items_per_thread,
577
+ (int)occ,
578
+ (!has_shmem ? (int)plan.shared_memory_size : 0),
579
+ (int)ptx_version);
580
+ }
581
+ #else
582
+ (void)k;
583
+ #endif
584
+ }
585
+
586
+ ////////////////////
587
+ // Variadic code
588
+ ////////////////////
589
+
590
+ #if 0
591
+ template<class... Args>
592
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
593
+ get_max_blocks_per_sm(AgentPlan plan)
594
+ {
595
+ return max_blocks_per_sm_impl(_kernel_agent<Agent, Args...>, plan.block_threads);
596
+ }
597
+ #else
598
+ template<class _0>
599
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
600
+ get_max_blocks_per_sm(AgentPlan plan)
601
+ {
602
+ void (*ptr)(_0) = _kernel_agent<Agent, _0>;
603
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
604
+ }
605
+ template<class _0, class _1>
606
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
607
+ get_max_blocks_per_sm(AgentPlan plan)
608
+ {
609
+ void (*ptr)(_0, _1) = _kernel_agent<Agent, _0, _1>;
610
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
611
+ }
612
+ template<class _0, class _1, class _2>
613
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
614
+ get_max_blocks_per_sm(AgentPlan plan)
615
+ {
616
+ void (*ptr)(_0,_1,_2) = _kernel_agent<Agent, _0, _1, _2>;
617
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
618
+ }
619
+ template<class _0, class _1, class _2, class _3>
620
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
621
+ get_max_blocks_per_sm(AgentPlan plan)
622
+ {
623
+ void (*ptr)(_0,_1,_2,_3) = _kernel_agent<Agent, _0, _1, _2,_3>;
624
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
625
+ }
626
+ template<class _0, class _1, class _2, class _3, class _4>
627
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
628
+ get_max_blocks_per_sm(AgentPlan plan)
629
+ {
630
+ void (*ptr)(_0,_1,_2,_3,_4) = _kernel_agent<Agent, _0, _1, _2,_3,_4>;
631
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
632
+ }
633
+ template<class _0, class _1, class _2, class _3, class _4, class _5>
634
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
635
+ get_max_blocks_per_sm(AgentPlan plan)
636
+ {
637
+ void (*ptr)(_0,_1,_2,_3,_4,_5) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5>;
638
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
639
+ }
640
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6>
641
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
642
+ get_max_blocks_per_sm(AgentPlan plan)
643
+ {
644
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6>;
645
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
646
+ }
647
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
648
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
649
+ get_max_blocks_per_sm(AgentPlan plan)
650
+ {
651
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7>;
652
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
653
+ }
654
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
655
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
656
+ get_max_blocks_per_sm(AgentPlan plan)
657
+ {
658
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8>;
659
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
660
+ }
661
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
662
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
663
+ get_max_blocks_per_sm(AgentPlan plan)
664
+ {
665
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9>;
666
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
667
+ }
668
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
669
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
670
+ get_max_blocks_per_sm(AgentPlan plan)
671
+ {
672
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA>;
673
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
674
+ }
675
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
676
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
677
+ get_max_blocks_per_sm(AgentPlan plan)
678
+ {
679
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB>;
680
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
681
+ }
682
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
683
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
684
+ get_max_blocks_per_sm(AgentPlan plan)
685
+ {
686
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC>;
687
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
688
+ }
689
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
690
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
691
+ get_max_blocks_per_sm(AgentPlan plan)
692
+ {
693
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD>;
694
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
695
+ }
696
+ template<class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
697
+ static cuda_optional<int> THRUST_RUNTIME_FUNCTION
698
+ get_max_blocks_per_sm(AgentPlan plan)
699
+ {
700
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE>;
701
+ return max_blocks_per_sm_impl(ptr, plan.block_threads);
702
+ }
703
+ #endif
704
+
705
+
706
+
707
+ #if 0
708
+
709
+ // If we are guaranteed to have enough shared memory
710
+ // don't compile other kernel which accepts pointer
711
+ // and save on compilations
712
+ template <class... Args>
713
+ void THRUST_RUNTIME_FUNCTION
714
+ launch_impl(thrust::detail::true_type, Args... args) const
715
+ {
716
+ assert(has_shmem && vshmem == NULL);
717
+ print_info(_kernel_agent<Agent, Args...>);
718
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
719
+ .doit(_kernel_agent<Agent, Args...>, args...);
720
+ }
721
+
722
+ // If there is a risk of not having enough shared memory
723
+ // we compile generic kernel instead.
724
+ // This kernel is likely to be somewhat slower, but it can accomodate
725
+ // both shared and virtualized shared memories.
726
+ // Alternative option is to compile two kernels, one using shared and one
727
+ // using virtualized shared memory. While this can be slightly faster if we
728
+ // do actually have enough shared memory, the compilation time will double.
729
+ //
730
+ template <class... Args>
731
+ void THRUST_RUNTIME_FUNCTION
732
+ launch_impl(thrust::detail::false_type, Args... args) const
733
+ {
734
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
735
+ print_info(_kernel_agent_vshmem<Agent, Args...>);
736
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
737
+ .doit(_kernel_agent_vshmem<Agent, Args...>, vshmem, args...);
738
+ }
739
+
740
+ template <class... Args>
741
+ void THRUST_RUNTIME_FUNCTION
742
+ launch(Args... args) const
743
+ {
744
+ launch_impl(has_enough_shmem_t(),args...);
745
+ sync();
746
+ }
747
+ #else
748
+ template <class _0>
749
+ void THRUST_RUNTIME_FUNCTION
750
+ launch_impl(thrust::detail::false_type, _0 x0) const
751
+ {
752
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
753
+ void (*ptr)(char*, _0) = _kernel_agent_vshmem<Agent, _0>;
754
+ print_info(ptr);
755
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
756
+ .doit(ptr, vshmem, x0);
757
+ }
758
+ template <class _0, class _1>
759
+ void THRUST_RUNTIME_FUNCTION
760
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1) const
761
+ {
762
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
763
+ void (*ptr)(char*, _0, _1) = _kernel_agent_vshmem<Agent, _0, _1>;
764
+ print_info(ptr);
765
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
766
+ .doit(ptr, vshmem, x0, x1);
767
+ }
768
+ template <class _0, class _1, class _2>
769
+ void THRUST_RUNTIME_FUNCTION
770
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2) const
771
+ {
772
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
773
+ void (*ptr)(char*, _0, _1, _2) = _kernel_agent_vshmem<Agent, _0, _1, _2>;
774
+ print_info(ptr);
775
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
776
+ .doit(ptr, vshmem, x0, x1, x2);
777
+ }
778
+ template <class _0, class _1, class _2, class _3>
779
+ void THRUST_RUNTIME_FUNCTION
780
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3) const
781
+ {
782
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
783
+ void (*ptr)(char*, _0, _1, _2, _3) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3>;
784
+ print_info(ptr);
785
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
786
+ .doit(ptr, vshmem, x0, x1, x2, x3);
787
+ }
788
+ template <class _0, class _1, class _2, class _3, class _4>
789
+ void THRUST_RUNTIME_FUNCTION
790
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
791
+ {
792
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
793
+ void (*ptr)(char*, _0, _1, _2, _3, _4) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4>;
794
+ print_info(ptr);
795
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
796
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4);
797
+ }
798
+ template <class _0, class _1, class _2, class _3, class _4, class _5>
799
+ void THRUST_RUNTIME_FUNCTION
800
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
801
+ {
802
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
803
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5>;
804
+ print_info(ptr);
805
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
806
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5);
807
+ }
808
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6>
809
+ void THRUST_RUNTIME_FUNCTION
810
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
811
+ {
812
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
813
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6>;
814
+ print_info(ptr);
815
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
816
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6);
817
+ }
818
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
819
+ void THRUST_RUNTIME_FUNCTION
820
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
821
+ {
822
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
823
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7>;
824
+ print_info(ptr);
825
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
826
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7);
827
+ }
828
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
829
+ void THRUST_RUNTIME_FUNCTION
830
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
831
+ {
832
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
833
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7, _8>;
834
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
835
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8);
836
+ }
837
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
838
+ void THRUST_RUNTIME_FUNCTION
839
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
840
+ {
841
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
842
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9>;
843
+ print_info(ptr);
844
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
845
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
846
+ }
847
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
848
+ void THRUST_RUNTIME_FUNCTION
849
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA) const
850
+ {
851
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
852
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA>;
853
+ print_info(ptr);
854
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
855
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA);
856
+ }
857
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
858
+ void THRUST_RUNTIME_FUNCTION
859
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB) const
860
+ {
861
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
862
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB>;
863
+ print_info(ptr);
864
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
865
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB);
866
+ }
867
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
868
+ void THRUST_RUNTIME_FUNCTION
869
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC) const
870
+ {
871
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
872
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC>;
873
+ print_info(ptr);
874
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
875
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC);
876
+ }
877
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
878
+ void THRUST_RUNTIME_FUNCTION
879
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC,_xD xD) const
880
+ {
881
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
882
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD>;
883
+ print_info(ptr);
884
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
885
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD);
886
+ }
887
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
888
+ void THRUST_RUNTIME_FUNCTION
889
+ launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC,_xD xD,_xE xE) const
890
+ {
891
+ assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0));
892
+ void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD, _xE) = _kernel_agent_vshmem<Agent, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD, _xE>;
893
+ print_info(ptr);
894
+ launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream)
895
+ .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE);
896
+ }
897
+
898
+ ////////////////////////////////////////////////////////
899
+ ////////////////////////////////////////////////////////
900
+ ////////////////////////////////////////////////////////
901
+
902
+ template <class _0>
903
+ void THRUST_RUNTIME_FUNCTION
904
+ launch_impl(thrust::detail::true_type, _0 x0) const
905
+ {
906
+ assert(has_shmem && vshmem == NULL);
907
+ void (*ptr)(_0) = _kernel_agent<Agent, _0>;
908
+ print_info(ptr);
909
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
910
+ .doit(ptr, x0);
911
+ }
912
+ template <class _0, class _1>
913
+ void THRUST_RUNTIME_FUNCTION
914
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1) const
915
+ {
916
+ assert(has_shmem && vshmem == NULL);
917
+ void (*ptr)(_0, _1) = _kernel_agent<Agent, _0, _1>;
918
+ print_info(ptr);
919
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
920
+ .doit(ptr, x0, x1);
921
+ }
922
+ template <class _0, class _1, class _2>
923
+ void THRUST_RUNTIME_FUNCTION
924
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2) const
925
+ {
926
+ assert(has_shmem && vshmem == NULL);
927
+ void (*ptr)(_0,_1,_2) = _kernel_agent<Agent, _0, _1, _2>;
928
+ print_info(ptr);
929
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
930
+ .doit(ptr, x0, x1, x2);
931
+ }
932
+ template <class _0, class _1, class _2, class _3>
933
+ void THRUST_RUNTIME_FUNCTION
934
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3) const
935
+ {
936
+ assert(has_shmem && vshmem == NULL);
937
+ void (*ptr)(_0,_1,_2,_3) = _kernel_agent<Agent, _0, _1, _2,_3>;
938
+ print_info(ptr);
939
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
940
+ .doit(ptr, x0, x1, x2, x3);
941
+ }
942
+ template <class _0, class _1, class _2, class _3, class _4>
943
+ void THRUST_RUNTIME_FUNCTION
944
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
945
+ {
946
+ assert(has_shmem && vshmem == NULL);
947
+ void (*ptr)(_0,_1,_2,_3,_4) = _kernel_agent<Agent, _0, _1, _2,_3,_4>;
948
+ print_info(ptr);
949
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
950
+ .doit(ptr, x0, x1, x2, x3, x4);
951
+ }
952
+ template <class _0, class _1, class _2, class _3, class _4, class _5>
953
+ void THRUST_RUNTIME_FUNCTION
954
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
955
+ {
956
+ assert(has_shmem && vshmem == NULL);
957
+ void (*ptr)(_0,_1,_2,_3,_4,_5) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5>;
958
+ print_info(ptr);
959
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
960
+ .doit(ptr, x0, x1, x2, x3, x4, x5);
961
+ }
962
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6>
963
+ void THRUST_RUNTIME_FUNCTION
964
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
965
+ {
966
+ assert(has_shmem && vshmem == NULL);
967
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6>;
968
+ print_info(ptr);
969
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
970
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6);
971
+ }
972
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
973
+ void THRUST_RUNTIME_FUNCTION
974
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
975
+ {
976
+ assert(has_shmem && vshmem == NULL);
977
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7>;
978
+ print_info(ptr);
979
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
980
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7);
981
+ }
982
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
983
+ void THRUST_RUNTIME_FUNCTION
984
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
985
+ {
986
+ assert(has_shmem && vshmem == NULL);
987
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8>;
988
+ print_info(ptr);
989
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
990
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8);
991
+ }
992
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
993
+ void THRUST_RUNTIME_FUNCTION
994
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
995
+ {
996
+ assert(has_shmem && vshmem == NULL);
997
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9>;
998
+ print_info(ptr);
999
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
1000
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
1001
+ }
1002
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
1003
+ void THRUST_RUNTIME_FUNCTION
1004
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const
1005
+ {
1006
+ assert(has_shmem && vshmem == NULL);
1007
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA>;
1008
+ print_info(ptr);
1009
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
1010
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA);
1011
+ }
1012
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
1013
+ void THRUST_RUNTIME_FUNCTION
1014
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const
1015
+ {
1016
+ assert(has_shmem && vshmem == NULL);
1017
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB>;
1018
+ print_info(ptr);
1019
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
1020
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB);
1021
+ }
1022
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
1023
+ void THRUST_RUNTIME_FUNCTION
1024
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const
1025
+ {
1026
+ assert(has_shmem && vshmem == NULL);
1027
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC>;
1028
+ print_info(ptr);
1029
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
1030
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC);
1031
+ }
1032
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
1033
+ void THRUST_RUNTIME_FUNCTION
1034
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const
1035
+ {
1036
+ assert(has_shmem && vshmem == NULL);
1037
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD>;
1038
+ print_info(ptr);
1039
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
1040
+ .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD);
1041
+ }
1042
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
1043
+ void THRUST_RUNTIME_FUNCTION
1044
+ launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const
1045
+ {
1046
+ assert(has_shmem && vshmem == NULL);
1047
+ void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE) = _kernel_agent<Agent, _0, _1, _2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE>;
1048
+ print_info(ptr);
1049
+ launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream)
1050
+ .doit(ptr,x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE);
1051
+ }
1052
+
1053
+ ////////////////////////////////////////////////////////
1054
+ ////////////////////////////////////////////////////////
1055
+ ////////////////////////////////////////////////////////
1056
+
1057
+ template <class _0>
1058
+ void THRUST_RUNTIME_FUNCTION
1059
+ launch(_0 x0) const
1060
+ {
1061
+ launch_impl(has_enough_shmem_t(), x0);
1062
+ sync();
1063
+ }
1064
+ template <class _0, class _1>
1065
+ void THRUST_RUNTIME_FUNCTION
1066
+ launch(_0 x0, _1 x1) const
1067
+ {
1068
+ launch_impl(has_enough_shmem_t(), x0, x1);
1069
+ sync();
1070
+ }
1071
+ template <class _0, class _1, class _2>
1072
+ void THRUST_RUNTIME_FUNCTION
1073
+ launch(_0 x0, _1 x1, _2 x2) const
1074
+ {
1075
+ launch_impl(has_enough_shmem_t(), x0, x1, x2);
1076
+ sync();
1077
+ }
1078
+ template <class _0, class _1, class _2, class _3>
1079
+ void THRUST_RUNTIME_FUNCTION
1080
+ launch(_0 x0, _1 x1, _2 x2, _3 x3) const
1081
+ {
1082
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3);
1083
+ sync();
1084
+ }
1085
+ template <class _0, class _1, class _2, class _3, class _4>
1086
+ void THRUST_RUNTIME_FUNCTION
1087
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const
1088
+ {
1089
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4);
1090
+ sync();
1091
+ }
1092
+ template <class _0, class _1, class _2, class _3, class _4, class _5>
1093
+ void THRUST_RUNTIME_FUNCTION
1094
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const
1095
+ {
1096
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5);
1097
+ sync();
1098
+ }
1099
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6>
1100
+ void THRUST_RUNTIME_FUNCTION
1101
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const
1102
+ {
1103
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6);
1104
+ sync();
1105
+ }
1106
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7>
1107
+ void THRUST_RUNTIME_FUNCTION
1108
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const
1109
+ {
1110
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7);
1111
+ sync();
1112
+ }
1113
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8>
1114
+ void THRUST_RUNTIME_FUNCTION
1115
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const
1116
+ {
1117
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8);
1118
+ sync();
1119
+ }
1120
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
1121
+ void THRUST_RUNTIME_FUNCTION
1122
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const
1123
+ {
1124
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
1125
+ sync();
1126
+ }
1127
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA>
1128
+ void THRUST_RUNTIME_FUNCTION
1129
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const
1130
+ {
1131
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA);
1132
+ sync();
1133
+ }
1134
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB>
1135
+ void THRUST_RUNTIME_FUNCTION
1136
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const
1137
+ {
1138
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB);
1139
+ sync();
1140
+ }
1141
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC>
1142
+ void THRUST_RUNTIME_FUNCTION
1143
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const
1144
+ {
1145
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC);
1146
+ sync();
1147
+ }
1148
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD>
1149
+ void THRUST_RUNTIME_FUNCTION
1150
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const
1151
+ {
1152
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD);
1153
+ sync();
1154
+ }
1155
+ template <class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9, class _xA, class _xB, class _xC, class _xD, class _xE>
1156
+ void THRUST_RUNTIME_FUNCTION
1157
+ launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const
1158
+ {
1159
+ launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE);
1160
+ sync();
1161
+ }
1162
+ #endif
1163
+
1164
+
1165
+ };
1166
+
1167
+ } // namespace core
1168
+ } // namespace cuda_cub
1169
+
1170
+ THRUST_NAMESPACE_END
1171
+
1172
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/core/alignment.h ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ // TODO: This can probably be removed.
18
+
19
+ #pragma once
20
+
21
+ #include <thrust/detail/config.h>
22
+
23
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
24
+ # pragma GCC system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
26
+ # pragma clang system_header
27
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
28
+ # pragma system_header
29
+ #endif // no system header
30
+
31
+ #include <thrust/system/cuda/detail/util.h>
32
+
33
+ THRUST_NAMESPACE_BEGIN
34
+ namespace cuda_cub {
35
+ namespace alignment_of_detail {
36
+
37
+
38
+ template <typename T>
39
+ class alignment_of_impl;
40
+
41
+ template <typename T, std::size_t size_diff>
42
+ struct helper
43
+ {
44
+ static const std::size_t value = size_diff;
45
+ };
46
+
47
+ template <typename T>
48
+ class helper<T, 0>
49
+ {
50
+ public:
51
+ static const std::size_t value = alignment_of_impl<T>::value;
52
+ };
53
+
54
+ template <typename T>
55
+ class alignment_of_impl
56
+ {
57
+ private:
58
+ struct big
59
+ {
60
+ T x;
61
+ char c;
62
+ };
63
+
64
+ public:
65
+ static const std::size_t value = helper<big, sizeof(big) - sizeof(T)>::value;
66
+ };
67
+
68
+
69
+ } // end alignment_of_detail
70
+
71
+
72
+ template <typename T>
73
+ struct alignment_of
74
+ : alignment_of_detail::alignment_of_impl<T>
75
+ {
76
+ };
77
+
78
+
79
+ template <std::size_t Align>
80
+ struct aligned_type;
81
+
82
+ // __align__ is CUDA-specific, so guard it
83
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
84
+
85
+ // implementing aligned_type portably is tricky:
86
+
87
+ #if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
88
+ // implement aligned_type with specialization because MSVC
89
+ // requires literals as arguments to declspec(align(n))
90
+ template <>
91
+ struct aligned_type<1>
92
+ {
93
+ struct __align__(1) type{};
94
+ };
95
+
96
+ template <>
97
+ struct aligned_type<2>
98
+ {
99
+ struct __align__(2) type{};
100
+ };
101
+
102
+ template <>
103
+ struct aligned_type<4>
104
+ {
105
+ struct __align__(4) type{};
106
+ };
107
+
108
+ template <>
109
+ struct aligned_type<8>
110
+ {
111
+ struct __align__(8) type{};
112
+ };
113
+
114
+ template <>
115
+ struct aligned_type<16>
116
+ {
117
+ struct __align__(16) type{};
118
+ };
119
+
120
+ template <>
121
+ struct aligned_type<32>
122
+ {
123
+ struct __align__(32) type{};
124
+ };
125
+
126
+ template <>
127
+ struct aligned_type<64>
128
+ {
129
+ struct __align__(64) type{};
130
+ };
131
+
132
+ template <>
133
+ struct aligned_type<128>
134
+ {
135
+ struct __align__(128) type{};
136
+ };
137
+
138
+ template <>
139
+ struct aligned_type<256>
140
+ {
141
+ struct __align__(256) type{};
142
+ };
143
+
144
+ template <>
145
+ struct aligned_type<512>
146
+ {
147
+ struct __align__(512) type{};
148
+ };
149
+
150
+ template <>
151
+ struct aligned_type<1024>
152
+ {
153
+ struct __align__(1024) type{};
154
+ };
155
+
156
+ template <>
157
+ struct aligned_type<2048>
158
+ {
159
+ struct __align__(2048) type{};
160
+ };
161
+
162
+ template <>
163
+ struct aligned_type<4096>
164
+ {
165
+ struct __align__(4096) type{};
166
+ };
167
+
168
+ template <>
169
+ struct aligned_type<8192>
170
+ {
171
+ struct __align__(8192) type{};
172
+ };
173
+ #elif (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) && (THRUST_GCC_VERSION < 40300)
174
+ // implement aligned_type with specialization because gcc 4.2
175
+ // requires literals as arguments to __attribute__(aligned(n))
176
+ template <>
177
+ struct aligned_type<1>
178
+ {
179
+ struct __align__(1) type{};
180
+ };
181
+
182
+ template <>
183
+ struct aligned_type<2>
184
+ {
185
+ struct __align__(2) type{};
186
+ };
187
+
188
+ template <>
189
+ struct aligned_type<4>
190
+ {
191
+ struct __align__(4) type{};
192
+ };
193
+
194
+ template <>
195
+ struct aligned_type<8>
196
+ {
197
+ struct __align__(8) type{};
198
+ };
199
+
200
+ template <>
201
+ struct aligned_type<16>
202
+ {
203
+ struct __align__(16) type{};
204
+ };
205
+
206
+ template <>
207
+ struct aligned_type<32>
208
+ {
209
+ struct __align__(32) type{};
210
+ };
211
+
212
+ template <>
213
+ struct aligned_type<64>
214
+ {
215
+ struct __align__(64) type{};
216
+ };
217
+
218
+ template <>
219
+ struct aligned_type<128>
220
+ {
221
+ struct __align__(128) type{};
222
+ };
223
+
224
+ #else
225
+ // assume the compiler allows template parameters as
226
+ // arguments to __align__
227
+ template <std::size_t Align>
228
+ struct aligned_type
229
+ {
230
+ struct __align__(Align) type{};
231
+ };
232
+ #endif // THRUST_HOST_COMPILER
233
+ #else
234
+ template <std::size_t Align>
235
+ struct aligned_type
236
+ {
237
+ struct type
238
+ {
239
+ };
240
+ };
241
+ #endif // THRUST_DEVICE_COMPILER
242
+
243
+
244
+ template <std::size_t Len, std::size_t Align>
245
+ struct aligned_storage
246
+ {
247
+ union type
248
+ {
249
+ unsigned char data[Len];
250
+
251
+ typename aligned_type<Align>::type align;
252
+ };
253
+ };
254
+
255
+
256
+ } // end cuda_
257
+
258
+ THRUST_NAMESPACE_END
miniCUDA124/include/thrust/system/cuda/detail/core/triple_chevron_launch.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+ #include <thrust/system/cuda/detail/core/alignment.h>
39
+ #include <thrust/system/cuda/detail/guarded_cuda_runtime_api.h>
40
+
41
+ #include <cassert>
42
+
43
+ THRUST_NAMESPACE_BEGIN
44
+
45
+ namespace cuda_cub {
46
+ namespace launcher {
47
+
48
+ struct _CCCL_ATTRIBUTE_HIDDEN triple_chevron
49
+ {
50
+ typedef size_t Size;
51
+ dim3 const grid;
52
+ dim3 const block;
53
+ Size const shared_mem;
54
+ cudaStream_t const stream;
55
+
56
+ THRUST_RUNTIME_FUNCTION
57
+ triple_chevron(dim3 grid_,
58
+ dim3 block_,
59
+ Size shared_mem_ = 0,
60
+ cudaStream_t stream_ = 0)
61
+ : grid(grid_),
62
+ block(block_),
63
+ shared_mem(shared_mem_),
64
+ stream(stream_) {}
65
+
66
+ template<class K, class... Args>
67
+ cudaError_t __host__
68
+ doit_host(K k, Args const&... args) const
69
+ {
70
+ k<<<grid, block, shared_mem, stream>>>(args...);
71
+ return cudaPeekAtLastError();
72
+ }
73
+
74
+ template<class T>
75
+ size_t __device__
76
+ align_up(size_t offset) const
77
+ {
78
+ size_t alignment = alignment_of<T>::value;
79
+ return alignment * ((offset + (alignment - 1))/ alignment);
80
+ }
81
+
82
+ size_t __device__ argument_pack_size(size_t size) const { return size; }
83
+ template <class Arg, class... Args>
84
+ size_t __device__
85
+ argument_pack_size(size_t size, Arg const& arg, Args const&... args) const
86
+ {
87
+ size = align_up<Arg>(size);
88
+ return argument_pack_size(size + sizeof(Arg), args...);
89
+ }
90
+
91
+ template <class Arg>
92
+ size_t __device__ copy_arg(char* buffer, size_t offset, Arg arg) const
93
+ {
94
+ offset = align_up<Arg>(offset);
95
+ for (int i = 0; i != sizeof(Arg); ++i)
96
+ buffer[offset+i] = *((char*)&arg + i);
97
+ return offset + sizeof(Arg);
98
+ }
99
+
100
+ __device__
101
+ void fill_arguments(char*, size_t) const
102
+ {}
103
+
104
+ template<class Arg, class... Args>
105
+ __device__
106
+ void fill_arguments(char* buffer,
107
+ size_t offset,
108
+ Arg const& arg,
109
+ Args const& ... args) const
110
+ {
111
+ fill_arguments(buffer, copy_arg(buffer, offset, arg), args...);
112
+ }
113
+
114
+ #ifdef THRUST_RDC_ENABLED
115
+ template<class K, class... Args>
116
+ cudaError_t __device__
117
+ doit_device(K k, Args const&... args) const
118
+ {
119
+ const size_t size = argument_pack_size(0,args...);
120
+ void *param_buffer = cudaGetParameterBuffer(64,size);
121
+ fill_arguments((char*)param_buffer, 0, args...);
122
+ return launch_device(k, param_buffer);
123
+ }
124
+
125
+ template <class K>
126
+ cudaError_t __device__
127
+ launch_device(K k, void* buffer) const
128
+ {
129
+ return cudaLaunchDevice((void*)k,
130
+ buffer,
131
+ dim3(grid),
132
+ dim3(block),
133
+ shared_mem,
134
+ stream);
135
+ }
136
+ #else
137
+ template<class K, class... Args>
138
+ cudaError_t __device__
139
+ doit_device(K, Args const&... ) const
140
+ {
141
+ return cudaErrorNotSupported;
142
+ }
143
+ #endif
144
+
145
+ __thrust_exec_check_disable__
146
+ template <class K, class... Args>
147
+ THRUST_FUNCTION
148
+ cudaError_t doit(K k, Args const&... args) const
149
+ {
150
+ NV_IF_TARGET(NV_IS_HOST,
151
+ (return doit_host(k, args...);),
152
+ (return doit_device(k, args...);));
153
+ }
154
+
155
+ }; // struct triple_chevron
156
+
157
+ } // namespace launcher
158
+ } // namespace cuda_
159
+
160
+ THRUST_NAMESPACE_END
miniCUDA124/include/thrust/system/cuda/detail/core/util.h ADDED
@@ -0,0 +1,811 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+ #include <thrust/detail/raw_pointer_cast.h>
39
+ #include <thrust/system/cuda/config.h>
40
+ #include <thrust/system/cuda/detail/util.h>
41
+ #include <thrust/system/system_error.h>
42
+ #include <thrust/type_traits/is_contiguous_iterator.h>
43
+
44
+ #include <cub/block/block_load.cuh>
45
+ #include <cub/block/block_scan.cuh>
46
+ #include <cub/block/block_store.cuh>
47
+ #include <cub/util_temporary_storage.cuh>
48
+
49
+ #include <nv/target>
50
+
51
+ THRUST_NAMESPACE_BEGIN
52
+
53
+ namespace cuda_cub {
54
+ namespace core {
55
+
56
+ #ifdef _NVHPC_CUDA
57
+ # if (__NVCOMPILER_CUDA_ARCH__ >= 600)
58
+ # define THRUST_TUNING_ARCH sm60
59
+ # elif (__NVCOMPILER_CUDA_ARCH__ >= 520)
60
+ # define THRUST_TUNING_ARCH sm52
61
+ # elif (__NVCOMPILER_CUDA_ARCH__ >= 350)
62
+ # define THRUST_TUNING_ARCH sm35
63
+ # else
64
+ # define THRUST_TUNING_ARCH sm30
65
+ # endif
66
+ #else
67
+ # if (__CUDA_ARCH__ >= 600)
68
+ # define THRUST_TUNING_ARCH sm60
69
+ # elif (__CUDA_ARCH__ >= 520)
70
+ # define THRUST_TUNING_ARCH sm52
71
+ # elif (__CUDA_ARCH__ >= 350)
72
+ # define THRUST_TUNING_ARCH sm35
73
+ # elif (__CUDA_ARCH__ >= 300)
74
+ # define THRUST_TUNING_ARCH sm30
75
+ # elif !defined (__CUDA_ARCH__)
76
+ # define THRUST_TUNING_ARCH sm30
77
+ # endif
78
+ #endif
79
+
80
+ // Typelist - a container of types, supports up to 10 types
81
+ // --------------------------------------------------------------------------
82
+
83
+ class _;
84
+ template <class = _, class = _, class = _, class = _, class = _, class = _, class = _, class = _, class = _, class = _>
85
+ struct typelist;
86
+
87
+ // -------------------------------------
88
+
89
+ // supported SM arch
90
+ // ---------------------
91
+ struct sm30 { enum { ver = 300, warpSize = 32 }; };
92
+ struct sm35 { enum { ver = 350, warpSize = 32 }; };
93
+ struct sm52 { enum { ver = 520, warpSize = 32 }; };
94
+ struct sm60 { enum { ver = 600, warpSize = 32 }; };
95
+
96
+ // list of sm, checked from left to right order
97
+ // the rightmost is the lowest sm arch supported
98
+ // --------------------------------------------
99
+ typedef typelist<sm60,sm52,sm35,sm30> sm_list;
100
+
101
+ // lowest supported SM arch
102
+ // --------------------------------------------------------------------------
103
+
104
+ template<class, class>
105
+ struct lowest_supported_sm_arch_impl;
106
+
107
+ template <class SM, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
108
+ struct lowest_supported_sm_arch_impl<SM, typelist<_0, _1, _2, _3, _4, _5, _6, _7, _8, _9> >
109
+ : lowest_supported_sm_arch_impl<_0, typelist< _1, _2, _3, _4, _5, _6, _7, _8, _9> > {};
110
+ template <class SM>
111
+ struct lowest_supported_sm_arch_impl<SM, typelist<> >
112
+ {
113
+ typedef SM type;
114
+ };
115
+
116
+ typedef typename lowest_supported_sm_arch_impl<_,sm_list>::type lowest_supported_sm_arch;
117
+
118
+ // metafunction to match next viable PtxPlan specialization
119
+ // --------------------------------------------------------------------------
120
+
121
+ __THRUST_DEFINE_HAS_NESTED_TYPE(has_tuning_t, tuning)
122
+ __THRUST_DEFINE_HAS_NESTED_TYPE(has_type_t, type)
123
+
124
+ template <template <class> class, class, class>
125
+ struct specialize_plan_impl_loop;
126
+ template <template <class> class, class>
127
+ struct specialize_plan_impl_match;
128
+
129
+ // we loop through the sm_list
130
+ template <template <class> class P, class SM, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
131
+ struct specialize_plan_impl_loop<P, SM, typelist<_0, _1, _2, _3, _4, _5, _6, _7, _8, _9> >
132
+ : specialize_plan_impl_loop<P, SM, typelist< _1, _2, _3, _4, _5, _6, _7, _8, _9> > {};
133
+
134
+ // until we find first lowest match
135
+ template <template <class> class P, class SM, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
136
+ struct specialize_plan_impl_loop <P, SM, typelist<SM, _1, _2, _3, _4, _5, _6, _7, _8, _9> >
137
+ : specialize_plan_impl_match<P, typelist<SM, _1, _2, _3, _4, _5, _6, _7, _8, _9> > {};
138
+
139
+ template<class, class>
140
+ struct has_sm_tuning_impl;
141
+
142
+ // specializing for Tunig which needs 1 arg
143
+ template <class SM,
144
+ template <class, class> class Tuning,
145
+ class _0>
146
+ struct has_sm_tuning_impl<SM, Tuning<lowest_supported_sm_arch, _0> > : has_type_t<Tuning<SM, _0> > {};
147
+
148
+ // specializing for Tunig which needs 2 args
149
+ template <class SM,
150
+ template <class, class,class> class Tuning,
151
+ class _0, class _1>
152
+ struct has_sm_tuning_impl<SM, Tuning<lowest_supported_sm_arch, _0, _1> > : has_type_t<Tuning<SM, _0, _1> > {};
153
+
154
+ template <template <class> class P, class SM>
155
+ struct has_sm_tuning : has_sm_tuning_impl<SM, typename P<lowest_supported_sm_arch>::tuning > {};
156
+
157
+ // once first match is found in sm_list, all remaining sm are possible
158
+ // candidate for tuning, so pick the first available
159
+ // if the plan P has SM-level tuning then pick it,
160
+ // otherwise move on to the next sm in the sm_list
161
+ template <template <class> class P, class SM, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
162
+ struct specialize_plan_impl_match<P, typelist<SM, _1, _2, _3, _4, _5, _6, _7, _8, _9> >
163
+ : thrust::detail::conditional<
164
+ has_sm_tuning<P, SM>::value,
165
+ P<SM>,
166
+ specialize_plan_impl_match<P, typelist<_1, _2, _3, _4, _5, _6, _7, _8, _9> > >::type {};
167
+
168
+ template <template <class> class Plan, class SM = THRUST_TUNING_ARCH>
169
+ struct specialize_plan_msvc10_war
170
+ {
171
+ // if Plan has tuning type, this means it has SM-specific tuning
172
+ // so loop through sm_list to find match,
173
+ // otherwise just specialize on provided SM
174
+ typedef thrust::detail::conditional<has_tuning_t<Plan<lowest_supported_sm_arch> >::value,
175
+ specialize_plan_impl_loop<Plan, SM, sm_list>,
176
+ Plan<SM> >
177
+ type;
178
+ };
179
+
180
+ template <template <class> class Plan, class SM = THRUST_TUNING_ARCH>
181
+ struct specialize_plan : specialize_plan_msvc10_war<Plan,SM>::type::type {};
182
+
183
+
184
+ /////////////////////////
185
+ /////////////////////////
186
+ /////////////////////////
187
+
188
+ // retrieve temp storage size from an Agent
189
+ // ---------------------------------------------------------------------------
190
+ // metafunction introspects Agent, and if it finds TempStorage type
191
+ // it will return its size
192
+
193
+ __THRUST_DEFINE_HAS_NESTED_TYPE(has_temp_storage, TempStorage)
194
+
195
+ template <class Agent, class U>
196
+ struct temp_storage_size_impl;
197
+
198
+ template <class Agent>
199
+ struct temp_storage_size_impl<Agent, thrust::detail::false_type>
200
+ {
201
+ enum
202
+ {
203
+ value = 0
204
+ };
205
+ };
206
+
207
+ template <class Agent>
208
+ struct temp_storage_size_impl<Agent, thrust::detail::true_type>
209
+ {
210
+ enum
211
+ {
212
+ value = sizeof(typename Agent::TempStorage)
213
+ };
214
+ };
215
+
216
+ template <class Agent>
217
+ struct temp_storage_size
218
+ : temp_storage_size_impl<Agent, typename has_temp_storage<Agent>::type>
219
+ {
220
+ };
221
+
222
+ // check whether all Agents requires < MAX_SHMEM shared memory
223
+ // ---------------------------------------------------------------------------
224
+ // if so, we can use simpler kernel for dispatch, which assumes that all
225
+ // shared memory is on chip.
226
+ // Otherwise, a kernel will be compiled which can also accept virtualized
227
+ // shared memory, in case there is not enough on chip. This kernel is about
228
+ // 10% slower
229
+
230
+ template <bool, class, size_t, class>
231
+ struct has_enough_shmem_impl;
232
+
233
+ template <bool V, class A, size_t S, class _0, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
234
+ struct has_enough_shmem_impl<V, A, S, typelist<_0, _1, _2, _3, _4, _5, _6, _7, _8, _9> >
235
+ : has_enough_shmem_impl<
236
+ V && (temp_storage_size<specialize_plan<A::template PtxPlan, _0> >::value <= S),
237
+ A,
238
+ S,
239
+ typelist<_1, _2, _3, _4, _5, _6, _7, _8, _9> >
240
+ {
241
+ };
242
+ template <bool V, class A, size_t S>
243
+ struct has_enough_shmem_impl<V, A, S, typelist<> >
244
+ {
245
+ enum
246
+ {
247
+ value = V
248
+ };
249
+ typedef typename thrust::detail::conditional<value,
250
+ thrust::detail::true_type,
251
+ thrust::detail::false_type>::type type;
252
+ };
253
+
254
+ template <class Agent, size_t MAX_SHMEM>
255
+ struct has_enough_shmem : has_enough_shmem_impl<true, Agent, MAX_SHMEM, sm_list>
256
+ {
257
+ };
258
+
259
+ /////////////////////////
260
+ /////////////////////////
261
+ /////////////////////////
262
+
263
+ // AgentPlan structure and helpers
264
+ // --------------------------------
265
+
266
+ struct AgentPlan
267
+ {
268
+ int block_threads;
269
+ int items_per_thread;
270
+ int items_per_tile;
271
+ int shared_memory_size;
272
+ int grid_size;
273
+
274
+ THRUST_RUNTIME_FUNCTION
275
+ AgentPlan() {}
276
+
277
+ THRUST_RUNTIME_FUNCTION
278
+ AgentPlan(int block_threads_,
279
+ int items_per_thread_,
280
+ int shared_memory_size_,
281
+ int grid_size_ = 0)
282
+ : block_threads(block_threads_),
283
+ items_per_thread(items_per_thread_),
284
+ items_per_tile(items_per_thread * block_threads),
285
+ shared_memory_size(shared_memory_size_),
286
+ grid_size(grid_size_)
287
+ {
288
+ }
289
+
290
+ THRUST_RUNTIME_FUNCTION
291
+ AgentPlan(AgentPlan const& plan)
292
+ : block_threads(plan.block_threads),
293
+ items_per_thread(plan.items_per_thread),
294
+ items_per_tile(plan.items_per_tile),
295
+ shared_memory_size(plan.shared_memory_size),
296
+ grid_size(plan.grid_size) {}
297
+
298
+ template <class PtxPlan>
299
+ THRUST_RUNTIME_FUNCTION
300
+ AgentPlan(PtxPlan,
301
+ typename thrust::detail::disable_if_convertible<
302
+ PtxPlan,
303
+ AgentPlan>::type* = NULL)
304
+ : block_threads(PtxPlan::BLOCK_THREADS),
305
+ items_per_thread(PtxPlan::ITEMS_PER_THREAD),
306
+ items_per_tile(PtxPlan::ITEMS_PER_TILE),
307
+ shared_memory_size(temp_storage_size<PtxPlan>::value),
308
+ grid_size(0)
309
+ {
310
+ }
311
+ }; // struct AgentPlan
312
+
313
+
314
+ __THRUST_DEFINE_HAS_NESTED_TYPE(has_Plan, Plan)
315
+
316
+ template <class Agent>
317
+ struct return_Plan
318
+ {
319
+ typedef typename Agent::Plan type;
320
+ };
321
+
322
+ template <class Agent>
323
+ struct get_plan : thrust::detail::conditional<
324
+ has_Plan<Agent>::value,
325
+ return_Plan<Agent>,
326
+ thrust::detail::identity_<AgentPlan> >::type
327
+ {
328
+ };
329
+
330
+ // returns AgentPlan corresponding to a given ptx version
331
+ // ------------------------------------------------------
332
+
333
+ template<class, class>
334
+ struct get_agent_plan_impl;
335
+
336
+ template<class Agent, class SM, class _1, class _2, class _3, class _4, class _5, class _6, class _7, class _8, class _9>
337
+ struct get_agent_plan_impl<Agent,typelist<SM,_1,_2,_3,_4,_5,_6,_7,_8,_9> >
338
+ {
339
+ typedef typename get_plan<Agent>::type Plan;
340
+ Plan THRUST_RUNTIME_FUNCTION
341
+ static get(int ptx_version)
342
+ {
343
+ if (ptx_version >= SM::ver)
344
+ return Plan(specialize_plan<Agent::template PtxPlan, SM>());
345
+ else
346
+ return get_agent_plan_impl<Agent,
347
+ typelist<_1, _2, _3, _4, _5, _6, _7, _8, _9> >::
348
+ get(ptx_version);
349
+ }
350
+ };
351
+
352
+ template<class Agent>
353
+ struct get_agent_plan_impl<Agent,typelist<lowest_supported_sm_arch> >
354
+ {
355
+ typedef typename get_plan<Agent>::type Plan;
356
+ Plan THRUST_RUNTIME_FUNCTION
357
+ static get(int /* ptx_version */)
358
+ {
359
+ typedef typename get_plan<Agent>::type Plan;
360
+ return Plan(specialize_plan<Agent::template PtxPlan, lowest_supported_sm_arch>());
361
+ }
362
+ };
363
+
364
+ template <class Agent>
365
+ THRUST_RUNTIME_FUNCTION
366
+ typename get_plan<Agent>::type get_agent_plan(int ptx_version)
367
+ {
368
+ NV_IF_TARGET(
369
+ NV_IS_DEVICE,
370
+ (
371
+ THRUST_UNUSED_VAR(ptx_version);
372
+ using plan_type = typename get_plan<Agent>::type;
373
+ using ptx_plan = typename Agent::ptx_plan;
374
+ return plan_type{ptx_plan{}};
375
+ ), // NV_IS_HOST:
376
+ ( return get_agent_plan_impl<Agent, sm_list>::get(ptx_version); ));
377
+ }
378
+
379
+ // XXX keep this dead-code for now as a gentle reminder
380
+ // that kernel luunch which reats plan values is the most robust
381
+ // mechanism to extract sm-specific tuning parameters
382
+ // TODO: since we are unable to afford kernel launch + cudaMemcpy ON EVERY
383
+ // algorithm invocation, we need to design a good caching strategy
384
+ // such that when the algorithm is called multiple times, only the
385
+ // first invocation will invoke kernel launch + cudaMemcpy, but
386
+ // the subsequent invocations, will just read cached values from host mem
387
+ // If launched from device, this is just a device-function call
388
+ // no caching is required.
389
+ // ----------------------------------------------------------------------------
390
+ // if we don't know ptx version, we can call kernel
391
+ // to retrieve AgentPlan from device code. Slower, but guaranteed to work
392
+ // -----------------------------------------------------------------------
393
+ #if 0
394
+ template<class Agent>
395
+ void __global__ get_agent_plan_kernel(AgentPlan *plan);
396
+
397
+ static __device__ AgentPlan agent_plan_device;
398
+
399
+ template<class Agent>
400
+ AgentPlan __device__ get_agent_plan_dev()
401
+ {
402
+ AgentPlan plan;
403
+ plan.block_threads = Agent::ptx_plan::BLOCK_THREADS;
404
+ plan.items_per_thread = Agent::ptx_plan::ITEMS_PER_THREAD;
405
+ plan.items_per_tile = Agent::ptx_plan::ITEMS_PER_TILE;
406
+ plan.shared_memory_size = temp_storage_size<typename Agent::ptx_plan>::value;
407
+ return plan;
408
+ }
409
+
410
+ template <class Agent, class F>
411
+ AgentPlan __host__ __device__ __forceinline__
412
+ xget_agent_plan_impl(F f, cudaStream_t s, void* d_ptr)
413
+ {
414
+ AgentPlan plan;
415
+ #ifdef __CUDA_ARCH__
416
+ plan = get_agent_plan_dev<Agent>();
417
+ #else
418
+ static cub::Mutex mutex;
419
+ bool lock = false;
420
+ if (d_ptr == 0)
421
+ {
422
+ lock = true;
423
+ cudaGetSymbolAddress(&d_ptr, agent_plan_device);
424
+ }
425
+ if (lock)
426
+ mutex.Lock();
427
+ f<<<1,1,0,s>>>((AgentPlan*)d_ptr);
428
+ cudaMemcpyAsync((void*)&plan,
429
+ d_ptr,
430
+ sizeof(AgentPlan),
431
+ cudaMemcpyDeviceToHost,
432
+ s);
433
+ if (lock)
434
+ mutex.Unlock();
435
+ cudaStreamSynchronize(s);
436
+ #endif
437
+ return plan;
438
+ }
439
+
440
+ template <class Agent>
441
+ AgentPlan THRUST_RUNTIME_FUNCTION
442
+ get_agent_plan(cudaStream_t s = 0, void *ptr = 0)
443
+ {
444
+ return xget_agent_plan_impl<Agent>(get_agent_plan_kernel<Agent>,
445
+ s,
446
+ ptr);
447
+ }
448
+
449
+ template<class Agent>
450
+ void __global__ get_agent_plan_kernel(AgentPlan *plan)
451
+ {
452
+ *plan = get_agent_plan_dev<Agent>();
453
+ }
454
+ #endif
455
+
456
+ /////////////////////////
457
+ /////////////////////////
458
+ /////////////////////////
459
+
460
+ THRUST_RUNTIME_FUNCTION
461
+ inline int get_sm_count()
462
+ {
463
+ int dev_id;
464
+ cuda_cub::throw_on_error(cudaGetDevice(&dev_id),
465
+ "get_sm_count :"
466
+ "failed to cudaGetDevice");
467
+
468
+ cudaError_t status;
469
+ int i32value;
470
+ status = cudaDeviceGetAttribute(&i32value,
471
+ cudaDevAttrMultiProcessorCount,
472
+ dev_id);
473
+ cuda_cub::throw_on_error(status,
474
+ "get_sm_count:"
475
+ "failed to sm_count");
476
+ return i32value;
477
+ }
478
+
479
+ THRUST_RUNTIME_FUNCTION
480
+ inline size_t get_max_shared_memory_per_block()
481
+ {
482
+ int dev_id;
483
+ cuda_cub::throw_on_error(cudaGetDevice(&dev_id),
484
+ "get_max_shared_memory_per_block :"
485
+ "failed to cudaGetDevice");
486
+
487
+ cudaError_t status;
488
+ int i32value;
489
+ status = cudaDeviceGetAttribute(&i32value,
490
+ cudaDevAttrMaxSharedMemoryPerBlock,
491
+ dev_id);
492
+ cuda_cub::throw_on_error(status,
493
+ "get_max_shared_memory_per_block :"
494
+ "failed to get max shared memory per block");
495
+
496
+ return static_cast<size_t>(i32value);
497
+ }
498
+
499
+ THRUST_RUNTIME_FUNCTION
500
+ inline size_t virtual_shmem_size(size_t shmem_per_block)
501
+ {
502
+ size_t max_shmem_per_block = core::get_max_shared_memory_per_block();
503
+ if (shmem_per_block > max_shmem_per_block)
504
+ return shmem_per_block;
505
+ else
506
+ return 0;
507
+ }
508
+
509
+ THRUST_RUNTIME_FUNCTION
510
+ inline size_t vshmem_size(size_t shmem_per_block, size_t num_blocks)
511
+ {
512
+ size_t max_shmem_per_block = core::get_max_shared_memory_per_block();
513
+ if (shmem_per_block > max_shmem_per_block)
514
+ return shmem_per_block*num_blocks;
515
+ else
516
+ return 0;
517
+ }
518
+
519
+ // LoadIterator
520
+ // ------------
521
+ // if trivial iterator is passed, wrap loads into LDG
522
+ //
523
+ template <class PtxPlan, class It>
524
+ struct LoadIterator
525
+ {
526
+ typedef typename iterator_traits<It>::value_type value_type;
527
+ typedef typename iterator_traits<It>::difference_type size_type;
528
+
529
+ typedef typename thrust::detail::conditional<
530
+ is_contiguous_iterator<It>::value,
531
+ cub::CacheModifiedInputIterator<PtxPlan::LOAD_MODIFIER,
532
+ value_type,
533
+ size_type>,
534
+ It>::type type;
535
+ }; // struct Iterator
536
+
537
+ template <class PtxPlan, class It>
538
+ typename LoadIterator<PtxPlan, It>::type __device__ __forceinline__
539
+ make_load_iterator_impl(It it, thrust::detail::true_type /* is_trivial */)
540
+ {
541
+ return raw_pointer_cast(&*it);
542
+ }
543
+
544
+ template <class PtxPlan, class It>
545
+ typename LoadIterator<PtxPlan, It>::type __device__ __forceinline__
546
+ make_load_iterator_impl(It it, thrust::detail::false_type /* is_trivial */)
547
+ {
548
+ return it;
549
+ }
550
+
551
+ template <class PtxPlan, class It>
552
+ typename LoadIterator<PtxPlan, It>::type __device__ __forceinline__
553
+ make_load_iterator(PtxPlan const&, It it)
554
+ {
555
+ return make_load_iterator_impl<PtxPlan>(
556
+ it, typename is_contiguous_iterator<It>::type());
557
+ }
558
+
559
+ template<class>
560
+ struct get_arch;
561
+
562
+ template<template<class> class Plan, class Arch>
563
+ struct get_arch<Plan<Arch> > { typedef Arch type; };
564
+
565
+ // BlockLoad
566
+ // -----------
567
+ // a helper metaprogram that returns type of a block loader
568
+ template <class PtxPlan,
569
+ class It,
570
+ class T = typename iterator_traits<It>::value_type>
571
+ struct BlockLoad
572
+ {
573
+ using type = cub::BlockLoad<T,
574
+ PtxPlan::BLOCK_THREADS,
575
+ PtxPlan::ITEMS_PER_THREAD,
576
+ PtxPlan::LOAD_ALGORITHM,
577
+ 1,
578
+ 1,
579
+ get_arch<PtxPlan>::type::ver>;
580
+ };
581
+
582
+ // BlockStore
583
+ // -----------
584
+ // a helper metaprogram that returns type of a block loader
585
+ template <class PtxPlan,
586
+ class It,
587
+ class T = typename iterator_traits<It>::value_type>
588
+ struct BlockStore
589
+ {
590
+ using type = cub::BlockStore<T,
591
+ PtxPlan::BLOCK_THREADS,
592
+ PtxPlan::ITEMS_PER_THREAD,
593
+ PtxPlan::STORE_ALGORITHM,
594
+ 1,
595
+ 1,
596
+ get_arch<PtxPlan>::type::ver>;
597
+ };
598
+
599
+ // cuda_optional
600
+ // --------------
601
+ // used for function that return cudaError_t along with the result
602
+ //
603
+ template <class T>
604
+ class cuda_optional
605
+ {
606
+ cudaError_t status_{cudaSuccess};
607
+ T value_{};
608
+
609
+ public:
610
+ cuda_optional() = default;
611
+
612
+ __host__ __device__
613
+ cuda_optional(T v, cudaError_t status = cudaSuccess) : status_(status), value_(v) {}
614
+
615
+ bool __host__ __device__
616
+ isValid() const { return cudaSuccess == status_; }
617
+
618
+ cudaError_t __host__ __device__
619
+ status() const { return status_; }
620
+
621
+ __host__ __device__ T const &
622
+ value() const { return value_; }
623
+
624
+ __host__ __device__ operator T const &() const { return value_; }
625
+ };
626
+
627
+ THRUST_RUNTIME_FUNCTION
628
+ inline int get_ptx_version()
629
+ {
630
+ int ptx_version = 0;
631
+ if (cub::PtxVersion(ptx_version) != cudaSuccess)
632
+ {
633
+ // Failure might mean that there's no device found
634
+ const int current_device = cub::CurrentDevice();
635
+ if (current_device < 0)
636
+ {
637
+ cuda_cub::throw_on_error(cudaErrorNoDevice, "No GPU is available\n");
638
+ }
639
+
640
+ // Any subsequent failure means the provided device binary does not match
641
+ // the generated function code
642
+ int major = 0, minor = 0;
643
+ cudaError_t attr_status;
644
+
645
+ attr_status = cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, current_device);
646
+ cuda_cub::throw_on_error(attr_status,
647
+ "get_ptx_version :"
648
+ "failed to get major CUDA device compute capability version.");
649
+
650
+ attr_status = cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, current_device);
651
+ cuda_cub::throw_on_error(attr_status,
652
+ "get_ptx_version :"
653
+ "failed to get minor CUDA device compute capability version.");
654
+
655
+ // Index from which SM code has to start in the message below
656
+ int code_offset = 37;
657
+ char str[] = "This program was not compiled for SM \n";
658
+
659
+ auto print_1_helper = [&](int v) {
660
+ str[code_offset] = static_cast<char>(v) + '0';
661
+ code_offset++;
662
+ };
663
+
664
+ // Assume two digits will be enough
665
+ auto print_2_helper = [&](int v) {
666
+ if (v / 10 != 0) {
667
+ print_1_helper(v / 10);
668
+ }
669
+ print_1_helper(v % 10);
670
+ };
671
+
672
+ print_2_helper(major);
673
+ print_2_helper(minor);
674
+
675
+ cuda_cub::throw_on_error(cudaErrorInvalidDevice, str);
676
+ }
677
+
678
+ return ptx_version;
679
+ }
680
+
681
+ THRUST_RUNTIME_FUNCTION
682
+ inline cudaError_t sync_stream(cudaStream_t stream)
683
+ {
684
+ return cub::SyncStream(stream);
685
+ }
686
+
687
+ inline void __device__ sync_threadblock()
688
+ {
689
+ cub::CTA_SYNC();
690
+ }
691
+
692
+ #define CUDA_CUB_RET_IF_FAIL(e) \
693
+ { \
694
+ auto const error = (e); \
695
+ if (cub::Debug(error, __FILE__, __LINE__)) return error; \
696
+ }
697
+
698
+ // uninitialized
699
+ // -------
700
+ // stores type in uninitialized form
701
+ //
702
+ template <class T>
703
+ struct uninitialized
704
+ {
705
+ typedef typename cub::UnitWord<T>::DeviceWord DeviceWord;
706
+
707
+ enum
708
+ {
709
+ WORDS = sizeof(T) / sizeof(DeviceWord)
710
+ };
711
+
712
+ DeviceWord storage[WORDS];
713
+
714
+ __host__ __device__ __forceinline__ T& get()
715
+ {
716
+ return reinterpret_cast<T&>(*this);
717
+ }
718
+
719
+ __host__ __device__ __forceinline__ operator T&() { return get(); }
720
+ };
721
+
722
+ // uninitialized_array
723
+ // --------------
724
+ // allocates uninitialized data on stack
725
+ template<class T, size_t N>
726
+ struct array
727
+ {
728
+ typedef T value_type;
729
+ typedef T ref[N];
730
+ enum {SIZE = N};
731
+ private:
732
+ T data_[N];
733
+
734
+ public:
735
+ __host__ __device__ T* data() { return data_; }
736
+ __host__ __device__ const T* data() const { return data_; }
737
+ __host__ __device__ T& operator[](unsigned int idx) { return ((T*)data_)[idx]; }
738
+ __host__ __device__ T const& operator[](unsigned int idx) const { return ((T*)data_)[idx]; }
739
+ __host__ __device__ unsigned int size() const { return N; }
740
+ __host__ __device__ operator ref&() { return data_; }
741
+ };
742
+
743
+
744
+ // uninitialized_array
745
+ // --------------
746
+ // allocates uninitialized data on stack
747
+ template<class T, size_t N>
748
+ struct uninitialized_array
749
+ {
750
+ typedef T value_type;
751
+ typedef T ref[N];
752
+ enum {SIZE = N};
753
+ private:
754
+ char data_[N * sizeof(T)];
755
+
756
+ public:
757
+ __host__ __device__ T* data() { return data_; }
758
+ __host__ __device__ const T* data() const { return data_; }
759
+ __host__ __device__ T& operator[](unsigned int idx) { return ((T*)data_)[idx]; }
760
+ __host__ __device__ T const& operator[](unsigned int idx) const { return ((T*)data_)[idx]; }
761
+ __host__ __device__ T& operator[](int idx) { return ((T*)data_)[idx]; }
762
+ __host__ __device__ T const& operator[](int idx) const { return ((T*)data_)[idx]; }
763
+ __host__ __device__ unsigned int size() const { return N; }
764
+ __host__ __device__ operator ref&() { return *reinterpret_cast<ref*>(data_); }
765
+ __host__ __device__ ref& get_ref() { return (ref&)*this; }
766
+ };
767
+
768
+ __host__ __device__ __forceinline__ size_t align_to(size_t n, size_t align)
769
+ {
770
+ return ((n+align-1)/align) * align;
771
+ }
772
+
773
+ namespace host {
774
+ inline cuda_optional<size_t> get_max_shared_memory_per_block()
775
+ {
776
+ cudaError_t status = cudaSuccess;
777
+ int dev_id = 0;
778
+ status = cudaGetDevice(&dev_id);
779
+ if (status != cudaSuccess) return cuda_optional<size_t>(0, status);
780
+
781
+ int max_shmem = 0;
782
+ status = cudaDeviceGetAttribute(&max_shmem,
783
+ cudaDevAttrMaxSharedMemoryPerBlock,
784
+ dev_id);
785
+ if (status != cudaSuccess) return cuda_optional<size_t>(0, status);
786
+ return cuda_optional<size_t>(max_shmem, status);
787
+ }
788
+ }
789
+
790
+ template <int ALLOCATIONS>
791
+ THRUST_RUNTIME_FUNCTION cudaError_t
792
+ alias_storage(void* storage_ptr,
793
+ size_t& storage_size,
794
+ void* (&allocations)[ALLOCATIONS],
795
+ size_t (&allocation_sizes)[ALLOCATIONS])
796
+ {
797
+ return cub::AliasTemporaries(storage_ptr,
798
+ storage_size,
799
+ allocations,
800
+ allocation_sizes);
801
+ }
802
+
803
+
804
+ } // namespace core
805
+ using core::sm60;
806
+ using core::sm52;
807
+ using core::sm35;
808
+ using core::sm30;
809
+ } // namespace cuda_
810
+
811
+ THRUST_NAMESPACE_END
miniCUDA124/include/thrust/system/cuda/detail/internal/copy_cross_system.h ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ // XXX
40
+ // this file must not be included on its own, ever,
41
+ // but must be part of include in thrust/system/cuda/detail/copy.h
42
+
43
+ #include <thrust/system/cuda/config.h>
44
+
45
+ #include <thrust/distance.h>
46
+ #include <thrust/advance.h>
47
+ #include <thrust/detail/raw_pointer_cast.h>
48
+ #include <thrust/system/cuda/detail/uninitialized_copy.h>
49
+ #include <thrust/system/cuda/detail/util.h>
50
+ #include <thrust/detail/temporary_array.h>
51
+ #include <thrust/type_traits/is_trivially_relocatable.h>
52
+
53
+ THRUST_NAMESPACE_BEGIN
54
+ namespace cuda_cub {
55
+
56
+ namespace __copy {
57
+
58
+
59
// Bitwise copy of `count` elements of type T from host memory (src) to
// device memory (dst), issued on the stream associated with the device
// execution policy. Only valid for trivially-relocatable T (enforced by
// the caller's tag dispatch). Throws on CUDA failure via throw_on_error.
template <class H,
          class D,
          class T,
          class Size>
THRUST_HOST_FUNCTION void
trivial_device_copy(thrust::cpp::execution_policy<H>& ,
                    thrust::cuda_cub::execution_policy<D>& device_s,
                    T* dst,
                    T const* src,
                    Size count)
{
  cudaError status;
  status = cuda_cub::trivial_copy_to_device(dst,
                                            src,
                                            count,
                                            cuda_cub::stream(device_s));
  cuda_cub::throw_on_error(status, "__copy::trivial_device_copy H->D: failed");
}
77
+
78
+ template <class D,
79
+ class H,
80
+ class T,
81
+ class Size>
82
+ THRUST_HOST_FUNCTION void
83
+ trivial_device_copy(thrust::cuda_cub::execution_policy<D>& device_s,
84
+ thrust::cpp::execution_policy<H>& ,
85
+ T* dst,
86
+ T const* src,
87
+ Size count)
88
+ {
89
+ cudaError status;
90
+ status = cuda_cub::trivial_copy_from_device(dst,
91
+ src,
92
+ count,
93
+ cuda_cub::stream(device_s));
94
+ cuda_cub::throw_on_error(status, "trivial_device_copy D->H failed");
95
+ }
96
+
97
// Counted cross-system copy for trivially-relocatable element types:
// performs a single bitwise transfer. The direction (H->D vs. D->H) is
// resolved by overload selection on the derived policy types passed to
// trivial_device_copy. Returns the end of the output range.
template <class System1,
          class System2,
          class InputIt,
          class Size,
          class OutputIt>
OutputIt __host__
cross_system_copy_n(thrust::execution_policy<System1>& sys1,
                    thrust::execution_policy<System2>& sys2,
                    InputIt begin,
                    Size n,
                    OutputIt result,
                    thrust::detail::true_type) // trivial copy
{
  typedef typename iterator_traits<InputIt>::value_type InputTy;
  // NOTE(review): raw_pointer_cast(&*it) presumes both ranges expose
  // contiguous underlying storage — implied by the trivial-relocation
  // dispatch, but confirm for exotic iterator types.
  if (n > 0) {
    trivial_device_copy(derived_cast(sys1),
                        derived_cast(sys2),
                        reinterpret_cast<InputTy*>(thrust::raw_pointer_cast(&*result)),
                        reinterpret_cast<InputTy const*>(thrust::raw_pointer_cast(&*begin)),
                        n);
  }

  return result + n;
}
122
+
123
+ // non-trivial H->D copy
124
+ template <class H,
125
+ class D,
126
+ class InputIt,
127
+ class Size,
128
+ class OutputIt>
129
+ OutputIt __host__
130
+ cross_system_copy_n(thrust::cpp::execution_policy<H>& host_s,
131
+ thrust::cuda_cub::execution_policy<D>& device_s,
132
+ InputIt first,
133
+ Size num_items,
134
+ OutputIt result,
135
+ thrust::detail::false_type) // non-trivial copy
136
+ {
137
+ // get type of the input data
138
+ typedef typename thrust::iterator_value<InputIt>::type InputTy;
139
+
140
+ // copy input data into host temp storage
141
+ InputIt last = first;
142
+ thrust::advance(last, num_items);
143
+ thrust::detail::temporary_array<InputTy, H> temp(host_s, num_items);
144
+
145
+ for (Size idx = 0; idx != num_items; idx++)
146
+ {
147
+ ::new (static_cast<void*>(temp.data().get()+idx)) InputTy(*first);
148
+ ++first;
149
+ }
150
+
151
+ // allocate device temporary storage
152
+ thrust::detail::temporary_array<InputTy, D> d_in_ptr(device_s, num_items);
153
+
154
+ // trivial copy data from host to device
155
+ cudaError status = cuda_cub::trivial_copy_to_device(d_in_ptr.data().get(),
156
+ temp.data().get(),
157
+ num_items,
158
+ cuda_cub::stream(device_s));
159
+ cuda_cub::throw_on_error(status, "__copy:: H->D: failed");
160
+
161
+
162
+ // device->device copy
163
+ OutputIt ret = cuda_cub::copy_n(device_s, d_in_ptr.data(), num_items, result);
164
+
165
+ return ret;
166
+ }
167
+
168
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
169
+ // non-trivial copy D->H, only supported with NVCC compiler
170
+ // because copy ctor must have __device__ annotations, which is nvcc-only
171
+ // feature
172
// Non-trivial D->H counted copy (NVCC-only; see guard above): elements are
// (1) copied element-wise on the device into a device staging buffer via
// uninitialized_copy_n (requires a __device__-callable copy constructor),
// (2) bitwise-copied to a host staging buffer, and (3) copied on the host
// into the destination range. Returns the end of the output range.
template <class D,
          class H,
          class InputIt,
          class Size,
          class OutputIt>
OutputIt __host__
cross_system_copy_n(thrust::cuda_cub::execution_policy<D>& device_s,
                    thrust::cpp::execution_policy<H>& host_s,
                    InputIt first,
                    Size num_items,
                    OutputIt result,
                    thrust::detail::false_type) // non-trivial copy

{
  // get type of the input data
  typedef typename thrust::iterator_value<InputIt>::type InputTy;

  // allocate device temp storage
  thrust::detail::temporary_array<InputTy, D> d_in_ptr(device_s, num_items);

  // uninitialized copy into temp device storage
  cuda_cub::uninitialized_copy_n(device_s, first, num_items, d_in_ptr.data());

  // allocate host temp storage
  thrust::detail::temporary_array<InputTy, H> temp(host_s, num_items);

  // trivial (bitwise) copy from device staging to host staging
  cudaError status;
  status = cuda_cub::trivial_copy_from_device(temp.data().get(),
                                              d_in_ptr.data().get(),
                                              num_items,
                                              cuda_cub::stream(device_s));
  cuda_cub::throw_on_error(status, "__copy:: D->H: failed");

  // host->host element-wise copy into the user's output range
  OutputIt ret = thrust::copy_n(host_s, temp.data(), num_items, result);

  return ret;
}
211
+ #endif
212
+
213
// Entry point for counted cross-system (host<->device) copy: dispatches at
// compile time to the trivial (bitwise) or non-trivial (staged,
// element-wise) implementation based on whether *InputIt can be trivially
// relocated into *OutputIt.
template <class System1,
          class System2,
          class InputIt,
          class Size,
          class OutputIt>
OutputIt __host__
cross_system_copy_n(cross_system<System1, System2> systems,
                    InputIt begin,
                    Size n,
                    OutputIt result)
{
  return cross_system_copy_n(
    derived_cast(systems.sys1),
    derived_cast(systems.sys2),
    begin,
    n,
    result,
    typename is_indirectly_trivially_relocatable_to<InputIt, OutputIt>::type());
}
232
+
233
+ template <class System1,
234
+ class System2,
235
+ class InputIterator,
236
+ class OutputIterator>
237
+ OutputIterator __host__
238
+ cross_system_copy(cross_system<System1, System2> systems,
239
+ InputIterator begin,
240
+ InputIterator end,
241
+ OutputIterator result)
242
+ {
243
+ return cross_system_copy_n(systems,
244
+ begin,
245
+ thrust::distance(begin, end),
246
+ result);
247
+ }
248
+
249
+ } // namespace __copy
250
+
251
+ } // namespace cuda_cub
252
+ THRUST_NAMESPACE_END
miniCUDA124/include/thrust/system/cuda/detail/internal/copy_device_to_device.h ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ /******************************************************************************
3
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of the NVIDIA CORPORATION nor the
13
+ * names of its contributors may be used to endorse or promote products
14
+ * derived from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
20
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
+ *
27
+ ******************************************************************************/
28
+ #pragma once
29
+
30
+ #include <thrust/detail/config.h>
31
+
32
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
33
+ # pragma GCC system_header
34
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
35
+ # pragma clang system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
37
+ # pragma system_header
38
+ #endif // no system header
39
+
40
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
41
+ #include <thrust/system/cuda/config.h>
42
+ #include <thrust/system/cuda/detail/execution_policy.h>
43
+ #include <thrust/system/cuda/detail/transform.h>
44
+ #include <thrust/system/cuda/detail/util.h>
45
+ #include <thrust/distance.h>
46
+ #include <thrust/functional.h>
47
+ #include <thrust/type_traits/is_trivially_relocatable.h>
48
+
49
+ THRUST_NAMESPACE_BEGIN
50
+ namespace cuda_cub {
51
+
52
+ namespace __copy {
53
// Device-to-device copy for trivially-relocatable element types: a single
// bitwise device-to-device transfer. The true_type tag argument selects
// this overload. Returns the end of the output range.
template <class Derived,
          class InputIt,
          class OutputIt>
OutputIt THRUST_RUNTIME_FUNCTION
device_to_device(execution_policy<Derived>& policy,
                 InputIt first,
                 InputIt last,
                 OutputIt result,
                 thrust::detail::true_type)
{
  typedef typename thrust::iterator_traits<InputIt>::value_type InputTy;
  const auto n = thrust::distance(first, last);
  // NOTE(review): raw_pointer_cast(&*it) presumes both ranges expose
  // contiguous device storage — implied by the trivial-relocation
  // dispatch, but confirm for exotic iterator types.
  if (n > 0) {
    cudaError status;
    status = trivial_copy_device_to_device(policy,
                                           reinterpret_cast<InputTy*>(thrust::raw_pointer_cast(&*result)),
                                           reinterpret_cast<InputTy const*>(thrust::raw_pointer_cast(&*first)),
                                           n);
    cuda_cub::throw_on_error(status, "__copy:: D->D: failed");
  }

  return result + n;
}
76
+
77
// Device-to-device copy for non-trivially-relocatable element types:
// falls back to an element-wise transform with the identity functor,
// which runs the element's own copy semantics on the device. The
// false_type tag argument selects this overload.
template <class Derived,
          class InputIt,
          class OutputIt>
OutputIt THRUST_RUNTIME_FUNCTION
device_to_device(execution_policy<Derived>& policy,
                 InputIt first,
                 InputIt last,
                 OutputIt result,
                 thrust::detail::false_type)
{
  typedef typename thrust::iterator_traits<InputIt>::value_type InputTy;
  return cuda_cub::transform(policy,
                             first,
                             last,
                             result,
                             thrust::identity<InputTy>());
}
+
95
// Entry point for device-to-device copy: dispatches at compile time to the
// bitwise or element-wise implementation above, based on whether *InputIt
// can be trivially relocated into *OutputIt.
template <class Derived,
          class InputIt,
          class OutputIt>
OutputIt THRUST_RUNTIME_FUNCTION
device_to_device(execution_policy<Derived>& policy,
                 InputIt first,
                 InputIt last,
                 OutputIt result)
{
  return device_to_device(policy,
                          first,
                          last,
                          result,
                          typename is_indirectly_trivially_relocatable_to<InputIt, OutputIt>::type());
}
110
+ } // namespace __copy
111
+
112
+ } // namespace cuda_cub
113
+ THRUST_NAMESPACE_END
114
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/reduce_by_key.h ADDED
@@ -0,0 +1,1217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+
41
+ #include <thrust/detail/alignment.h>
42
+ #include <thrust/detail/cstdint.h>
43
+ #include <thrust/detail/minmax.h>
44
+ #include <thrust/detail/mpl/math.h>
45
+ #include <thrust/detail/raw_reference_cast.h>
46
+ #include <thrust/detail/temporary_array.h>
47
+ #include <thrust/detail/type_traits/iterator/is_output_iterator.h>
48
+ #include <thrust/detail/type_traits.h>
49
+ #include <thrust/distance.h>
50
+ #include <thrust/functional.h>
51
+ #include <thrust/pair.h>
52
+ #include <thrust/system/cuda/config.h>
53
+ #include <thrust/system/cuda/detail/cdp_dispatch.h>
54
+ #include <thrust/system/cuda/detail/core/agent_launcher.h>
55
+ #include <thrust/system/cuda/detail/get_value.h>
56
+ #include <thrust/system/cuda/detail/par_to_seq.h>
57
+ #include <thrust/system/cuda/detail/util.h>
58
+
59
+ #include <cub/device/device_reduce.cuh>
60
+ #include <cub/util_math.cuh>
61
+
62
+ THRUST_NAMESPACE_BEGIN
63
+
64
// Forward declaration of the generic thrust::reduce_by_key entry point so
// the cuda_cub backend implementation below can refer back to it.
template <typename DerivedPolicy,
          typename InputIterator1,
          typename InputIterator2,
          typename OutputIterator1,
          typename OutputIterator2,
          typename BinaryPredicate>
__host__ __device__ thrust::pair<OutputIterator1, OutputIterator2>
reduce_by_key(
    const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
    InputIterator1 keys_first,
    InputIterator1 keys_last,
    InputIterator2 values_first,
    OutputIterator1 keys_output,
    OutputIterator2 values_output,
    BinaryPredicate binary_pred);
79
+
80
+ namespace cuda_cub {
81
+
82
+ namespace __reduce_by_key {
83
+
84
// Map a compile-time bool to thrust's true_type/false_type so overloads
// can be selected by tag dispatch.
template<bool> struct is_true : thrust::detail::false_type {};
template<> struct is_true<true> : thrust::detail::true_type {};
86
+
87
+ namespace mpl = thrust::detail::mpl::math;
88
+
89
// Compile-time kernel configuration bundle for the reduce_by_key agent:
// threads per block, items processed per thread, and the CUB block-load /
// cache-load / block-scan algorithm choices.
template <int _BLOCK_THREADS,
          int _ITEMS_PER_THREAD = 1,
          cub::BlockLoadAlgorithm _LOAD_ALGORITHM = cub::BLOCK_LOAD_DIRECT,
          cub::CacheLoadModifier _LOAD_MODIFIER = cub::LOAD_DEFAULT,
          cub::BlockScanAlgorithm _SCAN_ALGORITHM = cub::BLOCK_SCAN_WARP_SCANS>
struct PtxPolicy
{
  enum
  {
    BLOCK_THREADS    = _BLOCK_THREADS,
    ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
    // Total items handled per thread block per tile.
    ITEMS_PER_TILE   = BLOCK_THREADS * ITEMS_PER_THREAD
  };

  static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM;
  static const cub::CacheLoadModifier  LOAD_MODIFIER  = _LOAD_MODIFIER;
  static const cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM;
}; // struct PtxPolicy
107
+
108
// Tuning: per-architecture kernel parameters, specialized below for each
// supported SM generation and keyed on the key/value types being reduced.
template <class Arch, class Key, class Value>
struct Tuning;

// sm30 tuning: 128 threads per block. The nominal 6 items per thread
// (chosen for 4-byte elements) is scaled down as the combined key+value
// size grows, clamped to at least 1 item per thread.
template <class Key, class Value>
struct Tuning<sm30, Key, Value>
{
  enum
  {
    MAX_INPUT_BYTES      = mpl::max<size_t, sizeof(Key), sizeof(Value)>::value,
    COMBINED_INPUT_BYTES = sizeof(Key) + sizeof(Value),

    NOMINAL_4B_ITEMS_PER_THREAD = 6,

    ITEMS_PER_THREAD = mpl::min<
      int,
      NOMINAL_4B_ITEMS_PER_THREAD,
      mpl::max<
        int,
        1,
        static_cast<int>(((NOMINAL_4B_ITEMS_PER_THREAD * 8) +
                          COMBINED_INPUT_BYTES - 1) /
                         COMBINED_INPUT_BYTES)>::value>::value,
  };

  typedef PtxPolicy<128,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_DEFAULT,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning sm30
139
+
140
// sm35 tuning: same shape as sm30 but uses LDG (read-only cache) loads,
// and keeps the full 6 items per thread whenever neither key nor value
// exceeds 8 bytes.
template<class Key, class Value>
struct Tuning<sm35,Key,Value> : Tuning<sm30,Key,Value>
{
  enum
  {
    MAX_INPUT_BYTES      = mpl::max<size_t, sizeof(Key), sizeof(Value)>::value,
    COMBINED_INPUT_BYTES = sizeof(Key) + sizeof(Value),

    NOMINAL_4B_ITEMS_PER_THREAD = 6,

    ITEMS_PER_THREAD =
      (MAX_INPUT_BYTES <= 8)
        ? 6
        : mpl::min<
            int,
            NOMINAL_4B_ITEMS_PER_THREAD,
            mpl::max<
              int,
              1,
              ((NOMINAL_4B_ITEMS_PER_THREAD * 8) +
               COMBINED_INPUT_BYTES - 1) /
                COMBINED_INPUT_BYTES>::value>::value,
  };

  typedef PtxPolicy<128,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_LDG,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning sm35
171
+
172
// sm52 tuning: wider blocks (256 threads) and a higher nominal 9 items
// per thread for small (<= 8 byte) inputs; larger element types fall back
// to the byte-budget scaling used by the earlier architectures.
template<class Key, class Value>
struct Tuning<sm52,Key,Value> : Tuning<sm30,Key,Value>
{
  enum
  {
    MAX_INPUT_BYTES      = mpl::max<size_t, sizeof(Key), sizeof(Value)>::value,
    COMBINED_INPUT_BYTES = sizeof(Key) + sizeof(Value),

    NOMINAL_4B_ITEMS_PER_THREAD = 9,

    ITEMS_PER_THREAD =
      (MAX_INPUT_BYTES <= 8)
        ? 9
        : mpl::min<
            int,
            NOMINAL_4B_ITEMS_PER_THREAD,
            mpl::max<
              int,
              1,
              ((NOMINAL_4B_ITEMS_PER_THREAD * 8) +
               COMBINED_INPUT_BYTES - 1) /
                COMBINED_INPUT_BYTES>::value>::value,
  };

  typedef PtxPolicy<256,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_LDG,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning sm52
203
+
204
+ template <class KeysInputIt,
205
+ class ValuesInputIt,
206
+ class KeysOutputIt,
207
+ class ValuesOutputIt,
208
+ class EqualityOp,
209
+ class ReductionOp,
210
+ class NumRunsOutputIt,
211
+ class Size>
212
+ struct ReduceByKeyAgent
213
+ {
214
+ typedef typename iterator_traits<KeysInputIt>::value_type key_type;
215
+ typedef typename iterator_traits<ValuesInputIt>::value_type value_type;
216
+ typedef Size size_type;
217
+
218
+ typedef cub::KeyValuePair<size_type, value_type> size_value_pair_t;
219
+ typedef cub::KeyValuePair<key_type, value_type> key_value_pair_t;
220
+
221
+ typedef cub::ReduceByKeyScanTileState<value_type, size_type> ScanTileState;
222
+ typedef cub::ReduceBySegmentOp<ReductionOp> ReduceBySegmentOp;
223
+
224
+ template<class Arch>
225
+ struct PtxPlan : Tuning<Arch,key_type, value_type>::type
226
+ {
227
+ typedef Tuning<Arch, key_type, value_type> tuning;
228
+
229
+ typedef typename core::LoadIterator<PtxPlan, KeysInputIt>::type KeysLoadIt;
230
+ typedef typename core::LoadIterator<PtxPlan, ValuesInputIt>::type ValuesLoadIt;
231
+
232
+ typedef typename core::BlockLoad<PtxPlan, KeysLoadIt>::type BlockLoadKeys;
233
+ typedef typename core::BlockLoad<PtxPlan, ValuesLoadIt>::type BlockLoadValues;
234
+
235
+ typedef cub::BlockDiscontinuity<key_type,
236
+ PtxPlan::BLOCK_THREADS,
237
+ 1,
238
+ 1,
239
+ Arch::ver>
240
+ BlockDiscontinuityKeys;
241
+
242
+ typedef cub::TilePrefixCallbackOp<size_value_pair_t,
243
+ ReduceBySegmentOp,
244
+ ScanTileState,
245
+ Arch::ver>
246
+ TilePrefixCallback;
247
+ typedef cub::BlockScan<size_value_pair_t,
248
+ PtxPlan::BLOCK_THREADS,
249
+ PtxPlan::SCAN_ALGORITHM,
250
+ 1,
251
+ 1,
252
+ Arch::ver>
253
+ BlockScan;
254
+
255
+ union TempStorage
256
+ {
257
+ struct ScanStorage
258
+ {
259
+ typename BlockScan::TempStorage scan;
260
+ typename TilePrefixCallback::TempStorage prefix;
261
+ typename BlockDiscontinuityKeys::TempStorage discontinuity;
262
+ } scan_storage;
263
+
264
+ typename BlockLoadKeys::TempStorage load_keys;
265
+ typename BlockLoadValues::TempStorage load_values;
266
+
267
+ core::uninitialized_array<key_value_pair_t, PtxPlan::ITEMS_PER_TILE + 1>
268
+ raw_exchange;
269
+ }; // union TempStorage
270
+ }; // struct PtxPlan
271
+
272
+ typedef typename core::specialize_plan_msvc10_war<PtxPlan>::type::type ptx_plan;
273
+
274
+ typedef typename ptx_plan::KeysLoadIt KeysLoadIt;
275
+ typedef typename ptx_plan::ValuesLoadIt ValuesLoadIt;
276
+ typedef typename ptx_plan::BlockLoadKeys BlockLoadKeys;
277
+ typedef typename ptx_plan::BlockLoadValues BlockLoadValues;
278
+ typedef typename ptx_plan::BlockDiscontinuityKeys BlockDiscontinuityKeys;
279
+ typedef typename ptx_plan::TilePrefixCallback TilePrefixCallback;
280
+ typedef typename ptx_plan::BlockScan BlockScan;
281
+ typedef typename ptx_plan::TempStorage TempStorage;
282
+
283
+ enum
284
+ {
285
+ BLOCK_THREADS = ptx_plan::BLOCK_THREADS,
286
+ ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD,
287
+ ITEMS_PER_TILE = ptx_plan::ITEMS_PER_TILE,
288
+ TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1),
289
+
290
+ // Whether or not the scan operation has a zero-valued identity value
291
+ // (true if we're performing addition on a primitive type)
292
+ HAS_IDENTITY_ZERO = thrust::detail::is_same<ReductionOp,
293
+ plus<value_type> >::value &&
294
+ thrust::detail::is_arithmetic<value_type>::value
295
+ };
296
+
297
+ struct impl
298
+ {
299
+ //---------------------------------------------------------------------
300
+ // Per-thread fields
301
+ //---------------------------------------------------------------------
302
+
303
+ TempStorage & storage;
304
+ KeysLoadIt keys_load_it;
305
+ ValuesLoadIt values_load_it;
306
+ KeysOutputIt keys_output_it;
307
+ ValuesOutputIt values_output_it;
308
+ NumRunsOutputIt num_runs_output_it;
309
+ cub::InequalityWrapper<EqualityOp> inequality_op;
310
+ ReduceBySegmentOp scan_op;
311
+
312
+ //---------------------------------------------------------------------
313
+ // Block scan utility methods
314
+ //---------------------------------------------------------------------
315
+
316
+ // Scan with identity (first tile)
317
+ //
318
+ THRUST_DEVICE_FUNCTION void
319
+ scan_tile(size_value_pair_t (&scan_items)[ITEMS_PER_THREAD],
320
+ size_value_pair_t &tile_aggregate,
321
+ thrust::detail::true_type /* has_identity */)
322
+ {
323
+ size_value_pair_t identity;
324
+ identity.value = 0;
325
+ identity.key = 0;
326
+ BlockScan(storage.scan_storage.scan)
327
+ .ExclusiveScan(scan_items, scan_items, identity, scan_op, tile_aggregate);
328
+ }
329
+
330
+ // Scan without identity (first tile).
331
+ // Without an identity, the first output item is undefined.
332
+ //
333
+ THRUST_DEVICE_FUNCTION void
334
+ scan_tile(size_value_pair_t (&scan_items)[ITEMS_PER_THREAD],
335
+ size_value_pair_t &tile_aggregate,
336
+ thrust::detail::false_type /* has_identity */)
337
+ {
338
+ BlockScan(storage.scan_storage.scan)
339
+ .ExclusiveScan(scan_items, scan_items, scan_op, tile_aggregate);
340
+ }
341
+
342
+ // Scan with identity (subsequent tile)
343
+ //
344
+ THRUST_DEVICE_FUNCTION void
345
+ scan_tile(size_value_pair_t (&scan_items)[ITEMS_PER_THREAD],
346
+ size_value_pair_t & tile_aggregate,
347
+ TilePrefixCallback &prefix_op,
348
+ thrust::detail::true_type /* has_identity */)
349
+ {
350
+ BlockScan(storage.scan_storage.scan)
351
+ .ExclusiveScan(scan_items,
352
+ scan_items,
353
+ scan_op,
354
+ prefix_op);
355
+ tile_aggregate = prefix_op.GetBlockAggregate();
356
+ }
357
+
358
+ // Scan without identity (subsequent tile).
359
+ // Without an identity, the first output item is undefined.
360
+ THRUST_DEVICE_FUNCTION void
361
+ scan_tile(size_value_pair_t (&scan_items)[ITEMS_PER_THREAD],
362
+ size_value_pair_t & tile_aggregate,
363
+ TilePrefixCallback &prefix_op,
364
+ thrust::detail::false_type /* has_identity */)
365
+ {
366
+ BlockScan(storage.scan_storage.scan)
367
+ .ExclusiveScan(scan_items,
368
+ scan_items,
369
+ scan_op,
370
+ prefix_op);
371
+ tile_aggregate = prefix_op.GetBlockAggregate();
372
+ }
373
+
374
+ //---------------------------------------------------------------------
375
+ // Zip utility methods
376
+ //---------------------------------------------------------------------
377
+
378
+
379
+ template <bool IS_LAST_TILE>
380
+ THRUST_DEVICE_FUNCTION void
381
+ zip_values_and_flags(size_type num_remaining,
382
+ value_type (&values)[ITEMS_PER_THREAD],
383
+ size_type (&segment_flags)[ITEMS_PER_THREAD],
384
+ size_value_pair_t (&scan_items)[ITEMS_PER_THREAD])
385
+ {
386
+ // Zip values and segment_flags
387
+ #pragma unroll
388
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
389
+ {
390
+ // Set segment_flags for first out-of-bounds item, zero for others
391
+ if (IS_LAST_TILE &&
392
+ Size(threadIdx.x * ITEMS_PER_THREAD) + ITEM == num_remaining)
393
+ segment_flags[ITEM] = 1;
394
+
395
+ scan_items[ITEM].value = values[ITEM];
396
+ scan_items[ITEM].key = segment_flags[ITEM];
397
+ }
398
+ }
399
+
400
+ THRUST_DEVICE_FUNCTION void zip_keys_and_values(
401
+ key_type (&keys)[ITEMS_PER_THREAD],
402
+ size_type (&segment_indices)[ITEMS_PER_THREAD],
403
+ size_value_pair_t (&scan_items)[ITEMS_PER_THREAD],
404
+ key_value_pair_t (&scatter_items)[ITEMS_PER_THREAD])
405
+ {
406
+ // Zip values and segment_flags
407
+ #pragma unroll
408
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
409
+ {
410
+ scatter_items[ITEM].key = keys[ITEM];
411
+ scatter_items[ITEM].value = scan_items[ITEM].value;
412
+ segment_indices[ITEM] = scan_items[ITEM].key;
413
+ }
414
+ }
415
+
416
+ //---------------------------------------------------------------------
417
+ // Scatter utility methods
418
+ //---------------------------------------------------------------------
419
+
420
+ // Directly scatter flagged items to output offsets
421
+ // (specialized for IS_SEGMENTED_REDUCTION_FIXUP == false)
422
+ THRUST_DEVICE_FUNCTION void scatter_direct(
423
+ key_value_pair_t (&scatter_items)[ITEMS_PER_THREAD],
424
+ size_type (&segment_flags)[ITEMS_PER_THREAD],
425
+ size_type (&segment_indices)[ITEMS_PER_THREAD])
426
+ {
427
+ // Scatter flagged keys and values
428
+ #pragma unroll
429
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
430
+ {
431
+ if (segment_flags[ITEM])
432
+ {
433
+ keys_output_it[segment_indices[ITEM]] = scatter_items[ITEM].key;
434
+ values_output_it[segment_indices[ITEM]] = scatter_items[ITEM].value;
435
+ }
436
+ }
437
+ }
438
+
439
+ // 2-phase scatter flagged items to output offsets
440
+ // (specialized for IS_SEGMENTED_REDUCTION_FIXUP == false
441
+ //
442
+ // The exclusive scan causes each head flag to be paired with
443
+ // the previous value aggregate:
444
+ // * the scatter offsets must be decremented for value aggregates
445
+ //
446
+ THRUST_DEVICE_FUNCTION void scatter_two_phase(
447
+ key_value_pair_t (&scatter_items)[ITEMS_PER_THREAD],
448
+ size_type (&segment_flags)[ITEMS_PER_THREAD],
449
+ size_type (&segment_indices)[ITEMS_PER_THREAD],
450
+ size_type num_tile_segments,
451
+ size_type num_tile_segments_prefix)
452
+ {
453
+ using core::sync_threadblock;
454
+
455
+ sync_threadblock();
456
+
457
+ // Compact and scatter keys
458
+ #pragma unroll
459
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
460
+ {
461
+ if (segment_flags[ITEM])
462
+ {
463
+ int idx = static_cast<int>(segment_indices[ITEM] -
464
+ num_tile_segments_prefix);
465
+ storage.raw_exchange[idx] = scatter_items[ITEM];
466
+ }
467
+ }
468
+
469
+ sync_threadblock();
470
+
471
+ for (int item = threadIdx.x; item < num_tile_segments; item += BLOCK_THREADS)
472
+ {
473
+ size_type idx = num_tile_segments_prefix + item;
474
+ key_value_pair_t pair = storage.raw_exchange[item];
475
+ keys_output_it[idx] = pair.key;
476
+ values_output_it[idx] = pair.value;
477
+ }
478
+ }
479
+
480
+
481
+ // Scatter flagged items
482
+ //
483
+ THRUST_DEVICE_FUNCTION void scatter(
484
+ key_value_pair_t (&scatter_items)[ITEMS_PER_THREAD],
485
+ size_type (&segment_flags)[ITEMS_PER_THREAD],
486
+ size_type (&segment_indices)[ITEMS_PER_THREAD],
487
+ size_type num_tile_segments,
488
+ size_type num_tile_segments_prefix)
489
+ {
490
+ // Do a one-phase scatter if (a) two-phase is disabled or
491
+ // (b) the average number of selected items per thread is less than one
492
+ if (TWO_PHASE_SCATTER && (num_tile_segments > BLOCK_THREADS))
493
+ {
494
+ scatter_two_phase(scatter_items,
495
+ segment_flags,
496
+ segment_indices,
497
+ num_tile_segments,
498
+ num_tile_segments_prefix);
499
+ }
500
+ else
501
+ {
502
+ scatter_direct(scatter_items,
503
+ segment_flags,
504
+ segment_indices);
505
+ }
506
+ }
507
+
508
+ //---------------------------------------------------------------------
509
+ // Finalization utility methods
510
+ //---------------------------------------------------------------------
511
+
512
+ // Finalize the carry-out from the last tile
513
+ // (specialized for IS_SEGMENTED_REDUCTION_FIXUP == false)
514
+ THRUST_DEVICE_FUNCTION void
515
+ finalize_last_tile(size_type num_segments,
516
+ size_type num_remaining,
517
+ key_type last_key,
518
+ value_type last_value)
519
+ {
520
+ // Last thread will output final count and last item, if necessary
521
+ if (threadIdx.x == BLOCK_THREADS - 1)
522
+ {
523
+ // If the last tile is a whole tile, the inclusive prefix
524
+ // contains accumulated value reduction for the last segment
525
+ if (num_remaining == ITEMS_PER_TILE)
526
+ {
527
+ // Scatter key and value
528
+ keys_output_it[num_segments] = last_key;
529
+ values_output_it[num_segments] = last_value;
530
+ num_segments++;
531
+ }
532
+
533
+ // Output the total number of items selected
534
+ *num_runs_output_it = num_segments;
535
+ }
536
+ }
537
+
538
+ //---------------------------------------------------------------------
539
+ // Cooperatively scan a device-wide sequence of tiles with other CTAs
540
+ //---------------------------------------------------------------------
541
+
542
+ // Process first tile of input (dynamic chained scan).
543
+ // Returns the running count of segments
544
+ // and aggregated values (including this tile)
545
+ //
546
+ template <bool IS_LAST_TILE>
547
+ THRUST_DEVICE_FUNCTION void
548
+ consume_first_tile(Size num_remaining,
549
+ Size tile_offset,
550
+ ScanTileState &tile_state)
551
+ {
552
+ using core::sync_threadblock;
553
+
554
+ key_type keys[ITEMS_PER_THREAD]; // Tile keys
555
+ key_type pred_keys[ITEMS_PER_THREAD]; // Tile keys shifted up (predecessor)
556
+ value_type values[ITEMS_PER_THREAD]; // Tile values
557
+ size_type segment_flags[ITEMS_PER_THREAD]; // Segment head flags
558
+ size_type segment_indices[ITEMS_PER_THREAD]; // Segment indices
559
+ size_value_pair_t scan_items[ITEMS_PER_THREAD]; // Zipped values and segment flags|indices
560
+ key_value_pair_t scatter_items[ITEMS_PER_THREAD]; // Zipped key value pairs for scattering
561
+
562
+ // Load keys (last tile repeats final element)
563
+ if (IS_LAST_TILE)
564
+ {
565
+ // Fill last elements with the first element
566
+ // because collectives are not suffix guarded
567
+ BlockLoadKeys(storage.load_keys)
568
+ .Load(keys_load_it + tile_offset,
569
+ keys,
570
+ num_remaining,
571
+ *(keys_load_it + tile_offset));
572
+ }
573
+ else
574
+ {
575
+ BlockLoadKeys(storage.load_keys)
576
+ .Load(keys_load_it + tile_offset, keys);
577
+ }
578
+
579
+ sync_threadblock();
580
+
581
+ // Load values (last tile repeats final element)
582
+ if (IS_LAST_TILE)
583
+ {
584
+ BlockLoadValues(storage.load_values)
585
+ .Load(values_load_it + tile_offset,
586
+ values,
587
+ num_remaining,
588
+ *(values_load_it + tile_offset));
589
+ }
590
+ else
591
+ {
592
+ BlockLoadValues(storage.load_values)
593
+ .Load(values_load_it + tile_offset, values);
594
+ }
595
+
596
+ sync_threadblock();
597
+
598
+ // Set head segment_flags.
599
+ // First tile sets the first flag for the first item
600
+ BlockDiscontinuityKeys(storage.scan_storage.discontinuity)
601
+ .FlagHeads(segment_flags, keys, pred_keys, inequality_op);
602
+
603
+ // Unset the flag for the first item in the first tile
604
+ // so we won't scatter it
605
+ //
606
+ if (threadIdx.x == 0)
607
+ segment_flags[0] = 0;
608
+
609
+ // Zip values and segment_flags
610
+ zip_values_and_flags<IS_LAST_TILE>(num_remaining,
611
+ values,
612
+ segment_flags,
613
+ scan_items);
614
+
615
+ // Exclusive scan of values and segment_flags
616
+ size_value_pair_t tile_aggregate;
617
+ scan_tile(scan_items, tile_aggregate, is_true<HAS_IDENTITY_ZERO>());
618
+
619
+ if (threadIdx.x == 0)
620
+ {
621
+ // Update tile status if this is not the last tile
622
+ if (!IS_LAST_TILE)
623
+ tile_state.SetInclusive(0, tile_aggregate);
624
+
625
+ // Initialize the segment index for the first scan item if necessary
626
+ // (the exclusive prefix for the first item is garbage)
627
+ if (!HAS_IDENTITY_ZERO)
628
+ scan_items[0].key = 0;
629
+ }
630
+
631
+ // Unzip values and segment indices
632
+ zip_keys_and_values(pred_keys,
633
+ segment_indices,
634
+ scan_items,
635
+ scatter_items);
636
+
637
+ // Scatter flagged items
638
+ scatter(scatter_items,
639
+ segment_flags,
640
+ segment_indices,
641
+ tile_aggregate.key,
642
+ 0);
643
+
644
+ if (IS_LAST_TILE)
645
+ {
646
+ // Finalize the carry-out from the last tile
647
+ finalize_last_tile(tile_aggregate.key,
648
+ num_remaining,
649
+ keys[ITEMS_PER_THREAD - 1],
650
+ tile_aggregate.value);
651
+ }
652
+ }
653
+
654
+ // Process subsequent tile of input (dynamic chained scan).
655
+ // Returns the running count of segments
656
+ // and aggregated values (including this tile)
657
+
658
+ template <bool IS_LAST_TILE>
659
+ THRUST_DEVICE_FUNCTION void
660
+ consume_subsequent_tile(Size num_remaining,
661
+ int tile_idx,
662
+ Size tile_offset,
663
+ ScanTileState &tile_state)
664
+ {
665
+ using core::sync_threadblock;
666
+
667
+ key_type keys[ITEMS_PER_THREAD]; // Tile keys
668
+ key_type pred_keys[ITEMS_PER_THREAD]; // Tile keys shifted up (predecessor)
669
+ value_type values[ITEMS_PER_THREAD]; // Tile values
670
+ size_type segment_flags[ITEMS_PER_THREAD]; // Segment head flags
671
+ size_type segment_indices[ITEMS_PER_THREAD]; // Segment indices
672
+ size_value_pair_t scan_items[ITEMS_PER_THREAD]; // Zipped values and segment flags|indices
673
+ key_value_pair_t scatter_items[ITEMS_PER_THREAD]; // Zipped key value pairs for scattering
674
+
675
+ // Load keys (last tile repeats final element)
676
+ if (IS_LAST_TILE)
677
+ {
678
+ BlockLoadKeys(storage.load_keys)
679
+ .Load(keys_load_it + tile_offset,
680
+ keys,
681
+ num_remaining,
682
+ *(keys_load_it + tile_offset));
683
+ }
684
+ else
685
+ {
686
+ BlockLoadKeys(storage.load_keys)
687
+ .Load(keys_load_it + tile_offset, keys);
688
+ }
689
+
690
+ key_type tile_pred_key = (threadIdx.x == 0)
691
+ ? keys_load_it[tile_offset - 1]
692
+ : key_type();
693
+
694
+ sync_threadblock();
695
+
696
+ // Load values (last tile repeats final element)
697
+ if (IS_LAST_TILE)
698
+ {
699
+ BlockLoadValues(storage.load_values)
700
+ .Load(values_load_it + tile_offset,
701
+ values,
702
+ num_remaining,
703
+ *(values_load_it + tile_offset));
704
+ }
705
+ else
706
+ {
707
+ BlockLoadValues(storage.load_values)
708
+ .Load(values_load_it + tile_offset, values);
709
+ }
710
+
711
+ sync_threadblock();
712
+
713
+ // Set head segment_flags
714
+ BlockDiscontinuityKeys(storage.scan_storage.discontinuity)
715
+ .FlagHeads(segment_flags,
716
+ keys,
717
+ pred_keys,
718
+ inequality_op,
719
+ tile_pred_key);
720
+
721
+ // Zip values and segment_flags
722
+ zip_values_and_flags<IS_LAST_TILE>(num_remaining,
723
+ values,
724
+ segment_flags,
725
+ scan_items);
726
+
727
+ // Exclusive scan of values and segment_flags
728
+ size_value_pair_t tile_aggregate;
729
+ TilePrefixCallback prefix_op(tile_state, storage.scan_storage.prefix, scan_op, tile_idx);
730
+ scan_tile(scan_items,
731
+ tile_aggregate,
732
+ prefix_op,
733
+ is_true<HAS_IDENTITY_ZERO>());
734
+ size_value_pair_t tile_inclusive_prefix = prefix_op.GetInclusivePrefix();
735
+
736
+ // Unzip values and segment indices
737
+ zip_keys_and_values(pred_keys, segment_indices, scan_items, scatter_items);
738
+
739
+ // Scatter flagged items
740
+ scatter(scatter_items,
741
+ segment_flags,
742
+ segment_indices,
743
+ tile_aggregate.key,
744
+ prefix_op.GetExclusivePrefix().key);
745
+
746
+ if (IS_LAST_TILE)
747
+ {
748
+ // Finalize the carry-out from the last tile
749
+ finalize_last_tile(tile_inclusive_prefix.key,
750
+ num_remaining,
751
+ keys[ITEMS_PER_THREAD - 1],
752
+ tile_inclusive_prefix.value);
753
+ }
754
+ }
755
+ template <bool IS_LAST_TILE>
756
+ THRUST_DEVICE_FUNCTION void
757
+ consume_tile(size_type num_remaining,
758
+ int tile_idx,
759
+ size_type tile_offset,
760
+ ScanTileState &tile_state)
761
+ {
762
+ if (tile_idx == 0)
763
+ {
764
+ consume_first_tile<IS_LAST_TILE>(num_remaining,
765
+ tile_offset,
766
+ tile_state);
767
+ }
768
+ else
769
+ {
770
+ consume_subsequent_tile<IS_LAST_TILE>(num_remaining,
771
+ tile_idx,
772
+ tile_offset,
773
+ tile_state);
774
+ }
775
+ }
776
+
777
+ //---------------------------------------------------------------------
778
+ // Constructor : consume_range
779
+ //---------------------------------------------------------------------
780
+
781
+ THRUST_DEVICE_FUNCTION impl(TempStorage & storage_,
782
+ KeysInputIt keys_input_it_,
783
+ ValuesInputIt values_input_it_,
784
+ KeysOutputIt keys_output_it_,
785
+ ValuesOutputIt values_output_it_,
786
+ NumRunsOutputIt num_runs_output_it_,
787
+ EqualityOp equality_op_,
788
+ ReductionOp reduction_op_,
789
+ Size num_items,
790
+ int /*num_tiles*/,
791
+ ScanTileState & tile_state)
792
+ : storage(storage_),
793
+ keys_load_it(core::make_load_iterator(ptx_plan(), keys_input_it_)),
794
+ values_load_it(core::make_load_iterator(ptx_plan(), values_input_it_)),
795
+ keys_output_it(keys_output_it_),
796
+ values_output_it(values_output_it_),
797
+ num_runs_output_it(num_runs_output_it_),
798
+ inequality_op(equality_op_),
799
+ scan_op(reduction_op_)
800
+ {
801
+ // Blocks are launched in increasing order,
802
+ // so just assign one tile per block
803
+ //
804
+ int tile_idx = blockIdx.x;
805
+ Size tile_offset = static_cast<Size>(tile_idx) * ITEMS_PER_TILE;
806
+ Size num_remaining = num_items - tile_offset;
807
+
808
+ if (num_remaining > ITEMS_PER_TILE)
809
+ {
810
+ // Not the last tile (full)
811
+ consume_tile<false>(num_remaining, tile_idx, tile_offset, tile_state);
812
+ }
813
+ else if (num_remaining > 0)
814
+ {
815
+ // The last tile (possibly partially-full)
816
+ consume_tile<true>(num_remaining, tile_idx, tile_offset, tile_state);
817
+ }
818
+ }
819
+ }; // struct impl
820
+
821
+ //---------------------------------------------------------------------
822
+ // Agent entry point
823
+ //---------------------------------------------------------------------
824
+
825
+ THRUST_AGENT_ENTRY(KeysInputIt keys_input_it,
826
+ ValuesInputIt values_input_it,
827
+ KeysOutputIt keys_output_it,
828
+ ValuesOutputIt values_output_it,
829
+ NumRunsOutputIt num_runs_output_it,
830
+ ScanTileState tile_state,
831
+ EqualityOp equality_op,
832
+ ReductionOp reduction_op,
833
+ Size num_items,
834
+ int num_tiles,
835
+ char * shmem)
836
+ {
837
+ TempStorage &storage = *reinterpret_cast<TempStorage*>(shmem);
838
+
839
+ impl(storage,
840
+ keys_input_it,
841
+ values_input_it,
842
+ keys_output_it,
843
+ values_output_it,
844
+ num_runs_output_it,
845
+ equality_op,
846
+ reduction_op,
847
+ num_items,
848
+ num_tiles,
849
+ tile_state);
850
+ }
851
+
852
+ }; // struct ReduceByKeyAgent
853
+
854
+ template <class ScanTileState,
855
+ class Size,
856
+ class NumSelectedIt>
857
+ struct InitAgent
858
+ {
859
+ template <class Arch>
860
+ struct PtxPlan : PtxPolicy<128> {};
861
+ typedef core::specialize_plan<PtxPlan> ptx_plan;
862
+
863
+ //---------------------------------------------------------------------
864
+ // Agent entry point
865
+ //---------------------------------------------------------------------
866
+
867
+ THRUST_AGENT_ENTRY(ScanTileState tile_state,
868
+ Size num_tiles,
869
+ NumSelectedIt num_selected_out,
870
+ char * /*shmem*/)
871
+ {
872
+ tile_state.InitializeStatus(num_tiles);
873
+ if (blockIdx.x == 0 && threadIdx.x == 0)
874
+ *num_selected_out = 0;
875
+ }
876
+ }; // struct InitAgent
877
+
878
+ template <class KeysInputIt,
879
+ class ValuesInputIt,
880
+ class KeysOutputIt,
881
+ class ValuesOutputIt,
882
+ class NumRunsOutputIt,
883
+ class EqualityOp,
884
+ class ReductionOp,
885
+ class Size>
886
+ THRUST_RUNTIME_FUNCTION cudaError_t
887
+ doit_step(void * d_temp_storage,
888
+ size_t & temp_storage_bytes,
889
+ KeysInputIt keys_input_it,
890
+ ValuesInputIt values_input_it,
891
+ KeysOutputIt keys_output_it,
892
+ ValuesOutputIt values_output_it,
893
+ NumRunsOutputIt num_runs_output_it,
894
+ EqualityOp equality_op,
895
+ ReductionOp reduction_op,
896
+ Size num_items,
897
+ cudaStream_t stream)
898
+ {
899
+ using core::AgentPlan;
900
+ using core::AgentLauncher;
901
+
902
+ cudaError_t status = cudaSuccess;
903
+ if (num_items == 0)
904
+ return cudaErrorNotSupported;
905
+
906
+ typedef AgentLauncher<
907
+ ReduceByKeyAgent<KeysInputIt,
908
+ ValuesInputIt,
909
+ KeysOutputIt,
910
+ ValuesOutputIt,
911
+ EqualityOp,
912
+ ReductionOp,
913
+ NumRunsOutputIt,
914
+ Size> >
915
+ reduce_by_key_agent;
916
+
917
+ typedef typename reduce_by_key_agent::ScanTileState ScanTileState;
918
+ typedef AgentLauncher<
919
+ InitAgent<ScanTileState,
920
+ Size,
921
+ NumRunsOutputIt> >
922
+ init_agent;
923
+
924
+ AgentPlan reduce_by_key_plan = reduce_by_key_agent::get_plan(stream);
925
+ AgentPlan init_plan = init_agent::get_plan();
926
+
927
+ // Number of input tiles
928
+ int tile_size = reduce_by_key_plan.items_per_tile;
929
+ Size num_tiles = cub::DivideAndRoundUp(num_items, tile_size);
930
+
931
+ size_t vshmem_size = core::vshmem_size(reduce_by_key_plan.shared_memory_size,
932
+ num_tiles);
933
+
934
+ size_t allocation_sizes[2] = {9, vshmem_size};
935
+ status = ScanTileState::AllocationSize(static_cast<int>(num_tiles), allocation_sizes[0]);
936
+ CUDA_CUB_RET_IF_FAIL(status);
937
+
938
+ void *allocations[2] = {NULL, NULL};
939
+ status = cub::AliasTemporaries(d_temp_storage,
940
+ temp_storage_bytes,
941
+ allocations,
942
+ allocation_sizes);
943
+ CUDA_CUB_RET_IF_FAIL(status);
944
+
945
+ if (d_temp_storage == NULL)
946
+ {
947
+ return status;
948
+ }
949
+
950
+ ScanTileState tile_state;
951
+ status = tile_state.Init(static_cast<int>(num_tiles), allocations[0], allocation_sizes[0]);
952
+ CUDA_CUB_RET_IF_FAIL(status);
953
+
954
+ init_agent ia(init_plan, num_tiles, stream, "reduce_by_key::init_agent");
955
+ ia.launch(tile_state, num_tiles, num_runs_output_it);
956
+ CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
957
+
958
+ char *vshmem_ptr = vshmem_size > 0 ? (char *)allocations[1] : NULL;
959
+
960
+ reduce_by_key_agent rbka(reduce_by_key_plan,
961
+ num_items,
962
+ stream,
963
+ vshmem_ptr,
964
+ "reduce_by_keys::reduce_by_key_agent");
965
+ rbka.launch(keys_input_it,
966
+ values_input_it,
967
+ keys_output_it,
968
+ values_output_it,
969
+ num_runs_output_it,
970
+ tile_state,
971
+ equality_op,
972
+ reduction_op,
973
+ num_items,
974
+ num_tiles);
975
+ CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
976
+ return status;
977
+ }
978
+
979
+ template <typename Size,
980
+ typename Derived,
981
+ typename KeysInputIt,
982
+ typename ValuesInputIt,
983
+ typename KeysOutputIt,
984
+ typename ValuesOutputIt,
985
+ typename EqualityOp,
986
+ typename ReductionOp>
987
+ THRUST_RUNTIME_FUNCTION
988
+ pair<KeysOutputIt, ValuesOutputIt>
989
+ reduce_by_key_dispatch(execution_policy<Derived>& policy,
990
+ KeysInputIt keys_first,
991
+ Size num_items,
992
+ ValuesInputIt values_first,
993
+ KeysOutputIt keys_output,
994
+ ValuesOutputIt values_output,
995
+ EqualityOp equality_op,
996
+ ReductionOp reduction_op)
997
+ {
998
+ size_t temp_storage_bytes = 0;
999
+ cudaStream_t stream = cuda_cub::stream(policy);
1000
+
1001
+ if (num_items == 0)
1002
+ {
1003
+ return thrust::make_pair(keys_output, values_output);
1004
+ }
1005
+
1006
+ cudaError_t status;
1007
+ status = doit_step(NULL,
1008
+ temp_storage_bytes,
1009
+ keys_first,
1010
+ values_first,
1011
+ keys_output,
1012
+ values_output,
1013
+ reinterpret_cast<Size*>(NULL),
1014
+ equality_op,
1015
+ reduction_op,
1016
+ num_items,
1017
+ stream);
1018
+ cuda_cub::throw_on_error(status, "reduce_by_key failed on 1st step");
1019
+
1020
+ size_t allocation_sizes[2] = {sizeof(Size), temp_storage_bytes};
1021
+ void * allocations[2] = {NULL, NULL};
1022
+
1023
+ size_t storage_size = 0;
1024
+ status = core::alias_storage(NULL,
1025
+ storage_size,
1026
+ allocations,
1027
+ allocation_sizes);
1028
+ cuda_cub::throw_on_error(status, "reduce failed on 1st alias_storage");
1029
+
1030
+ // Allocate temporary storage.
1031
+ thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
1032
+ tmp(policy, storage_size);
1033
+ void *ptr = static_cast<void*>(tmp.data().get());
1034
+
1035
+ status = core::alias_storage(ptr,
1036
+ storage_size,
1037
+ allocations,
1038
+ allocation_sizes);
1039
+ cuda_cub::throw_on_error(status, "reduce failed on 2nd alias_storage");
1040
+
1041
+ Size* d_num_runs_out
1042
+ = thrust::detail::aligned_reinterpret_cast<Size*>(allocations[0]);
1043
+
1044
+ status = doit_step(allocations[1],
1045
+ temp_storage_bytes,
1046
+ keys_first,
1047
+ values_first,
1048
+ keys_output,
1049
+ values_output,
1050
+ d_num_runs_out,
1051
+ equality_op,
1052
+ reduction_op,
1053
+ num_items,
1054
+ stream);
1055
+ cuda_cub::throw_on_error(status, "reduce_by_key failed on 2nd step");
1056
+
1057
+ status = cuda_cub::synchronize(policy);
1058
+ cuda_cub::throw_on_error(status, "reduce_by_key: failed to synchronize");
1059
+
1060
+ int num_runs_out = cuda_cub::get_value(policy, d_num_runs_out);
1061
+
1062
+ return thrust::make_pair(
1063
+ keys_output + num_runs_out,
1064
+ values_output + num_runs_out
1065
+ );
1066
+ }
1067
+
1068
+ template <typename Derived,
1069
+ typename KeysInputIt,
1070
+ typename ValuesInputIt,
1071
+ typename KeysOutputIt,
1072
+ typename ValuesOutputIt,
1073
+ typename EqualityOp,
1074
+ typename ReductionOp>
1075
+ THRUST_RUNTIME_FUNCTION
1076
+ pair<KeysOutputIt, ValuesOutputIt>
1077
+ reduce_by_key(execution_policy<Derived>& policy,
1078
+ KeysInputIt keys_first,
1079
+ KeysInputIt keys_last,
1080
+ ValuesInputIt values_first,
1081
+ KeysOutputIt keys_output,
1082
+ ValuesOutputIt values_output,
1083
+ EqualityOp equality_op,
1084
+ ReductionOp reduction_op)
1085
+ {
1086
+ using size_type = typename iterator_traits<KeysInputIt>::difference_type;
1087
+
1088
+ size_type num_items = thrust::distance(keys_first, keys_last);
1089
+
1090
+ pair<KeysOutputIt, ValuesOutputIt> result = thrust::make_pair(keys_output, values_output);
1091
+
1092
+ if (num_items == 0)
1093
+ {
1094
+ return result;
1095
+ }
1096
+
1097
+ THRUST_INDEX_TYPE_DISPATCH(result,
1098
+ reduce_by_key_dispatch,
1099
+ num_items,
1100
+ (policy,
1101
+ keys_first,
1102
+ num_items_fixed,
1103
+ values_first,
1104
+ keys_output,
1105
+ values_output,
1106
+ equality_op,
1107
+ reduction_op));
1108
+
1109
+ return result;
1110
+ }
1111
+
1112
+ } // namespace __reduce_by_key
1113
+
1114
+ //-------------------------
1115
+ // Thrust API entry points
1116
+ //-------------------------
1117
+
1118
+ __thrust_exec_check_disable__
1119
+ template <class Derived,
1120
+ class KeyInputIt,
1121
+ class ValInputIt,
1122
+ class KeyOutputIt,
1123
+ class ValOutputIt,
1124
+ class BinaryPred,
1125
+ class BinaryOp>
1126
+ pair<KeyOutputIt, ValOutputIt> __host__ __device__
1127
+ reduce_by_key(execution_policy<Derived> &policy,
1128
+ KeyInputIt keys_first,
1129
+ KeyInputIt keys_last,
1130
+ ValInputIt values_first,
1131
+ KeyOutputIt keys_output,
1132
+ ValOutputIt values_output,
1133
+ BinaryPred binary_pred,
1134
+ BinaryOp binary_op)
1135
+ {
1136
+ auto ret = thrust::make_pair(keys_output, values_output);
1137
+ THRUST_CDP_DISPATCH((ret = __reduce_by_key::reduce_by_key(policy,
1138
+ keys_first,
1139
+ keys_last,
1140
+ values_first,
1141
+ keys_output,
1142
+ values_output,
1143
+ binary_pred,
1144
+ binary_op);),
1145
+ (ret =
1146
+ thrust::reduce_by_key(cvt_to_seq(derived_cast(policy)),
1147
+ keys_first,
1148
+ keys_last,
1149
+ values_first,
1150
+ keys_output,
1151
+ values_output,
1152
+ binary_pred,
1153
+ binary_op);));
1154
+ return ret;
1155
+ }
1156
+
1157
+ template <class Derived,
1158
+ class KeyInputIt,
1159
+ class ValInputIt,
1160
+ class KeyOutputIt,
1161
+ class ValOutputIt,
1162
+ class BinaryPred>
1163
+ pair<KeyOutputIt, ValOutputIt> __host__ __device__
1164
+ reduce_by_key(execution_policy<Derived> &policy,
1165
+ KeyInputIt keys_first,
1166
+ KeyInputIt keys_last,
1167
+ ValInputIt values_first,
1168
+ KeyOutputIt keys_output,
1169
+ ValOutputIt values_output,
1170
+ BinaryPred binary_pred)
1171
+ {
1172
+ typedef typename thrust::detail::eval_if<
1173
+ thrust::detail::is_output_iterator<ValOutputIt>::value,
1174
+ thrust::iterator_value<ValInputIt>,
1175
+ thrust::iterator_value<ValOutputIt>
1176
+ >::type value_type;
1177
+ return cuda_cub::reduce_by_key(policy,
1178
+ keys_first,
1179
+ keys_last,
1180
+ values_first,
1181
+ keys_output,
1182
+ values_output,
1183
+ binary_pred,
1184
+ plus<value_type>());
1185
+ }
1186
+
1187
+ template <class Derived,
1188
+ class KeyInputIt,
1189
+ class ValInputIt,
1190
+ class KeyOutputIt,
1191
+ class ValOutputIt>
1192
+ pair<KeyOutputIt, ValOutputIt> __host__ __device__
1193
+ reduce_by_key(execution_policy<Derived> &policy,
1194
+ KeyInputIt keys_first,
1195
+ KeyInputIt keys_last,
1196
+ ValInputIt values_first,
1197
+ KeyOutputIt keys_output,
1198
+ ValOutputIt values_output)
1199
+ {
1200
+ typedef typename thrust::iterator_value<KeyInputIt>::type KeyT;
1201
+ return cuda_cub::reduce_by_key(policy,
1202
+ keys_first,
1203
+ keys_last,
1204
+ values_first,
1205
+ keys_output,
1206
+ values_output,
1207
+ equal_to<KeyT>());
1208
+ }
1209
+
1210
+ } // namespace cuda_
1211
+
1212
+ THRUST_NAMESPACE_END
1213
+
1214
+ #include <thrust/memory.h>
1215
+ #include <thrust/reduce.h>
1216
+
1217
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/remove.h ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <thrust/system/cuda/detail/copy_if.h>
41
+
42
+ THRUST_NAMESPACE_BEGIN
43
+ namespace cuda_cub {
44
+
45
+ // in-place
46
+
47
+ template <class Derived,
48
+ class InputIt,
49
+ class StencilIt,
50
+ class Predicate>
51
+ InputIt __host__ __device__
52
+ remove_if(execution_policy<Derived> &policy,
53
+ InputIt first,
54
+ InputIt last,
55
+ StencilIt stencil,
56
+ Predicate predicate)
57
+ {
58
+ return cuda_cub::copy_if(policy, first, last, stencil, first,
59
+ thrust::detail::not1(predicate));
60
+ }
61
+
62
+ template <class Derived,
63
+ class InputIt,
64
+ class Predicate>
65
+ InputIt __host__ __device__
66
+ remove_if(execution_policy<Derived> &policy,
67
+ InputIt first,
68
+ InputIt last,
69
+ Predicate predicate)
70
+ {
71
+ return cuda_cub::copy_if(policy, first, last, first,
72
+ thrust::detail::not1(predicate));
73
+ }
74
+
75
+
76
+ template <class Derived,
77
+ class InputIt,
78
+ class T>
79
+ InputIt __host__ __device__
80
+ remove(execution_policy<Derived> &policy,
81
+ InputIt first,
82
+ InputIt last,
83
+ const T & value)
84
+ {
85
+ using thrust::placeholders::_1;
86
+
87
+ return cuda_cub::remove_if(policy, first, last, _1 == value);
88
+ }
89
+
90
+ // copy
91
+
92
+ template <class Derived,
93
+ class InputIt,
94
+ class StencilIt,
95
+ class OutputIt,
96
+ class Predicate>
97
+ OutputIt __host__ __device__
98
+ remove_copy_if(execution_policy<Derived> &policy,
99
+ InputIt first,
100
+ InputIt last,
101
+ StencilIt stencil,
102
+ OutputIt result,
103
+ Predicate predicate)
104
+ {
105
+ return cuda_cub::copy_if(policy, first, last, stencil, result,
106
+ thrust::detail::not1(predicate));
107
+ }
108
+
109
+ template <class Derived,
110
+ class InputIt,
111
+ class OutputIt,
112
+ class Predicate>
113
+ OutputIt __host__ __device__
114
+ remove_copy_if(execution_policy<Derived> &policy,
115
+ InputIt first,
116
+ InputIt last,
117
+ OutputIt result,
118
+ Predicate predicate)
119
+ {
120
+ return cuda_cub::copy_if(policy, first, last, result,
121
+ thrust::detail::not1(predicate));
122
+ }
123
+
124
+
125
+ template <class Derived,
126
+ class InputIt,
127
+ class OutputIt,
128
+ class T>
129
+ OutputIt __host__ __device__
130
+ remove_copy(execution_policy<Derived> &policy,
131
+ InputIt first,
132
+ InputIt last,
133
+ OutputIt result,
134
+ const T & value)
135
+ {
136
+ thrust::detail::equal_to_value<T> pred(value);
137
+ return cuda_cub::remove_copy_if(policy, first, last, result, pred);
138
+ }
139
+
140
+ } // namespace cuda_cub
141
+ THRUST_NAMESPACE_END
142
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/replace.h ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <thrust/system/cuda/detail/transform.h>
41
+ #include <thrust/detail/internal_functional.h>
42
+
43
+ THRUST_NAMESPACE_BEGIN
44
+ namespace cuda_cub {
45
+
46
+ namespace __replace
47
+ {
48
+ template<class T>
49
+ struct constant_f
50
+ {
51
+ T value;
52
+
53
+ THRUST_FUNCTION
54
+ constant_f(T const &x) : value(x) {}
55
+
56
+ template<class U>
57
+ THRUST_DEVICE_FUNCTION
58
+ T operator()(U const &) const
59
+ {
60
+ return value;
61
+ }
62
+ }; // struct constant_f
63
+
64
+ template<class Predicate, class NewType, class OutputType>
65
+ struct new_value_if_f
66
+ {
67
+ Predicate pred;
68
+ NewType new_value;
69
+
70
+ THRUST_FUNCTION
71
+ new_value_if_f(Predicate pred_, NewType new_value_)
72
+ : pred(pred_), new_value(new_value_) {}
73
+
74
+ template<class T>
75
+ OutputType THRUST_DEVICE_FUNCTION
76
+ operator()(T const &x)
77
+ {
78
+ return pred(x) ? new_value : x;
79
+ }
80
+
81
+ template<class T, class P>
82
+ OutputType THRUST_DEVICE_FUNCTION
83
+ operator()(T const &x, P const& y)
84
+ {
85
+ return pred(y) ? new_value : x;
86
+ }
87
+ }; // struct new_value_if_f
88
+
89
+ } // namespace __replace
90
+
91
+ template <class Derived,
92
+ class Iterator,
93
+ class T>
94
+ void __host__ __device__
95
+ replace(execution_policy<Derived> &policy,
96
+ Iterator first,
97
+ Iterator last,
98
+ T const & old_value,
99
+ T const & new_value)
100
+ {
101
+ using thrust::placeholders::_1;
102
+
103
+ cuda_cub::transform_if(policy,
104
+ first,
105
+ last,
106
+ first,
107
+ __replace::constant_f<T>(new_value),
108
+ _1 == old_value);
109
+ }
110
+
111
+ template <class Derived,
112
+ class Iterator,
113
+ class Predicate,
114
+ class T>
115
+ void __host__ __device__
116
+ replace_if(execution_policy<Derived> &policy,
117
+ Iterator first,
118
+ Iterator last,
119
+ Predicate pred,
120
+ T const & new_value)
121
+ {
122
+ cuda_cub::transform_if(policy,
123
+ first,
124
+ last,
125
+ first,
126
+ __replace::constant_f<T>(new_value),
127
+ pred);
128
+ }
129
+
130
+ template <class Derived,
131
+ class Iterator,
132
+ class StencilIt,
133
+ class Predicate,
134
+ class T>
135
+ void __host__ __device__
136
+ replace_if(execution_policy<Derived> &policy,
137
+ Iterator first,
138
+ Iterator last,
139
+ StencilIt stencil,
140
+ Predicate pred,
141
+ T const & new_value)
142
+ {
143
+ cuda_cub::transform_if(policy,
144
+ first,
145
+ last,
146
+ stencil,
147
+ first,
148
+ __replace::constant_f<T>(new_value),
149
+ pred);
150
+ }
151
+
152
+ template <class Derived,
153
+ class InputIt,
154
+ class OutputIt,
155
+ class Predicate,
156
+ class T>
157
+ OutputIt __host__ __device__
158
+ replace_copy_if(execution_policy<Derived> &policy,
159
+ InputIt first,
160
+ InputIt last,
161
+ OutputIt result,
162
+ Predicate predicate,
163
+ T const & new_value)
164
+ {
165
+ typedef typename iterator_traits<OutputIt>::value_type output_type;
166
+ typedef __replace::new_value_if_f<Predicate, T, output_type> new_value_if_t;
167
+ return cuda_cub::transform(policy,
168
+ first,
169
+ last,
170
+ result,
171
+ new_value_if_t(predicate, new_value));
172
+ }
173
+
174
+ template <class Derived,
175
+ class InputIt,
176
+ class StencilIt,
177
+ class OutputIt,
178
+ class Predicate,
179
+ class T>
180
+ OutputIt __host__ __device__
181
+ replace_copy_if(execution_policy<Derived> &policy,
182
+ InputIt first,
183
+ InputIt last,
184
+ StencilIt stencil,
185
+ OutputIt result,
186
+ Predicate predicate,
187
+ T const & new_value)
188
+ {
189
+ typedef typename iterator_traits<OutputIt>::value_type output_type;
190
+ typedef __replace::new_value_if_f<Predicate, T, output_type> new_value_if_t;
191
+ return cuda_cub::transform(policy,
192
+ first,
193
+ last,
194
+ stencil,
195
+ result,
196
+ new_value_if_t(predicate, new_value));
197
+ }
198
+
199
+ template <class Derived,
200
+ class InputIt,
201
+ class OutputIt,
202
+ class T>
203
+ OutputIt __host__ __device__
204
+ replace_copy(execution_policy<Derived> &policy,
205
+ InputIt first,
206
+ InputIt last,
207
+ OutputIt result,
208
+ T const & old_value,
209
+ T const & new_value)
210
+ {
211
+ return cuda_cub::replace_copy_if(policy,
212
+ first,
213
+ last,
214
+ result,
215
+ thrust::detail::equal_to_value<T>(old_value),
216
+ new_value);
217
+ }
218
+
219
+ } // namespace cuda_cub
220
+ THRUST_NAMESPACE_END
221
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/reverse.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <thrust/system/cuda/detail/execution_policy.h>
41
+
42
+ THRUST_NAMESPACE_BEGIN
43
+ namespace cuda_cub {
44
+
45
+ template <class Derived, class ItemsIt, class ResultIt>
46
+ ResultIt __host__ __device__
47
+ reverse_copy(execution_policy<Derived> &policy,
48
+ ItemsIt first,
49
+ ItemsIt last,
50
+ ResultIt result);
51
+
52
+ template <class Derived, class ItemsIt>
53
+ void __host__ __device__
54
+ reverse(execution_policy<Derived> &policy,
55
+ ItemsIt first,
56
+ ItemsIt last);
57
+
58
+ } // namespace cuda_cub
59
+ THRUST_NAMESPACE_END
60
+
61
+ #include <thrust/advance.h>
62
+ #include <thrust/distance.h>
63
+ #include <thrust/system/cuda/detail/swap_ranges.h>
64
+ #include <thrust/system/cuda/detail/copy.h>
65
+ #include <thrust/iterator/reverse_iterator.h>
66
+
67
+ THRUST_NAMESPACE_BEGIN
68
+ namespace cuda_cub {
69
+
70
+ template <class Derived,
71
+ class ItemsIt,
72
+ class ResultIt>
73
+ ResultIt __host__ __device__
74
+ reverse_copy(execution_policy<Derived> &policy,
75
+ ItemsIt first,
76
+ ItemsIt last,
77
+ ResultIt result)
78
+ {
79
+ return cuda_cub::copy(policy,
80
+ thrust::make_reverse_iterator(last),
81
+ thrust::make_reverse_iterator(first),
82
+ result);
83
+ }
84
+
85
+ template <class Derived,
86
+ class ItemsIt>
87
+ void __host__ __device__
88
+ reverse(execution_policy<Derived> &policy,
89
+ ItemsIt first,
90
+ ItemsIt last)
91
+ {
92
+ typedef typename thrust::iterator_difference<ItemsIt>::type difference_type;
93
+
94
+ // find the midpoint of [first,last)
95
+ difference_type N = thrust::distance(first, last);
96
+ ItemsIt mid(first);
97
+ thrust::advance(mid, N / 2);
98
+
99
+ cuda_cub::swap_ranges(policy, first, mid, thrust::make_reverse_iterator(last));
100
+ }
101
+
102
+
103
+ } // namespace cuda_cub
104
+ THRUST_NAMESPACE_END
105
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/scan.h ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+
41
+ #include <thrust/detail/cstdint.h>
42
+ #include <thrust/detail/type_traits.h>
43
+ #include <thrust/distance.h>
44
+ #include <thrust/iterator/iterator_traits.h>
45
+ #include <thrust/system/cuda/config.h>
46
+ #include <thrust/system/cuda/detail/cdp_dispatch.h>
47
+ #include <thrust/system/cuda/detail/dispatch.h>
48
+
49
+ #include <cub/device/device_scan.cuh>
50
+
51
+ THRUST_NAMESPACE_BEGIN
52
+ namespace cuda_cub
53
+ {
54
+ namespace detail
55
+ {
56
+
57
// Parallel implementation of inclusive scan over `num_items` elements,
// driving cub::DispatchScan with CUB's two-phase (size query, then run)
// temporary-storage protocol. Returns the end of the output range
// (result + num_items).
__thrust_exec_check_disable__
template <typename Derived,
          typename InputIt,
          typename Size,
          typename OutputIt,
          typename ScanOp>
__host__ __device__
OutputIt inclusive_scan_n_impl(thrust::cuda_cub::execution_policy<Derived> &policy,
                               InputIt first,
                               Size num_items,
                               OutputIt result,
                               ScanOp scan_op)
{
  // For inclusive scans the accumulator type is the input's value type.
  using AccumT = typename thrust::iterator_traits<InputIt>::value_type;
  // Two dispatcher instantiations differing only in offset width;
  // THRUST_INDEX_TYPE_DISPATCH2 selects one at runtime based on num_items.
  using Dispatch32 = cub::DispatchScan<InputIt,
                                       OutputIt,
                                       ScanOp,
                                       cub::NullType,
                                       thrust::detail::int32_t,
                                       AccumT>;
  using Dispatch64 = cub::DispatchScan<InputIt,
                                       OutputIt,
                                       ScanOp,
                                       cub::NullType,
                                       thrust::detail::int64_t,
                                       AccumT>;

  cudaStream_t stream = thrust::cuda_cub::stream(policy);
  cudaError_t status;

  // Determine temporary storage requirements:
  // (first dispatch call with a null temp pointer only writes tmp_size)
  size_t tmp_size = 0;
  {
    // NOTE: `num_items_fixed` is declared by the THRUST_INDEX_TYPE_DISPATCH2
    // macro — it is num_items cast to the selected 32/64-bit index type.
    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (nullptr,
                                 tmp_size,
                                 first,
                                 result,
                                 scan_op,
                                 cub::NullType{},
                                 num_items_fixed,
                                 stream));
    thrust::cuda_cub::throw_on_error(status,
                                     "after determining tmp storage "
                                     "requirements for inclusive_scan");
  }

  // Run scan:
  {
    // Allocate temporary storage (freed when `tmp` leaves this scope):
    thrust::detail::temporary_array<thrust::detail::uint8_t, Derived> tmp{
      policy,
      tmp_size};
    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (tmp.data().get(),
                                 tmp_size,
                                 first,
                                 result,
                                 scan_op,
                                 cub::NullType{},
                                 num_items_fixed,
                                 stream));
    thrust::cuda_cub::throw_on_error(status,
                                     "after dispatching inclusive_scan kernel");
    // Synchronizes only when the policy requires it (e.g. blocking streams).
    thrust::cuda_cub::throw_on_error(thrust::cuda_cub::synchronize_optional(policy),
                                     "inclusive_scan failed to synchronize");
  }

  return result + num_items;
}
133
+
134
// Parallel implementation of exclusive scan over `num_items` elements with
// an explicit initial value. Mirrors inclusive_scan_n_impl, but wraps the
// initial value in cub::detail::InputValue and uses InitValueT as the
// accumulator type. Returns result + num_items.
__thrust_exec_check_disable__
template <typename Derived,
          typename InputIt,
          typename Size,
          typename OutputIt,
          typename InitValueT,
          typename ScanOp>
__host__ __device__
OutputIt exclusive_scan_n_impl(thrust::cuda_cub::execution_policy<Derived> &policy,
                               InputIt first,
                               Size num_items,
                               OutputIt result,
                               InitValueT init,
                               ScanOp scan_op)
{
  // CUB's wrapper that carries the initial value into the dispatcher.
  using InputValueT = cub::detail::InputValue<InitValueT>;
  // 32-bit vs. 64-bit offset instantiations; selected at runtime by
  // THRUST_INDEX_TYPE_DISPATCH2 based on num_items.
  using Dispatch32 = cub::DispatchScan<InputIt,
                                       OutputIt,
                                       ScanOp,
                                       InputValueT,
                                       thrust::detail::int32_t,
                                       InitValueT>;
  using Dispatch64 = cub::DispatchScan<InputIt,
                                       OutputIt,
                                       ScanOp,
                                       InputValueT,
                                       thrust::detail::int64_t,
                                       InitValueT>;

  cudaStream_t stream = thrust::cuda_cub::stream(policy);
  cudaError_t status;

  // Determine temporary storage requirements:
  // (null temp pointer => CUB only computes tmp_size, launches nothing)
  size_t tmp_size = 0;
  {
    // `num_items_fixed` is introduced by the dispatch macro (num_items cast
    // to the selected index type).
    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (nullptr,
                                 tmp_size,
                                 first,
                                 result,
                                 scan_op,
                                 InputValueT(init),
                                 num_items_fixed,
                                 stream));
    thrust::cuda_cub::throw_on_error(status,
                                     "after determining tmp storage "
                                     "requirements for exclusive_scan");
  }

  // Run scan:
  {
    // Allocate temporary storage (RAII; released at end of scope):
    thrust::detail::temporary_array<thrust::detail::uint8_t, Derived> tmp{
      policy,
      tmp_size};
    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (tmp.data().get(),
                                 tmp_size,
                                 first,
                                 result,
                                 scan_op,
                                 InputValueT(init),
                                 num_items_fixed,
                                 stream));
    thrust::cuda_cub::throw_on_error(status,
                                     "after dispatching exclusive_scan kernel");
    // Optional sync, depending on the execution policy.
    thrust::cuda_cub::throw_on_error(thrust::cuda_cub::synchronize_optional(policy),
                                     "exclusive_scan failed to synchronize");
  }

  return result + num_items;
}
212
+
213
+ } // namespace detail
214
+
215
+ //-------------------------
216
+ // Thrust API entry points
217
+ //-------------------------
218
+
219
// Public entry point for inclusive scan by element count.
// THRUST_CDP_DISPATCH selects between the parallel CUB-based implementation
// and a sequential fallback (used where device-side dispatch is unavailable).
// Returns the end of the output range.
__thrust_exec_check_disable__
template <typename Derived,
          typename InputIt,
          typename Size,
          typename OutputIt,
          typename ScanOp>
__host__ __device__
OutputIt inclusive_scan_n(thrust::cuda_cub::execution_policy<Derived> &policy,
                          InputIt first,
                          Size num_items,
                          OutputIt result,
                          ScanOp scan_op)
{
  THRUST_CDP_DISPATCH(
    // Parallel path:
    (result = thrust::cuda_cub::detail::inclusive_scan_n_impl(policy,
                                                              first,
                                                              num_items,
                                                              result,
                                                              scan_op);),
    // Sequential fallback (policy converted to the sequential backend):
    (result = thrust::inclusive_scan(cvt_to_seq(derived_cast(policy)),
                                     first,
                                     first + num_items,
                                     result,
                                     scan_op);));
  return result;
}
245
+
246
+ template <typename Derived, typename InputIt, typename OutputIt, typename ScanOp>
247
+ __host__ __device__
248
+ OutputIt inclusive_scan(thrust::cuda_cub::execution_policy<Derived> &policy,
249
+ InputIt first,
250
+ InputIt last,
251
+ OutputIt result,
252
+ ScanOp scan_op)
253
+ {
254
+ using diff_t = typename thrust::iterator_traits<InputIt>::difference_type;
255
+ diff_t const num_items = thrust::distance(first, last);
256
+ return thrust::cuda_cub::inclusive_scan_n(policy,
257
+ first,
258
+ num_items,
259
+ result,
260
+ scan_op);
261
+ }
262
+
263
+ template <typename Derived, typename InputIt, typename OutputIt>
264
+ __host__ __device__
265
+ OutputIt inclusive_scan(thrust::cuda_cub::execution_policy<Derived> &policy,
266
+ InputIt first,
267
+ InputIt last,
268
+ OutputIt result)
269
+ {
270
+ return thrust::cuda_cub::inclusive_scan(policy,
271
+ first,
272
+ last,
273
+ result,
274
+ thrust::plus<>{});
275
+ }
276
+
277
// Public entry point for exclusive scan by element count, with an explicit
// initial value. THRUST_CDP_DISPATCH chooses the parallel CUB-based
// implementation or the sequential fallback. Returns the end of the output.
__thrust_exec_check_disable__
template <typename Derived,
          typename InputIt,
          typename Size,
          typename OutputIt,
          typename T,
          typename ScanOp>
__host__ __device__
OutputIt exclusive_scan_n(thrust::cuda_cub::execution_policy<Derived> &policy,
                          InputIt first,
                          Size num_items,
                          OutputIt result,
                          T init,
                          ScanOp scan_op)
{
  THRUST_CDP_DISPATCH(
    // Parallel path:
    (result = thrust::cuda_cub::detail::exclusive_scan_n_impl(policy,
                                                              first,
                                                              num_items,
                                                              result,
                                                              init,
                                                              scan_op);),
    // Sequential fallback:
    (result = thrust::exclusive_scan(cvt_to_seq(derived_cast(policy)),
                                     first,
                                     first + num_items,
                                     result,
                                     init,
                                     scan_op);));
  return result;
}
307
+
308
+ template <typename Derived,
309
+ typename InputIt,
310
+ typename OutputIt,
311
+ typename T,
312
+ typename ScanOp>
313
+ __host__ __device__
314
+ OutputIt exclusive_scan(thrust::cuda_cub::execution_policy<Derived> &policy,
315
+ InputIt first,
316
+ InputIt last,
317
+ OutputIt result,
318
+ T init,
319
+ ScanOp scan_op)
320
+ {
321
+ using diff_t = typename thrust::iterator_traits<InputIt>::difference_type;
322
+ diff_t const num_items = thrust::distance(first, last);
323
+ return thrust::cuda_cub::exclusive_scan_n(policy,
324
+ first,
325
+ num_items,
326
+ result,
327
+ init,
328
+ scan_op);
329
+ }
330
+
331
+ template <typename Derived, typename InputIt, typename OutputIt, typename T>
332
+ __host__ __device__
333
+ OutputIt exclusive_scan(thrust::cuda_cub::execution_policy<Derived> &policy,
334
+ InputIt first,
335
+ InputIt last,
336
+ OutputIt result,
337
+ T init)
338
+ {
339
+ return thrust::cuda_cub::exclusive_scan(policy,
340
+ first,
341
+ last,
342
+ result,
343
+ init,
344
+ thrust::plus<>{});
345
+ }
346
+
347
+ template <typename Derived, typename InputIt, typename OutputIt>
348
+ __host__ __device__
349
+ OutputIt exclusive_scan(thrust::cuda_cub::execution_policy<Derived> &policy,
350
+ InputIt first,
351
+ InputIt last,
352
+ OutputIt result)
353
+ {
354
+ using init_type = typename thrust::iterator_traits<InputIt>::value_type;
355
+ return cuda_cub::exclusive_scan(policy, first, last, result, init_type{});
356
+ };
357
+
358
+ } // namespace cuda_cub
359
+ THRUST_NAMESPACE_END
360
+
361
+ #include <thrust/scan.h>
362
+
363
+ #endif // NVCC
miniCUDA124/include/thrust/system/cuda/detail/scan_by_key.h ADDED
@@ -0,0 +1,500 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+
41
+ #include <thrust/distance.h>
42
+ #include <thrust/functional.h>
43
+
44
+ #include <thrust/type_traits/is_contiguous_iterator.h>
45
+
46
+ #include <thrust/iterator/iterator_traits.h>
47
+
48
+ #include <thrust/detail/cstdint.h>
49
+ #include <thrust/detail/minmax.h>
50
+ #include <thrust/detail/mpl/math.h>
51
+ #include <thrust/detail/temporary_array.h>
52
+
53
+ #include <thrust/system/cuda/config.h>
54
+ #include <thrust/system/cuda/detail/cdp_dispatch.h>
55
+ #include <thrust/system/cuda/detail/dispatch.h>
56
+ #include <thrust/system/cuda/detail/par_to_seq.h>
57
+ #include <thrust/system/cuda/detail/util.h>
58
+
59
+ #include <cub/device/dispatch/dispatch_scan_by_key.cuh>
60
+ #include <cub/util_type.cuh>
61
+
62
+ THRUST_NAMESPACE_BEGIN
63
+ namespace cuda_cub
64
+ {
65
+ namespace detail
66
+ {
67
+
68
// Parallel inclusive scan-by-key over `num_items` elements: values are
// combined with scan_op within runs of keys considered equal by equality_op.
// Contiguous iterators are unwrapped to raw pointers before dispatching to
// cub::DispatchScanByKey (two-phase temp-storage protocol). Returns the end
// of the output range.
__thrust_exec_check_disable__
template <typename Derived,
          typename KeysInIt,
          typename ValuesInIt,
          typename ValuesOutIt,
          typename EqualityOpT,
          typename ScanOpT,
          typename SizeT>
__host__ __device__
ValuesOutIt inclusive_scan_by_key_n(
  thrust::cuda_cub::execution_policy<Derived>& policy,
  KeysInIt keys,
  ValuesInIt values,
  ValuesOutIt result,
  SizeT num_items,
  EqualityOpT equality_op,
  ScanOpT scan_op)
{
  // Empty input: nothing to launch.
  if (num_items == 0)
  {
    return result;
  }

  // Convert to raw pointers if possible:
  // (unwrapping contiguous iterators lets CUB operate on plain pointers)
  using KeysInUnwrapIt =
    thrust::detail::try_unwrap_contiguous_iterator_return_t<KeysInIt>;
  using ValuesInUnwrapIt =
    thrust::detail::try_unwrap_contiguous_iterator_return_t<ValuesInIt>;
  using ValuesOutUnwrapIt =
    thrust::detail::try_unwrap_contiguous_iterator_return_t<ValuesOutIt>;
  // Inclusive scan accumulates in the input values' value type.
  using AccumT = typename thrust::iterator_traits<ValuesInUnwrapIt>::value_type;

  auto keys_unwrap   = thrust::detail::try_unwrap_contiguous_iterator(keys);
  auto values_unwrap = thrust::detail::try_unwrap_contiguous_iterator(values);
  auto result_unwrap = thrust::detail::try_unwrap_contiguous_iterator(result);

  // 32-bit vs. 64-bit offset instantiations, selected at runtime by
  // THRUST_INDEX_TYPE_DISPATCH2 based on num_items.
  using Dispatch32 = cub::DispatchScanByKey<KeysInUnwrapIt,
                                            ValuesInUnwrapIt,
                                            ValuesOutUnwrapIt,
                                            EqualityOpT,
                                            ScanOpT,
                                            cub::NullType,
                                            thrust::detail::int32_t,
                                            AccumT>;
  using Dispatch64 = cub::DispatchScanByKey<KeysInUnwrapIt,
                                            ValuesInUnwrapIt,
                                            ValuesOutUnwrapIt,
                                            EqualityOpT,
                                            ScanOpT,
                                            cub::NullType,
                                            thrust::detail::int64_t,
                                            AccumT>;

  cudaStream_t stream = thrust::cuda_cub::stream(policy);
  cudaError_t status{};

  // Determine temporary storage requirements:
  // (null temp pointer => size query only; `num_items_fixed` is declared by
  // the dispatch macro as num_items cast to the selected index type)
  std::size_t tmp_size = 0;
  {
    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (nullptr,
                                 tmp_size,
                                 keys_unwrap,
                                 values_unwrap,
                                 result_unwrap,
                                 equality_op,
                                 scan_op,
                                 cub::NullType{},
                                 num_items_fixed,
                                 stream));
    thrust::cuda_cub::throw_on_error(status,
                                     "after determining tmp storage "
                                     "requirements for inclusive_scan_by_key");
  }

  // Run scan:
  {
    // Allocate temporary storage:
    thrust::detail::temporary_array<thrust::detail::uint8_t, Derived> tmp{
      policy,
      tmp_size};

    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (tmp.data().get(),
                                 tmp_size,
                                 keys_unwrap,
                                 values_unwrap,
                                 result_unwrap,
                                 equality_op,
                                 scan_op,
                                 cub::NullType{},
                                 num_items_fixed,
                                 stream));

    thrust::cuda_cub::throw_on_error(
      status, "after dispatching inclusive_scan_by_key kernel");

    // Synchronizes only when the policy requires it.
    thrust::cuda_cub::throw_on_error(
      thrust::cuda_cub::synchronize_optional(policy),
      "inclusive_scan_by_key failed to synchronize");
  }

  return result + num_items;
}
178
+
179
// Parallel exclusive scan-by-key over `num_items` elements with an explicit
// initial value per key segment. Mirrors inclusive_scan_by_key_n but passes
// init_value (and uses InitValueT as the accumulator type) instead of
// cub::NullType. Returns the end of the output range.
__thrust_exec_check_disable__
template <typename Derived,
          typename KeysInIt,
          typename ValuesInIt,
          typename ValuesOutIt,
          typename InitValueT,
          typename EqualityOpT,
          typename ScanOpT,
          typename SizeT>
__host__ __device__
ValuesOutIt exclusive_scan_by_key_n(
  thrust::cuda_cub::execution_policy<Derived>& policy,
  KeysInIt keys,
  ValuesInIt values,
  ValuesOutIt result,
  SizeT num_items,
  InitValueT init_value,
  EqualityOpT equality_op,
  ScanOpT scan_op)
{

  // Empty input: nothing to launch.
  if (num_items == 0)
  {
    return result;
  }

  // Convert to raw pointers if possible:
  using KeysInUnwrapIt =
    thrust::detail::try_unwrap_contiguous_iterator_return_t<KeysInIt>;
  using ValuesInUnwrapIt =
    thrust::detail::try_unwrap_contiguous_iterator_return_t<ValuesInIt>;
  using ValuesOutUnwrapIt =
    thrust::detail::try_unwrap_contiguous_iterator_return_t<ValuesOutIt>;

  auto keys_unwrap   = thrust::detail::try_unwrap_contiguous_iterator(keys);
  auto values_unwrap = thrust::detail::try_unwrap_contiguous_iterator(values);
  auto result_unwrap = thrust::detail::try_unwrap_contiguous_iterator(result);

  // 32-bit vs. 64-bit offset instantiations, selected at runtime by
  // THRUST_INDEX_TYPE_DISPATCH2 based on num_items.
  using Dispatch32 = cub::DispatchScanByKey<KeysInUnwrapIt,
                                            ValuesInUnwrapIt,
                                            ValuesOutUnwrapIt,
                                            EqualityOpT,
                                            ScanOpT,
                                            InitValueT,
                                            thrust::detail::int32_t,
                                            InitValueT>;
  using Dispatch64 = cub::DispatchScanByKey<KeysInUnwrapIt,
                                            ValuesInUnwrapIt,
                                            ValuesOutUnwrapIt,
                                            EqualityOpT,
                                            ScanOpT,
                                            InitValueT,
                                            thrust::detail::int64_t,
                                            InitValueT>;

  cudaStream_t stream = thrust::cuda_cub::stream(policy);
  cudaError_t status{};

  // Determine temporary storage requirements:
  // (`num_items_fixed` is declared by the dispatch macro)
  std::size_t tmp_size = 0;
  {
    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (nullptr,
                                 tmp_size,
                                 keys_unwrap,
                                 values_unwrap,
                                 result_unwrap,
                                 equality_op,
                                 scan_op,
                                 init_value,
                                 num_items_fixed,
                                 stream));
    thrust::cuda_cub::throw_on_error(status,
                                     "after determining tmp storage "
                                     "requirements for exclusive_scan_by_key");
  }

  // Run scan:
  {
    // Allocate temporary storage:
    thrust::detail::temporary_array<thrust::detail::uint8_t, Derived> tmp{
      policy,
      tmp_size};

    THRUST_INDEX_TYPE_DISPATCH2(status,
                                Dispatch32::Dispatch,
                                Dispatch64::Dispatch,
                                num_items,
                                (tmp.data().get(),
                                 tmp_size,
                                 keys_unwrap,
                                 values_unwrap,
                                 result_unwrap,
                                 equality_op,
                                 scan_op,
                                 init_value,
                                 num_items_fixed,
                                 stream));

    thrust::cuda_cub::throw_on_error(
      status, "after dispatching exclusive_scan_by_key kernel");

    // Optional sync, depending on the execution policy.
    thrust::cuda_cub::throw_on_error(
      thrust::cuda_cub::synchronize_optional(policy),
      "exclusive_scan_by_key failed to synchronize");
  }

  return result + num_items;
}
291
+
292
+
293
+ } // namespace detail
294
+
295
+ //-------------------------
296
+ // Thrust API entry points
297
+ //-------------------------
298
+
299
+ //---------------------------
300
+ // Inclusive scan
301
+ //---------------------------
302
+
303
// Public entry point for inclusive scan-by-key with explicit key-equality
// predicate and scan operator. THRUST_CDP_DISPATCH selects the parallel
// CUB-based path or a sequential fallback.
__thrust_exec_check_disable__
template <class Derived,
          class KeyInputIt,
          class ValInputIt,
          class ValOutputIt,
          class BinaryPred,
          class ScanOp>
ValOutputIt __host__ __device__
inclusive_scan_by_key(execution_policy<Derived> &policy,
                      KeyInputIt key_first,
                      KeyInputIt key_last,
                      ValInputIt value_first,
                      ValOutputIt value_result,
                      BinaryPred binary_pred,
                      ScanOp scan_op)
{
  ValOutputIt ret = value_result;
  THRUST_CDP_DISPATCH(
    // Parallel path (count-based detail implementation):
    (ret = thrust::cuda_cub::detail::inclusive_scan_by_key_n(
       policy,
       key_first,
       value_first,
       value_result,
       thrust::distance(key_first, key_last),
       binary_pred,
       scan_op);),
    // Sequential fallback:
    (ret = thrust::inclusive_scan_by_key(cvt_to_seq(derived_cast(policy)),
                                         key_first,
                                         key_last,
                                         value_first,
                                         value_result,
                                         binary_pred,
                                         scan_op);));

  return ret;
}
339
+
340
+ template <class Derived,
341
+ class KeyInputIt,
342
+ class ValInputIt,
343
+ class ValOutputIt,
344
+ class BinaryPred>
345
+ ValOutputIt __host__ __device__
346
+ inclusive_scan_by_key(execution_policy<Derived> &policy,
347
+ KeyInputIt key_first,
348
+ KeyInputIt key_last,
349
+ ValInputIt value_first,
350
+ ValOutputIt value_result,
351
+ BinaryPred binary_pred)
352
+ {
353
+ return cuda_cub::inclusive_scan_by_key(policy,
354
+ key_first,
355
+ key_last,
356
+ value_first,
357
+ value_result,
358
+ binary_pred,
359
+ thrust::plus<>());
360
+ }
361
+
362
+ template <class Derived,
363
+ class KeyInputIt,
364
+ class ValInputIt,
365
+ class ValOutputIt>
366
+ ValOutputIt __host__ __device__
367
+ inclusive_scan_by_key(execution_policy<Derived> &policy,
368
+ KeyInputIt key_first,
369
+ KeyInputIt key_last,
370
+ ValInputIt value_first,
371
+ ValOutputIt value_result)
372
+ {
373
+ return cuda_cub::inclusive_scan_by_key(policy,
374
+ key_first,
375
+ key_last,
376
+ value_first,
377
+ value_result,
378
+ thrust::equal_to<>());
379
+ }
380
+
381
+
382
+ //---------------------------
383
+ // Exclusive scan
384
+ //---------------------------
385
+
386
// Public entry point for exclusive scan-by-key with explicit initial value,
// key-equality predicate, and scan operator. THRUST_CDP_DISPATCH selects
// the parallel CUB-based path or a sequential fallback.
__thrust_exec_check_disable__
template <class Derived,
          class KeyInputIt,
          class ValInputIt,
          class ValOutputIt,
          class Init,
          class BinaryPred,
          class ScanOp>
ValOutputIt __host__ __device__
exclusive_scan_by_key(execution_policy<Derived> &policy,
                      KeyInputIt key_first,
                      KeyInputIt key_last,
                      ValInputIt value_first,
                      ValOutputIt value_result,
                      Init init,
                      BinaryPred binary_pred,
                      ScanOp scan_op)
{
  ValOutputIt ret = value_result;
  THRUST_CDP_DISPATCH(
    // Parallel path (count-based detail implementation):
    (ret = thrust::cuda_cub::detail::exclusive_scan_by_key_n(
       policy,
       key_first,
       value_first,
       value_result,
       thrust::distance(key_first, key_last),
       init,
       binary_pred,
       scan_op);),
    // Sequential fallback:
    (ret = thrust::exclusive_scan_by_key(cvt_to_seq(derived_cast(policy)),
                                         key_first,
                                         key_last,
                                         value_first,
                                         value_result,
                                         init,
                                         binary_pred,
                                         scan_op);));
  return ret;
}
425
+
426
+ template <class Derived,
427
+ class KeyInputIt,
428
+ class ValInputIt,
429
+ class ValOutputIt,
430
+ class Init,
431
+ class BinaryPred>
432
+ ValOutputIt __host__ __device__
433
+ exclusive_scan_by_key(execution_policy<Derived> &policy,
434
+ KeyInputIt key_first,
435
+ KeyInputIt key_last,
436
+ ValInputIt value_first,
437
+ ValOutputIt value_result,
438
+ Init init,
439
+ BinaryPred binary_pred)
440
+ {
441
+ return cuda_cub::exclusive_scan_by_key(policy,
442
+ key_first,
443
+ key_last,
444
+ value_first,
445
+ value_result,
446
+ init,
447
+ binary_pred,
448
+ thrust::plus<>());
449
+ }
450
+
451
+ template <class Derived,
452
+ class KeyInputIt,
453
+ class ValInputIt,
454
+ class ValOutputIt,
455
+ class Init>
456
+ ValOutputIt __host__ __device__
457
+ exclusive_scan_by_key(execution_policy<Derived> &policy,
458
+ KeyInputIt key_first,
459
+ KeyInputIt key_last,
460
+ ValInputIt value_first,
461
+ ValOutputIt value_result,
462
+ Init init)
463
+ {
464
+ return cuda_cub::exclusive_scan_by_key(policy,
465
+ key_first,
466
+ key_last,
467
+ value_first,
468
+ value_result,
469
+ init,
470
+ thrust::equal_to<>());
471
+ }
472
+
473
+
474
+ template <class Derived,
475
+ class KeyInputIt,
476
+ class ValInputIt,
477
+ class ValOutputIt>
478
+ ValOutputIt __host__ __device__
479
+ exclusive_scan_by_key(execution_policy<Derived> &policy,
480
+ KeyInputIt key_first,
481
+ KeyInputIt key_last,
482
+ ValInputIt value_first,
483
+ ValOutputIt value_result)
484
+ {
485
+ using value_type = typename thrust::iterator_traits<ValInputIt>::value_type;
486
+ return cuda_cub::exclusive_scan_by_key(policy,
487
+ key_first,
488
+ key_last,
489
+ value_first,
490
+ value_result,
491
+ value_type{});
492
+ }
493
+
494
+
495
+ } // namespace cuda_cub
496
+ THRUST_NAMESPACE_END
497
+
498
+ #include <thrust/scan.h>
499
+
500
+ #endif // NVCC
miniCUDA124/include/thrust/system/cuda/detail/scatter.h ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <thrust/system/cuda/detail/transform.h>
41
+ #include <thrust/iterator/permutation_iterator.h>
42
+
43
+ THRUST_NAMESPACE_BEGIN
44
+ namespace cuda_cub {
45
+
46
+ template <class Derived,
47
+ class ItemsIt,
48
+ class MapIt,
49
+ class ResultIt>
50
+ void __host__ __device__
51
+ scatter(execution_policy<Derived>& policy,
52
+ ItemsIt first,
53
+ ItemsIt last,
54
+ MapIt map,
55
+ ResultIt result)
56
+ {
57
+ cuda_cub::transform(policy,
58
+ first,
59
+ last,
60
+ thrust::make_permutation_iterator(result, map),
61
+ identity());
62
+ }
63
+
64
+ template <class Derived,
65
+ class ItemsIt,
66
+ class MapIt,
67
+ class StencilIt,
68
+ class ResultIt,
69
+ class Predicate>
70
+ void __host__ __device__
71
+ scatter_if(execution_policy<Derived>& policy,
72
+ ItemsIt first,
73
+ ItemsIt last,
74
+ MapIt map,
75
+ StencilIt stencil,
76
+ ResultIt result,
77
+ Predicate predicate)
78
+ {
79
+ cuda_cub::transform_if(policy,
80
+ first,
81
+ last,
82
+ stencil,
83
+ thrust::make_permutation_iterator(result, map),
84
+ identity(),
85
+ predicate);
86
+ }
87
+
88
+ template <class Derived,
89
+ class ItemsIt,
90
+ class MapIt,
91
+ class StencilIt,
92
+ class ResultIt,
93
+ class Predicate>
94
+ void __host__ __device__
95
+ scatter_if(execution_policy<Derived>& policy,
96
+ ItemsIt first,
97
+ ItemsIt last,
98
+ MapIt map,
99
+ StencilIt stencil,
100
+ ResultIt result)
101
+ {
102
+ cuda_cub::scatter_if(policy,
103
+ first,
104
+ last,
105
+ map,
106
+ stencil,
107
+ result,
108
+ identity());
109
+ }
110
+
111
+
112
+ } // namespace cuda_cub
113
+ THRUST_NAMESPACE_END
114
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/sequence.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // this system has no special version of this algorithm
30
+
miniCUDA124/include/thrust/system/cuda/detail/set_operations.h ADDED
@@ -0,0 +1,1947 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+
41
+ #include <thrust/detail/alignment.h>
42
+ #include <thrust/detail/cstdint.h>
43
+ #include <thrust/detail/mpl/math.h>
44
+ #include <thrust/detail/temporary_array.h>
45
+ #include <thrust/distance.h>
46
+ #include <thrust/extrema.h>
47
+ #include <thrust/pair.h>
48
+ #include <thrust/set_operations.h>
49
+ #include <thrust/system/cuda/detail/cdp_dispatch.h>
50
+ #include <thrust/system/cuda/detail/core/agent_launcher.h>
51
+ #include <thrust/system/cuda/detail/execution_policy.h>
52
+ #include <thrust/system/cuda/detail/get_value.h>
53
+ #include <thrust/system/cuda/detail/par_to_seq.h>
54
+ #include <thrust/system/cuda/detail/util.h>
55
+
56
+
57
+ THRUST_NAMESPACE_BEGIN
58
+
59
+ namespace cuda_cub {
60
+
61
+ namespace __set_operations {
62
+
63
+ template <bool UpperBound,
64
+ class IntT,
65
+ class Size,
66
+ class It,
67
+ class T,
68
+ class Comp>
69
+ THRUST_DEVICE_FUNCTION void
70
+ binary_search_iteration(It data,
71
+ Size &begin,
72
+ Size &end,
73
+ T key,
74
+ int shift,
75
+ Comp comp)
76
+ {
77
+
78
+ IntT scale = (1 << shift) - 1;
79
+ Size mid = (begin + scale * end) >> shift;
80
+
81
+ T key2 = data[mid];
82
+ bool pred = UpperBound ? !comp(key, key2) : comp(key2, key);
83
+ if (pred)
84
+ begin = mid + 1;
85
+ else
86
+ end = mid;
87
+ }
88
+
89
+ template <bool UpperBound, class Size, class T, class It, class Comp>
90
+ THRUST_DEVICE_FUNCTION Size
91
+ binary_search(It data, Size count, T key, Comp comp)
92
+ {
93
+ Size begin = 0;
94
+ Size end = count;
95
+ while (begin < end)
96
+ binary_search_iteration<UpperBound, int>(data,
97
+ begin,
98
+ end,
99
+ key,
100
+ 1,
101
+ comp);
102
+ return begin;
103
+ }
104
+
105
+ template <bool UpperBound, class IntT, class Size, class T, class It, class Comp>
106
+ THRUST_DEVICE_FUNCTION Size
107
+ biased_binary_search(It data, Size count, T key, IntT levels, Comp comp)
108
+ {
109
+ Size begin = 0;
110
+ Size end = count;
111
+
112
+ if (levels >= 4 && begin < end)
113
+ binary_search_iteration<UpperBound, IntT>(data, begin, end, key, 9, comp);
114
+ if (levels >= 3 && begin < end)
115
+ binary_search_iteration<UpperBound, IntT>(data, begin, end, key, 7, comp);
116
+ if (levels >= 2 && begin < end)
117
+ binary_search_iteration<UpperBound, IntT>(data, begin, end, key, 5, comp);
118
+ if (levels >= 1 && begin < end)
119
+ binary_search_iteration<UpperBound, IntT>(data, begin, end, key, 4, comp);
120
+
121
+ while (begin < end)
122
+ binary_search_iteration<UpperBound, IntT>(data, begin, end, key, 1, comp);
123
+ return begin;
124
+ }
125
+
126
+ template <bool UpperBound, class Size, class It1, class It2, class Comp>
127
+ THRUST_DEVICE_FUNCTION Size
128
+ merge_path(It1 a, Size aCount, It2 b, Size bCount, Size diag, Comp comp)
129
+ {
130
+ typedef typename thrust::iterator_traits<It1>::value_type T;
131
+
132
+ Size begin = thrust::max<Size>(0, diag - bCount);
133
+ Size end = thrust::min<Size>(diag, aCount);
134
+
135
+ while (begin < end)
136
+ {
137
+ Size mid = (begin + end) >> 1;
138
+ T aKey = a[mid];
139
+ T bKey = b[diag - 1 - mid];
140
+ bool pred = UpperBound ? comp(aKey, bKey) : !comp(bKey, aKey);
141
+ if (pred)
142
+ begin = mid + 1;
143
+ else
144
+ end = mid;
145
+ }
146
+ return begin;
147
+ }
148
+
149
+ template <class It1, class It2, class Size, class Size2, class CompareOp>
150
+ THRUST_DEVICE_FUNCTION pair<Size, Size>
151
+ balanced_path(It1 keys1,
152
+ It2 keys2,
153
+ Size num_keys1,
154
+ Size num_keys2,
155
+ Size diag,
156
+ Size2 levels,
157
+ CompareOp compare_op)
158
+ {
159
+ typedef typename iterator_traits<It1>::value_type T;
160
+
161
+ Size index1 = merge_path<false>(keys1,
162
+ num_keys1,
163
+ keys2,
164
+ num_keys2,
165
+ diag,
166
+ compare_op);
167
+ Size index2 = diag - index1;
168
+
169
+ bool star = false;
170
+ if (index2 < num_keys2)
171
+ {
172
+ T x = keys2[index2];
173
+
174
+ // Search for the beginning of the duplicate run in both A and B.
175
+ Size start1 = biased_binary_search<false>(keys1,
176
+ index1,
177
+ x,
178
+ levels,
179
+ compare_op);
180
+ Size start2 = biased_binary_search<false>(keys2,
181
+ index2,
182
+ x,
183
+ levels,
184
+ compare_op);
185
+
186
+ // The distance between x's merge path and its lower_bound is its rank.
187
+ // We add up the a and b ranks and evenly distribute them to
188
+ // get a stairstep path.
189
+ Size run1 = index1 - start1;
190
+ Size run2 = index2 - start2;
191
+ Size total_run = run1 + run2;
192
+
193
+ // Attempt to advance b and regress a.
194
+ Size advance2 = max<Size>(total_run >> 1, total_run - run1);
195
+ Size end2 = min<Size>(num_keys2, start2 + advance2 + 1);
196
+
197
+ Size run_end2 = index2 + binary_search<true>(keys2 + index2,
198
+ end2 - index2,
199
+ x,
200
+ compare_op);
201
+ run2 = run_end2 - start2;
202
+
203
+ advance2 = min<Size>(advance2, run2);
204
+ Size advance1 = total_run - advance2;
205
+
206
+ bool round_up = (advance1 == advance2 + 1) && (advance2 < run2);
207
+ if (round_up) star = true;
208
+
209
+ index1 = start1 + advance1;
210
+ }
211
+ return thrust::make_pair(index1, (diag - index1) + star);
212
+ } // func balanced_path
213
+
214
+ template <int _BLOCK_THREADS,
215
+ int _ITEMS_PER_THREAD = 1,
216
+ cub::BlockLoadAlgorithm _LOAD_ALGORITHM = cub::BLOCK_LOAD_DIRECT,
217
+ cub::CacheLoadModifier _LOAD_MODIFIER = cub::LOAD_LDG,
218
+ cub::BlockScanAlgorithm _SCAN_ALGORITHM = cub::BLOCK_SCAN_WARP_SCANS>
219
+ struct PtxPolicy
220
+ {
221
+ enum
222
+ {
223
+ BLOCK_THREADS = _BLOCK_THREADS,
224
+ ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
225
+ ITEMS_PER_TILE = _BLOCK_THREADS * _ITEMS_PER_THREAD - 1
226
+ };
227
+
228
+ static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM;
229
+ static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER;
230
+ static const cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM;
231
+ }; // PtxPolicy
232
+
233
+ template<class Arch, class T, class U>
234
+ struct Tuning;
235
+
236
+ namespace mpl = thrust::detail::mpl::math;
237
+
238
+ template<class T, class U>
239
+ struct Tuning<sm30,T,U>
240
+ {
241
+ enum
242
+ {
243
+ MAX_INPUT_BYTES = mpl::max<size_t, sizeof(T), sizeof(U)>::value,
244
+ COMBINED_INPUT_BYTES = sizeof(T), // + sizeof(Value),
245
+ NOMINAL_4B_ITEMS_PER_THREAD = 7,
246
+ ITEMS_PER_THREAD = mpl::min<
247
+ int,
248
+ NOMINAL_4B_ITEMS_PER_THREAD,
249
+ mpl::max<
250
+ int,
251
+ 1,
252
+ static_cast<int>(((NOMINAL_4B_ITEMS_PER_THREAD * 4) +
253
+ COMBINED_INPUT_BYTES - 1) /
254
+ COMBINED_INPUT_BYTES)>::value>::value,
255
+ };
256
+
257
+ typedef PtxPolicy<128,
258
+ ITEMS_PER_THREAD,
259
+ cub::BLOCK_LOAD_WARP_TRANSPOSE,
260
+ cub::LOAD_DEFAULT,
261
+ cub::BLOCK_SCAN_WARP_SCANS>
262
+ type;
263
+ }; // tuning sm30
264
+
265
+ template<class T, class U>
266
+ struct Tuning<sm52,T,U>
267
+ {
268
+ enum
269
+ {
270
+ MAX_INPUT_BYTES = mpl::max<size_t, sizeof(T), sizeof(U)>::value,
271
+ COMBINED_INPUT_BYTES = sizeof(T), // + sizeof(U),
272
+ NOMINAL_4B_ITEMS_PER_THREAD = 15,
273
+ ITEMS_PER_THREAD = mpl::min<
274
+ int,
275
+ NOMINAL_4B_ITEMS_PER_THREAD,
276
+ mpl::max<
277
+ int,
278
+ 1,
279
+ static_cast<int>(((NOMINAL_4B_ITEMS_PER_THREAD * 4) +
280
+ COMBINED_INPUT_BYTES - 1) /
281
+ COMBINED_INPUT_BYTES)>::value>::value,
282
+ };
283
+
284
+ typedef PtxPolicy<256,
285
+ ITEMS_PER_THREAD,
286
+ cub::BLOCK_LOAD_WARP_TRANSPOSE,
287
+ cub::LOAD_DEFAULT,
288
+ cub::BLOCK_SCAN_WARP_SCANS>
289
+ type;
290
+ }; // tuning sm52
291
+
292
+ template<class T, class U>
293
+ struct Tuning<sm60,T,U>
294
+ {
295
+ enum
296
+ {
297
+ MAX_INPUT_BYTES = mpl::max<size_t, sizeof(T), sizeof(U)>::value,
298
+ COMBINED_INPUT_BYTES = sizeof(T), // + sizeof(U),
299
+ NOMINAL_4B_ITEMS_PER_THREAD = 19,
300
+ ITEMS_PER_THREAD = mpl::min<
301
+ int,
302
+ NOMINAL_4B_ITEMS_PER_THREAD,
303
+ mpl::max<
304
+ int,
305
+ 1,
306
+ static_cast<int>(((NOMINAL_4B_ITEMS_PER_THREAD * 4) +
307
+ COMBINED_INPUT_BYTES - 1) /
308
+ COMBINED_INPUT_BYTES)>::value>::value,
309
+ };
310
+
311
+ typedef PtxPolicy<512,
312
+ ITEMS_PER_THREAD,
313
+ cub::BLOCK_LOAD_WARP_TRANSPOSE,
314
+ cub::LOAD_DEFAULT,
315
+ cub::BLOCK_SCAN_WARP_SCANS>
316
+ type;
317
+ }; // tuning sm60
318
+
319
+ template <class KeysIt1,
320
+ class KeysIt2,
321
+ class ValuesIt1,
322
+ class ValuesIt2,
323
+ class KeysOutputIt,
324
+ class ValuesOutputIt,
325
+ class Size,
326
+ class CompareOp,
327
+ class SetOp,
328
+ class HAS_VALUES>
329
+ struct SetOpAgent
330
+ {
331
+ typedef typename iterator_traits<KeysIt1>::value_type key1_type;
332
+ typedef typename iterator_traits<KeysIt2>::value_type key2_type;
333
+ typedef typename iterator_traits<ValuesIt1>::value_type value1_type;
334
+ typedef typename iterator_traits<ValuesIt2>::value_type value2_type;
335
+
336
+ typedef key1_type key_type;
337
+ typedef value1_type value_type;
338
+
339
+ typedef cub::ScanTileState<Size> ScanTileState;
340
+
341
+ template <class Arch>
342
+ struct PtxPlan : Tuning<Arch, key_type, value_type>::type
343
+ {
344
+ typedef Tuning<Arch, key_type, value_type> tuning;
345
+
346
+ typedef typename core::LoadIterator<PtxPlan, KeysIt1>::type KeysLoadIt1;
347
+ typedef typename core::LoadIterator<PtxPlan, KeysIt2>::type KeysLoadIt2;
348
+ typedef typename core::LoadIterator<PtxPlan, ValuesIt1>::type ValuesLoadIt1;
349
+ typedef typename core::LoadIterator<PtxPlan, ValuesIt2>::type ValuesLoadIt2;
350
+
351
+ typedef typename core::BlockLoad<PtxPlan, KeysLoadIt1>::type BlockLoadKeys1;
352
+ typedef typename core::BlockLoad<PtxPlan, KeysLoadIt2>::type BlockLoadKeys2;
353
+ typedef typename core::BlockLoad<PtxPlan, ValuesLoadIt1>::type BlockLoadValues1;
354
+ typedef typename core::BlockLoad<PtxPlan, ValuesLoadIt2>::type BlockLoadValues2;
355
+
356
+ typedef cub::TilePrefixCallbackOp<Size,
357
+ cub::Sum,
358
+ ScanTileState,
359
+ Arch::ver>
360
+ TilePrefixCallback;
361
+
362
+ typedef cub::BlockScan<Size,
363
+ PtxPlan::BLOCK_THREADS,
364
+ PtxPlan::SCAN_ALGORITHM,
365
+ 1,
366
+ 1,
367
+ Arch::ver>
368
+ BlockScan;
369
+
370
+ // gather required temporary storage in a union
371
+ //
372
+ union TempStorage
373
+ {
374
+ struct ScanStorage
375
+ {
376
+ typename BlockScan::TempStorage scan;
377
+ typename TilePrefixCallback::TempStorage prefix;
378
+ } scan_storage;
379
+
380
+ struct LoadStorage
381
+ {
382
+ core::uninitialized_array<int, PtxPlan::BLOCK_THREADS> offset;
383
+ union
384
+ {
385
+ // FIXME These don't appear to be used anywhere?
386
+ typename BlockLoadKeys1::TempStorage load_keys1;
387
+ typename BlockLoadKeys2::TempStorage load_keys2;
388
+ typename BlockLoadValues1::TempStorage load_values1;
389
+ typename BlockLoadValues2::TempStorage load_values2;
390
+
391
+ // Allocate extra shmem than truely neccessary
392
+ // This will permit to avoid range checks in
393
+ // serial set operations, e.g. serial_set_difference
394
+ core::uninitialized_array<
395
+ key_type,
396
+ PtxPlan::ITEMS_PER_TILE + PtxPlan::BLOCK_THREADS>
397
+ keys_shared;
398
+
399
+ core::uninitialized_array<
400
+ value_type,
401
+ PtxPlan::ITEMS_PER_TILE + PtxPlan::BLOCK_THREADS>
402
+ values_shared;
403
+ }; // anon union
404
+ } load_storage; // struct LoadStorage
405
+ }; // union TempStorage
406
+ }; // struct PtxPlan
407
+
408
+ typedef typename core::specialize_plan_msvc10_war<PtxPlan>::type::type ptx_plan;
409
+
410
+ typedef typename ptx_plan::KeysLoadIt1 KeysLoadIt1;
411
+ typedef typename ptx_plan::KeysLoadIt2 KeysLoadIt2;
412
+ typedef typename ptx_plan::ValuesLoadIt1 ValuesLoadIt1;
413
+ typedef typename ptx_plan::ValuesLoadIt2 ValuesLoadIt2;
414
+
415
+ typedef typename ptx_plan::BlockLoadKeys1 BlockLoadKeys1;
416
+ typedef typename ptx_plan::BlockLoadKeys2 BlockLoadKeys2;
417
+ typedef typename ptx_plan::BlockLoadValues1 BlockLoadValues1;
418
+ typedef typename ptx_plan::BlockLoadValues2 BlockLoadValues2;
419
+
420
+ typedef typename ptx_plan::TilePrefixCallback TilePrefixCallback;
421
+ typedef typename ptx_plan::BlockScan BlockScan;
422
+
423
+ typedef typename ptx_plan::TempStorage TempStorage;
424
+
425
+ enum
426
+ {
427
+ ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD,
428
+ BLOCK_THREADS = ptx_plan::BLOCK_THREADS,
429
+ };
430
+
431
+ struct impl
432
+ {
433
+ //---------------------------------------------------------------------
434
+ // Per-thread fields
435
+ //---------------------------------------------------------------------
436
+
437
+ TempStorage & storage;
438
+ ScanTileState &tile_state;
439
+ KeysLoadIt1 keys1_in;
440
+ KeysLoadIt2 keys2_in;
441
+ ValuesLoadIt1 values1_in;
442
+ ValuesLoadIt2 values2_in;
443
+ Size keys1_count;
444
+ Size keys2_count;
445
+ KeysOutputIt keys_out;
446
+ ValuesOutputIt values_out;
447
+ CompareOp compare_op;
448
+ SetOp set_op;
449
+ pair<Size, Size> *partitions;
450
+ std::size_t *output_count;
451
+
452
+ //---------------------------------------------------------------------
453
+ // Utility functions
454
+ //---------------------------------------------------------------------
455
+
456
+ template <bool IS_FULL_TILE, class T, class It1, class It2>
457
+ THRUST_DEVICE_FUNCTION void
458
+ gmem_to_reg(T (&output)[ITEMS_PER_THREAD],
459
+ It1 input1,
460
+ It2 input2,
461
+ int count1,
462
+ int count2)
463
+ {
464
+ if (IS_FULL_TILE)
465
+ {
466
+ #pragma unroll
467
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD - 1; ++ITEM)
468
+ {
469
+ int idx = BLOCK_THREADS * ITEM + threadIdx.x;
470
+ output[ITEM] = (idx < count1)
471
+ ? static_cast<T>(input1[idx])
472
+ : static_cast<T>(input2[idx - count1]);
473
+ }
474
+
475
+ // last ITEM might be a conditional load even for full tiles
476
+ // please check first before attempting to load.
477
+ int ITEM = ITEMS_PER_THREAD - 1;
478
+ int idx = BLOCK_THREADS * ITEM + threadIdx.x;
479
+ if (idx < count1 + count2)
480
+ output[ITEM] = (idx < count1)
481
+ ? static_cast<T>(input1[idx])
482
+ : static_cast<T>(input2[idx - count1]);
483
+ }
484
+ else
485
+ {
486
+ #pragma unroll
487
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
488
+ {
489
+ int idx = BLOCK_THREADS * ITEM + threadIdx.x;
490
+ if (idx < count1 + count2)
491
+ {
492
+ output[ITEM] = (idx < count1)
493
+ ? static_cast<T>(input1[idx])
494
+ : static_cast<T>(input2[idx - count1]);
495
+ }
496
+ }
497
+ }
498
+ }
499
+
500
+ template <class T, class It>
501
+ THRUST_DEVICE_FUNCTION void
502
+ reg_to_shared(It output,
503
+ T (&input)[ITEMS_PER_THREAD])
504
+ {
505
+ #pragma unroll
506
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
507
+ {
508
+ int idx = BLOCK_THREADS * ITEM + threadIdx.x;
509
+ output[idx] = input[ITEM];
510
+ }
511
+ }
512
+
513
+ template <class OutputIt, class T, class SharedIt>
514
+ void THRUST_DEVICE_FUNCTION
515
+ scatter(OutputIt output,
516
+ T (&input)[ITEMS_PER_THREAD],
517
+ SharedIt shared,
518
+ int active_mask,
519
+ Size thread_output_prefix,
520
+ Size tile_output_prefix,
521
+ int tile_output_count)
522
+ {
523
+ using core::sync_threadblock;
524
+
525
+
526
+
527
+ int local_scatter_idx = thread_output_prefix - tile_output_prefix;
528
+ #pragma unroll
529
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
530
+ {
531
+ if (active_mask & (1 << ITEM))
532
+ {
533
+ shared[local_scatter_idx++] = input[ITEM];
534
+ }
535
+ }
536
+ sync_threadblock();
537
+
538
+ for (int item = threadIdx.x;
539
+ item < tile_output_count;
540
+ item += BLOCK_THREADS)
541
+ {
542
+ output[tile_output_prefix + item] = shared[item];
543
+ }
544
+ }
545
+
546
+ int THRUST_DEVICE_FUNCTION
547
+ serial_set_op(key_type *keys,
548
+ int keys1_beg,
549
+ int keys2_beg,
550
+ int keys1_count,
551
+ int keys2_count,
552
+ key_type (&output)[ITEMS_PER_THREAD],
553
+ int (&indices)[ITEMS_PER_THREAD],
554
+ CompareOp compare_op,
555
+ SetOp set_op)
556
+ {
557
+ int active_mask = set_op(keys,
558
+ keys1_beg,
559
+ keys2_beg,
560
+ keys1_count,
561
+ keys2_count,
562
+ output,
563
+ indices,
564
+ compare_op);
565
+
566
+ return active_mask;
567
+ }
568
+
569
+ //---------------------------------------------------------------------
570
+ // Tile operations
571
+ //---------------------------------------------------------------------
572
+
573
+ template <bool IS_LAST_TILE>
574
+ void THRUST_DEVICE_FUNCTION
575
+ consume_tile(Size tile_idx)
576
+ {
577
+ using core::sync_threadblock;
578
+ using core::uninitialized_array;
579
+
580
+ pair<Size, Size> partition_beg = partitions[tile_idx + 0];
581
+ pair<Size, Size> partition_end = partitions[tile_idx + 1];
582
+
583
+ Size keys1_beg = partition_beg.first;
584
+ Size keys1_end = partition_end.first;
585
+ Size keys2_beg = partition_beg.second;
586
+ Size keys2_end = partition_end.second;
587
+
588
+ // number of keys per tile
589
+ //
590
+ int num_keys1 = static_cast<int>(keys1_end - keys1_beg);
591
+ int num_keys2 = static_cast<int>(keys2_end - keys2_beg);
592
+
593
+
594
+ // load keys into shared memory for further processing
595
+ key_type keys_loc[ITEMS_PER_THREAD];
596
+
597
+ gmem_to_reg<!IS_LAST_TILE>(keys_loc,
598
+ keys1_in + keys1_beg,
599
+ keys2_in + keys2_beg,
600
+ num_keys1,
601
+ num_keys2);
602
+
603
+ reg_to_shared(&storage.load_storage.keys_shared[0], keys_loc);
604
+
605
+ sync_threadblock();
606
+
607
+ int diag_loc = min<int>(ITEMS_PER_THREAD * threadIdx.x,
608
+ num_keys1 + num_keys2);
609
+
610
+ pair<int, int> partition_loc =
611
+ balanced_path(&storage.load_storage.keys_shared[0],
612
+ &storage.load_storage.keys_shared[num_keys1],
613
+ num_keys1,
614
+ num_keys2,
615
+ diag_loc,
616
+ 4,
617
+ compare_op);
618
+
619
+ int keys1_beg_loc = partition_loc.first;
620
+ int keys2_beg_loc = partition_loc.second;
621
+
622
+ // compute difference between next and current thread
623
+ // to obtain number of elements per thread
624
+ int value = threadIdx.x == 0
625
+ ? (num_keys1 << 16) | num_keys2
626
+ : (partition_loc.first << 16) | partition_loc.second;
627
+
628
+ int dst = threadIdx.x == 0 ? BLOCK_THREADS - 1 : threadIdx.x - 1;
629
+ storage.load_storage.offset[dst] = value;
630
+
631
+ core::sync_threadblock();
632
+
633
+ pair<int,int> partition1_loc = thrust::make_pair(
634
+ storage.load_storage.offset[threadIdx.x] >> 16,
635
+ storage.load_storage.offset[threadIdx.x] & 0xFFFF);
636
+
637
+ int keys1_end_loc = partition1_loc.first;
638
+ int keys2_end_loc = partition1_loc.second;
639
+
640
+ int num_keys1_loc = keys1_end_loc - keys1_beg_loc;
641
+ int num_keys2_loc = keys2_end_loc - keys2_beg_loc;
642
+
643
+ // perform serial set operation
644
+ //
645
+ int indices[ITEMS_PER_THREAD];
646
+
647
+ int active_mask = serial_set_op(&storage.load_storage.keys_shared[0],
648
+ keys1_beg_loc,
649
+ keys2_beg_loc + num_keys1,
650
+ num_keys1_loc,
651
+ num_keys2_loc,
652
+ keys_loc,
653
+ indices,
654
+ compare_op,
655
+ set_op);
656
+ sync_threadblock();
657
+ #if 0
658
+ if (ITEMS_PER_THREAD*threadIdx.x >= num_keys1 + num_keys2)
659
+ active_mask = 0;
660
+ #endif
661
+
662
+ // look-back scan over thread_output_count
663
+ // to compute global thread_output_base and tile_otput_count;
664
+ Size tile_output_count = 0;
665
+ Size thread_output_prefix = 0;
666
+ Size tile_output_prefix = 0;
667
+ Size thread_output_count = static_cast<Size>(__popc(active_mask));
668
+
669
+ if (tile_idx == 0) // first tile
670
+ {
671
+ BlockScan(storage.scan_storage.scan)
672
+ .ExclusiveSum(thread_output_count,
673
+ thread_output_prefix,
674
+ tile_output_count);
675
+ if (threadIdx.x == 0)
676
+ {
677
+ // Update tile status if this is not the last tile
678
+ if (!IS_LAST_TILE)
679
+ {
680
+ tile_state.SetInclusive(0, tile_output_count);
681
+ }
682
+ }
683
+ }
684
+ else
685
+ {
686
+ TilePrefixCallback prefix_cb(tile_state,
687
+ storage.scan_storage.prefix,
688
+ cub::Sum(),
689
+ tile_idx);
690
+
691
+ BlockScan(storage.scan_storage.scan)
692
+ .ExclusiveSum(thread_output_count,
693
+ thread_output_prefix,
694
+ prefix_cb);
695
+ tile_output_count = prefix_cb.GetBlockAggregate();
696
+ tile_output_prefix = prefix_cb.GetExclusivePrefix();
697
+ }
698
+
699
+ sync_threadblock();
700
+
701
+ // scatter results
702
+ //
703
+ scatter(keys_out,
704
+ keys_loc,
705
+ &storage.load_storage.keys_shared[0],
706
+ active_mask,
707
+ thread_output_prefix,
708
+ tile_output_prefix,
709
+ tile_output_count);
710
+
711
+ if (HAS_VALUES::value)
712
+ {
713
+ value_type values_loc[ITEMS_PER_THREAD];
714
+ gmem_to_reg<!IS_LAST_TILE>(values_loc,
715
+ values1_in + keys1_beg,
716
+ values2_in + keys2_beg,
717
+ num_keys1,
718
+ num_keys2);
719
+
720
+ sync_threadblock();
721
+
722
+ reg_to_shared(&storage.load_storage.values_shared[0], values_loc);
723
+
724
+ sync_threadblock();
725
+
726
+ // gather items from shared mem
727
+ //
728
+ #pragma unroll
729
+ for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
730
+ {
731
+ if (active_mask & (1 << ITEM))
732
+ {
733
+ values_loc[ITEM] = storage.load_storage.values_shared[indices[ITEM]];
734
+ }
735
+ }
736
+
737
+ sync_threadblock();
738
+
739
+ scatter(values_out,
740
+ values_loc,
741
+ &storage.load_storage.values_shared[0],
742
+ active_mask,
743
+ thread_output_prefix,
744
+ tile_output_prefix,
745
+ tile_output_count);
746
+ }
747
+
748
+ if (IS_LAST_TILE && threadIdx.x == 0)
749
+ {
750
+ *output_count = tile_output_prefix + tile_output_count;
751
+ }
752
+ }
753
+
754
+ //---------------------------------------------------------------------
755
+ // Constructor
756
+ //---------------------------------------------------------------------
757
+
758
+ THRUST_DEVICE_FUNCTION
759
+ impl(TempStorage & storage_,
760
+ ScanTileState &tile_state_,
761
+ KeysIt1 keys1_,
762
+ KeysIt2 keys2_,
763
+ ValuesIt1 values1_,
764
+ ValuesIt2 values2_,
765
+ Size keys1_count_,
766
+ Size keys2_count_,
767
+ KeysOutputIt keys_out_,
768
+ ValuesOutputIt values_out_,
769
+ CompareOp compare_op_,
770
+ SetOp set_op_,
771
+ pair<Size, Size> *partitions_,
772
+ std::size_t * output_count_)
773
+ : storage(storage_),
774
+ tile_state(tile_state_),
775
+ keys1_in(core::make_load_iterator(ptx_plan(), keys1_)),
776
+ keys2_in(core::make_load_iterator(ptx_plan(), keys2_)),
777
+ values1_in(core::make_load_iterator(ptx_plan(), values1_)),
778
+ values2_in(core::make_load_iterator(ptx_plan(), values2_)),
779
+ keys1_count(keys1_count_),
780
+ keys2_count(keys2_count_),
781
+ keys_out(keys_out_),
782
+ values_out(values_out_),
783
+ compare_op(compare_op_),
784
+ set_op(set_op_),
785
+ partitions(partitions_),
786
+ output_count(output_count_)
787
+ {
788
+ int tile_idx = blockIdx.x;
789
+ int num_tiles = gridDim.x;
790
+
791
+ if (tile_idx < num_tiles-1)
792
+ {
793
+ consume_tile<false>(tile_idx);
794
+ }
795
+ else
796
+ {
797
+ consume_tile<true>(tile_idx);
798
+ }
799
+ }
800
+ }; // struct impl
801
+
802
+ //---------------------------------------------------------------------
803
+ // Agent entry point
804
+ //---------------------------------------------------------------------
805
+
806
+ THRUST_AGENT_ENTRY(KeysIt1 keys1,
807
+ KeysIt2 keys2,
808
+ ValuesIt1 values1,
809
+ ValuesIt2 values2,
810
+ Size keys1_count,
811
+ Size keys2_count,
812
+ KeysOutputIt keys_output,
813
+ ValuesOutputIt values_output,
814
+ CompareOp compare_op,
815
+ SetOp set_op,
816
+ pair<Size, Size> *partitions,
817
+ std::size_t * output_count,
818
+ ScanTileState tile_state,
819
+ char * shmem)
820
+ {
821
+ TempStorage &storage = *reinterpret_cast<TempStorage *>(shmem);
822
+
823
+ impl(storage,
824
+ tile_state,
825
+ keys1,
826
+ keys2,
827
+ values1,
828
+ values2,
829
+ keys1_count,
830
+ keys2_count,
831
+ keys_output,
832
+ values_output,
833
+ compare_op,
834
+ set_op,
835
+ partitions,
836
+ output_count);
837
+ }
838
+ }; // struct SetOpAgent
839
+
840
+ template <class KeysIt1,
841
+ class KeysIt2,
842
+ class Size,
843
+ class CompareOp>
844
+ struct PartitionAgent
845
+ {
846
+ template <class Arch>
847
+ struct PtxPlan : PtxPolicy<256> {};
848
+
849
+ typedef core::specialize_plan<PtxPlan> ptx_plan;
850
+
851
+ //---------------------------------------------------------------------
852
+ // Agent entry point
853
+ //---------------------------------------------------------------------
854
+
855
+ THRUST_AGENT_ENTRY(KeysIt1 keys1,
856
+ KeysIt2 keys2,
857
+ Size keys1_count,
858
+ Size keys2_count,
859
+ Size num_partitions,
860
+ pair<Size, Size> *partitions,
861
+ CompareOp compare_op,
862
+ int items_per_tile,
863
+ char * /*shmem*/)
864
+ {
865
+ Size partition_idx = blockDim.x * blockIdx.x + threadIdx.x;
866
+ if (partition_idx < num_partitions)
867
+ {
868
+ Size partition_at = min<Size>(partition_idx * items_per_tile,
869
+ keys1_count + keys2_count);
870
+ pair<Size, Size> diag = balanced_path(keys1,
871
+ keys2,
872
+ keys1_count,
873
+ keys2_count,
874
+ partition_at,
875
+ 4ll,
876
+ compare_op);
877
+ partitions[partition_idx] = diag;
878
+ }
879
+ }
880
+ }; // struct PartitionAgent
881
+
882
+ template <class ScanTileState,
883
+ class Size>
884
+ struct InitAgent
885
+ {
886
+ template <class Arch>
887
+ struct PtxPlan : PtxPolicy<128> {};
888
+
889
+ typedef core::specialize_plan<PtxPlan> ptx_plan;
890
+
891
+ //---------------------------------------------------------------------
892
+ // Agent entry point
893
+ //---------------------------------------------------------------------
894
+
895
+ THRUST_AGENT_ENTRY(ScanTileState tile_state,
896
+ Size num_tiles,
897
+ char * /*shmem*/)
898
+ {
899
+ tile_state.InitializeStatus(num_tiles);
900
+ }
901
+ }; // struct InitAgent
902
+
903
+ //---------------------------------------------------------------------
904
+ // Serial set operations
905
+ //---------------------------------------------------------------------
906
+
907
+ // serial_set_intersection
908
+ // -----------------------
909
+ // emit A if A and B are in range and equal.
910
+ struct serial_set_intersection
911
+ {
912
+ // max_input_size <= 32
913
+ template <class T, class CompareOp, int ITEMS_PER_THREAD>
914
+ int THRUST_DEVICE_FUNCTION
915
+ operator()(T * keys,
916
+ int keys1_beg,
917
+ int keys2_beg,
918
+ int keys1_count,
919
+ int keys2_count,
920
+ T (&output)[ITEMS_PER_THREAD],
921
+ int (&indices)[ITEMS_PER_THREAD],
922
+ CompareOp compare_op)
923
+ {
924
+ int active_mask = 0;
925
+
926
+ int aBegin = keys1_beg;
927
+ int bBegin = keys2_beg;
928
+ int aEnd = keys1_beg + keys1_count;
929
+ int bEnd = keys2_beg + keys2_count;
930
+
931
+ T aKey = keys[aBegin];
932
+ T bKey = keys[bBegin];
933
+
934
+ #pragma unroll
935
+ for (int i = 0; i < ITEMS_PER_THREAD; ++i)
936
+ {
937
+ bool pA = compare_op(aKey, bKey);
938
+ bool pB = compare_op(bKey, aKey);
939
+
940
+ // The outputs must come from A by definition of set interection.
941
+ output[i] = aKey;
942
+ indices[i] = aBegin;
943
+
944
+ if ((aBegin < aEnd) && (bBegin < bEnd) && pA == pB)
945
+ active_mask |= 1 << i;
946
+
947
+ if (!pB) {aKey = keys[++aBegin]; }
948
+ if (!pA) {bKey = keys[++bBegin]; }
949
+ }
950
+ return active_mask;
951
+ }
952
+ }; // struct serial_set_intersection
953
+
954
+ // serial_set_symmetric_difference
955
+ // ---------------------
956
+ // emit A if A < B and emit B if B < A.
957
+ struct serial_set_symmetric_difference
958
+ {
959
+ // max_input_size <= 32
960
+ template <class T, class CompareOp, int ITEMS_PER_THREAD>
961
+ int THRUST_DEVICE_FUNCTION
962
+ operator()(T * keys,
963
+ int keys1_beg,
964
+ int keys2_beg,
965
+ int keys1_count,
966
+ int keys2_count,
967
+ T (&output)[ITEMS_PER_THREAD],
968
+ int (&indices)[ITEMS_PER_THREAD],
969
+ CompareOp compare_op)
970
+ {
971
+ int active_mask = 0;
972
+
973
+ int aBegin = keys1_beg;
974
+ int bBegin = keys2_beg;
975
+ int aEnd = keys1_beg + keys1_count;
976
+ int bEnd = keys2_beg + keys2_count;
977
+ int end = aEnd + bEnd;
978
+
979
+ T aKey = keys[aBegin];
980
+ T bKey = keys[bBegin];
981
+
982
+
983
+ #pragma unroll
984
+ for (int i = 0; i < ITEMS_PER_THREAD; ++i)
985
+ {
986
+ bool pB = aBegin >= aEnd;
987
+ bool pA = !pB && bBegin >= bEnd;
988
+
989
+ if (!pA && !pB)
990
+ {
991
+ pA = compare_op(aKey, bKey);
992
+ pB = !pA && compare_op(bKey, aKey);
993
+ }
994
+
995
+ // The outputs must come from A by definition of set difference.
996
+ output[i] = pA ? aKey : bKey;
997
+ indices[i] = pA ? aBegin : bBegin;
998
+
999
+ if (aBegin + bBegin < end && pA != pB)
1000
+ active_mask |= 1 << i;
1001
+
1002
+ if (!pB) {aKey = keys[++aBegin]; }
1003
+ if (!pA) {bKey = keys[++bBegin]; }
1004
+
1005
+ }
1006
+ return active_mask;
1007
+ }
1008
+ }; // struct set_symmetric_difference
1009
+
1010
+ // serial_set_difference
1011
+ // ---------------------
1012
+ // emit A if A < B
1013
+ struct serial_set_difference
1014
+ {
1015
+ // max_input_size <= 32
1016
+ template <class T, class CompareOp, int ITEMS_PER_THREAD>
1017
+ int THRUST_DEVICE_FUNCTION
1018
+ operator()(T * keys,
1019
+ int keys1_beg,
1020
+ int keys2_beg,
1021
+ int keys1_count,
1022
+ int keys2_count,
1023
+ T (&output)[ITEMS_PER_THREAD],
1024
+ int (&indices)[ITEMS_PER_THREAD],
1025
+ CompareOp compare_op)
1026
+ {
1027
+ int active_mask = 0;
1028
+
1029
+ int aBegin = keys1_beg;
1030
+ int bBegin = keys2_beg;
1031
+ int aEnd = keys1_beg + keys1_count;
1032
+ int bEnd = keys2_beg + keys2_count;
1033
+ int end = aEnd + bEnd;
1034
+
1035
+ T aKey = keys[aBegin];
1036
+ T bKey = keys[bBegin];
1037
+
1038
+ #pragma unroll
1039
+ for (int i = 0; i < ITEMS_PER_THREAD; ++i)
1040
+ {
1041
+ bool pB = aBegin >= aEnd;
1042
+ bool pA = !pB && bBegin >= bEnd;
1043
+
1044
+ if (!pA && !pB)
1045
+ {
1046
+ pA = compare_op(aKey, bKey);
1047
+ pB = !pA && compare_op(bKey, aKey);
1048
+ }
1049
+
1050
+ // The outputs must come from A by definition of set difference.
1051
+ output[i] = aKey;
1052
+ indices[i] = aBegin;
1053
+
1054
+ if (aBegin + bBegin < end && pA)
1055
+ active_mask |= 1 << i;
1056
+
1057
+ if (!pB) { aKey = keys[++aBegin]; }
1058
+ if (!pA) { bKey = keys[++bBegin]; }
1059
+ }
1060
+ return active_mask;
1061
+ }
1062
+ }; // struct set_difference
1063
+
1064
+ // serial_set_union
1065
+ // ----------------
1066
+ // emit A if A <= B else emit B
1067
+ struct serial_set_union
1068
+ {
1069
+ // max_input_size <= 32
1070
+ template <class T, class CompareOp, int ITEMS_PER_THREAD>
1071
+ int THRUST_DEVICE_FUNCTION
1072
+ operator()(T * keys,
1073
+ int keys1_beg,
1074
+ int keys2_beg,
1075
+ int keys1_count,
1076
+ int keys2_count,
1077
+ T (&output)[ITEMS_PER_THREAD],
1078
+ int (&indices)[ITEMS_PER_THREAD],
1079
+ CompareOp compare_op)
1080
+ {
1081
+ int active_mask = 0;
1082
+
1083
+ int aBegin = keys1_beg;
1084
+ int bBegin = keys2_beg;
1085
+ int aEnd = keys1_beg + keys1_count;
1086
+ int bEnd = keys2_beg + keys2_count;
1087
+ int end = aEnd + bEnd;
1088
+
1089
+ T aKey = keys[aBegin];
1090
+ T bKey = keys[bBegin];
1091
+
1092
+ #pragma unroll
1093
+ for (int i = 0; i < ITEMS_PER_THREAD; ++i)
1094
+ {
1095
+ bool pB = aBegin >= aEnd;
1096
+ bool pA = !pB && bBegin >= bEnd;
1097
+
1098
+ if (!pA && !pB)
1099
+ {
1100
+ pA = compare_op(aKey, bKey);
1101
+ pB = !pA && compare_op(bKey, aKey);
1102
+ }
1103
+
1104
+ // Output A in case of a tie, so check if b < a.
1105
+ output[i] = pB ? bKey : aKey;
1106
+ indices[i] = pB ? bBegin : aBegin;
1107
+
1108
+ if (aBegin + bBegin < end)
1109
+ active_mask |= 1 << i;
1110
+
1111
+ if (!pB) { aKey = keys[++aBegin]; }
1112
+ if (!pA) { bKey = keys[++bBegin]; }
1113
+
1114
+ }
1115
+ return active_mask;
1116
+ }
1117
+ }; // struct set_union
1118
+
1119
+ template <class HAS_VALUES,
1120
+ class KeysIt1,
1121
+ class KeysIt2,
1122
+ class ValuesIt1,
1123
+ class ValuesIt2,
1124
+ class Size,
1125
+ class KeysOutputIt,
1126
+ class ValuesOutputIt,
1127
+ class CompareOp,
1128
+ class SetOp>
1129
+ cudaError_t THRUST_RUNTIME_FUNCTION
1130
+ doit_step(void * d_temp_storage,
1131
+ size_t & temp_storage_size,
1132
+ KeysIt1 keys1,
1133
+ KeysIt2 keys2,
1134
+ ValuesIt1 values1,
1135
+ ValuesIt2 values2,
1136
+ Size num_keys1,
1137
+ Size num_keys2,
1138
+ KeysOutputIt keys_output,
1139
+ ValuesOutputIt values_output,
1140
+ std::size_t * output_count,
1141
+ CompareOp compare_op,
1142
+ SetOp set_op,
1143
+ cudaStream_t stream)
1144
+ {
1145
+ Size keys_total = num_keys1 + num_keys2;
1146
+ if (keys_total == 0)
1147
+ return cudaErrorNotSupported;
1148
+
1149
+ cudaError_t status = cudaSuccess;
1150
+
1151
+ using core::AgentPlan;
1152
+ using core::AgentLauncher;
1153
+
1154
+ typedef AgentLauncher<
1155
+ SetOpAgent<KeysIt1,
1156
+ KeysIt2,
1157
+ ValuesIt1,
1158
+ ValuesIt2,
1159
+ KeysOutputIt,
1160
+ ValuesOutputIt,
1161
+ Size,
1162
+ CompareOp,
1163
+ SetOp,
1164
+ HAS_VALUES> >
1165
+ set_op_agent;
1166
+
1167
+ typedef AgentLauncher<PartitionAgent<KeysIt1, KeysIt2, Size, CompareOp> >
1168
+ partition_agent;
1169
+
1170
+ typedef typename set_op_agent::ScanTileState ScanTileState;
1171
+ typedef AgentLauncher<InitAgent<ScanTileState, Size> > init_agent;
1172
+
1173
+
1174
+ AgentPlan set_op_plan = set_op_agent::get_plan(stream);
1175
+ AgentPlan init_plan = init_agent::get_plan();
1176
+ AgentPlan partition_plan = partition_agent::get_plan();
1177
+
1178
+ int tile_size = set_op_plan.items_per_tile;
1179
+ Size num_tiles = (keys_total + tile_size - 1) / tile_size;
1180
+
1181
+ size_t tile_agent_storage;
1182
+ status = ScanTileState::AllocationSize(static_cast<int>(num_tiles),
1183
+ tile_agent_storage);
1184
+ CUDA_CUB_RET_IF_FAIL(status);
1185
+
1186
+ size_t vshmem_storage = core::vshmem_size(set_op_plan.shared_memory_size,
1187
+ num_tiles);
1188
+ size_t partition_agent_storage = (num_tiles + 1) * sizeof(Size) * 2;
1189
+
1190
+ void *allocations[3] = {NULL, NULL, NULL};
1191
+ size_t allocation_sizes[3] = {tile_agent_storage,
1192
+ partition_agent_storage,
1193
+ vshmem_storage};
1194
+
1195
+ status = core::alias_storage(d_temp_storage,
1196
+ temp_storage_size,
1197
+ allocations,
1198
+ allocation_sizes);
1199
+ CUDA_CUB_RET_IF_FAIL(status);
1200
+
1201
+ if (d_temp_storage == NULL)
1202
+ {
1203
+ return status;
1204
+ }
1205
+
1206
+ ScanTileState tile_state;
1207
+ status = tile_state.Init(static_cast<int>(num_tiles),
1208
+ allocations[0],
1209
+ allocation_sizes[0]);
1210
+ CUDA_CUB_RET_IF_FAIL(status);
1211
+
1212
+ pair<Size, Size> *partitions = (pair<Size, Size> *)allocations[1];
1213
+ char *vshmem_ptr = vshmem_storage > 0 ? (char *)allocations[2] : NULL;
1214
+
1215
+ init_agent ia(init_plan, num_tiles, stream, "set_op::init_agent");
1216
+ ia.launch(tile_state, num_tiles);
1217
+ CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
1218
+
1219
+ partition_agent pa(partition_plan, num_tiles+1, stream, "set_op::partition agent");
1220
+ pa.launch(keys1,
1221
+ keys2,
1222
+ num_keys1,
1223
+ num_keys2,
1224
+ num_tiles+1,
1225
+ partitions,
1226
+ compare_op,
1227
+ tile_size);
1228
+ CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
1229
+
1230
+ set_op_agent sa(set_op_plan, keys_total, stream, vshmem_ptr, "set_op::set_op_agent");
1231
+ sa.launch(keys1,
1232
+ keys2,
1233
+ values1,
1234
+ values2,
1235
+ num_keys1,
1236
+ num_keys2,
1237
+ keys_output,
1238
+ values_output,
1239
+ compare_op,
1240
+ set_op,
1241
+ partitions,
1242
+ output_count,
1243
+ tile_state);
1244
+ CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
1245
+
1246
+ return status;
1247
+ }
1248
+
1249
+ template <typename HAS_VALUES,
1250
+ typename Derived,
1251
+ typename KeysIt1,
1252
+ typename KeysIt2,
1253
+ typename ValuesIt1,
1254
+ typename ValuesIt2,
1255
+ typename KeysOutputIt,
1256
+ typename ValuesOutputIt,
1257
+ typename CompareOp,
1258
+ typename SetOp>
1259
+ THRUST_RUNTIME_FUNCTION
1260
+ pair<KeysOutputIt, ValuesOutputIt>
1261
+ set_operations(execution_policy<Derived>& policy,
1262
+ KeysIt1 keys1_first,
1263
+ KeysIt1 keys1_last,
1264
+ KeysIt2 keys2_first,
1265
+ KeysIt2 keys2_last,
1266
+ ValuesIt1 values1_first,
1267
+ ValuesIt2 values2_first,
1268
+ KeysOutputIt keys_output,
1269
+ ValuesOutputIt values_output,
1270
+ CompareOp compare_op,
1271
+ SetOp set_op)
1272
+ {
1273
+ typedef typename iterator_traits<KeysIt1>::difference_type size_type;
1274
+
1275
+ size_type num_keys1 = static_cast<size_type>(thrust::distance(keys1_first, keys1_last));
1276
+ size_type num_keys2 = static_cast<size_type>(thrust::distance(keys2_first, keys2_last));
1277
+
1278
+ if (num_keys1 + num_keys2 == 0)
1279
+ return thrust::make_pair(keys_output, values_output);
1280
+
1281
+ size_t temp_storage_bytes = 0;
1282
+ cudaStream_t stream = cuda_cub::stream(policy);
1283
+
1284
+ cudaError_t status;
1285
+ THRUST_DOUBLE_INDEX_TYPE_DISPATCH(status, doit_step<HAS_VALUES>,
1286
+ num_keys1, num_keys2, (NULL,
1287
+ temp_storage_bytes,
1288
+ keys1_first,
1289
+ keys2_first,
1290
+ values1_first,
1291
+ values2_first,
1292
+ num_keys1_fixed,
1293
+ num_keys2_fixed,
1294
+ keys_output,
1295
+ values_output,
1296
+ reinterpret_cast<std::size_t*>(NULL),
1297
+ compare_op,
1298
+ set_op,
1299
+ stream));
1300
+ cuda_cub::throw_on_error(status, "set_operations failed on 1st step");
1301
+
1302
+ size_t allocation_sizes[2] = {sizeof(std::size_t), temp_storage_bytes};
1303
+ void * allocations[2] = {NULL, NULL};
1304
+
1305
+ size_t storage_size = 0;
1306
+
1307
+ status = core::alias_storage(NULL,
1308
+ storage_size,
1309
+ allocations,
1310
+ allocation_sizes);
1311
+ cuda_cub::throw_on_error(status, "set_operations failed on 1st alias_storage");
1312
+
1313
+ // Allocate temporary storage.
1314
+ thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
1315
+ tmp(policy, storage_size);
1316
+ void *ptr = static_cast<void*>(tmp.data().get());
1317
+
1318
+ status = core::alias_storage(ptr,
1319
+ storage_size,
1320
+ allocations,
1321
+ allocation_sizes);
1322
+ cuda_cub::throw_on_error(status, "set_operations failed on 2nd alias_storage");
1323
+
1324
+ std::size_t* d_output_count
1325
+ = thrust::detail::aligned_reinterpret_cast<std::size_t*>(allocations[0]);
1326
+
1327
+ THRUST_DOUBLE_INDEX_TYPE_DISPATCH(status, doit_step<HAS_VALUES>,
1328
+ num_keys1, num_keys2, (allocations[1],
1329
+ temp_storage_bytes,
1330
+ keys1_first,
1331
+ keys2_first,
1332
+ values1_first,
1333
+ values2_first,
1334
+ num_keys1_fixed,
1335
+ num_keys2_fixed,
1336
+ keys_output,
1337
+ values_output,
1338
+ d_output_count,
1339
+ compare_op,
1340
+ set_op,
1341
+ stream));
1342
+ cuda_cub::throw_on_error(status, "set_operations failed on 2nd step");
1343
+
1344
+ status = cuda_cub::synchronize(policy);
1345
+ cuda_cub::throw_on_error(status, "set_operations failed to synchronize");
1346
+
1347
+ std::size_t output_count = cuda_cub::get_value(policy, d_output_count);
1348
+
1349
+ return thrust::make_pair(keys_output + output_count, values_output + output_count);
1350
+ }
1351
+ } // namespace __set_operations
1352
+
1353
+ //-------------------------
1354
+ // Thrust API entry points
1355
+ //-------------------------
1356
+
1357
__thrust_exec_check_disable__
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt,
          class CompareOp>
OutputIt __host__ __device__
set_difference(execution_policy<Derived> &policy,
               ItemsIt1                   items1_first,
               ItemsIt1                   items1_last,
               ItemsIt2                   items2_first,
               ItemsIt2                   items2_last,
               OutputIt                   result,
               CompareOp                  compare)
{
  // Device path: run the parallel set-op driver (keys only, no values).
  // Sequential path: fall back to the host implementation.
  THRUST_CDP_DISPATCH(
    (using items1_t = thrust::iterator_value_t<ItemsIt1>;
     items1_t *null_ = nullptr;
     auto tmp = __set_operations::set_operations<thrust::detail::false_type>(
       policy,
       items1_first,
       items1_last,
       items2_first,
       items2_last,
       null_,
       null_,
       result,
       null_,
       compare,
       __set_operations::serial_set_difference());
     result = tmp.first;),
    (result = thrust::set_difference(cvt_to_seq(derived_cast(policy)),
                                     items1_first,
                                     items1_last,
                                     items2_first,
                                     items2_last,
                                     result,
                                     compare);));
  return result;
}

// Overload with the default comparator: operator< of the value type.
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt>
OutputIt __host__ __device__
set_difference(execution_policy<Derived> &policy,
               ItemsIt1                   items1_first,
               ItemsIt1                   items1_last,
               ItemsIt2                   items2_first,
               ItemsIt2                   items2_last,
               OutputIt                   result)
{
  typedef typename thrust::iterator_value<ItemsIt1>::type value_type;
  return cuda_cub::set_difference(policy,
                                  items1_first,
                                  items1_last,
                                  items2_first,
                                  items2_last,
                                  result,
                                  less<value_type>());
}
1419
+
1420
+ /*****************************/
1421
+
1422
+
1423
__thrust_exec_check_disable__
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt,
          class CompareOp>
OutputIt __host__ __device__
set_intersection(execution_policy<Derived> &policy,
                 ItemsIt1                   items1_first,
                 ItemsIt1                   items1_last,
                 ItemsIt2                   items2_first,
                 ItemsIt2                   items2_last,
                 OutputIt                   result,
                 CompareOp                  compare)
{
  // Device path: run the parallel set-op driver (keys only, no values).
  // Sequential path: fall back to the host implementation.
  THRUST_CDP_DISPATCH(
    (using items1_t = thrust::iterator_value_t<ItemsIt1>;
     // Use nullptr for consistency with the sibling entry points
     // (set_difference/set_union/set_symmetric_difference).
     items1_t *null_ = nullptr;
     auto tmp = __set_operations::set_operations<thrust::detail::false_type>(
       policy,
       items1_first,
       items1_last,
       items2_first,
       items2_last,
       null_,
       null_,
       result,
       null_,
       compare,
       __set_operations::serial_set_intersection());
     result = tmp.first;),
    (result = thrust::set_intersection(cvt_to_seq(derived_cast(policy)),
                                       items1_first,
                                       items1_last,
                                       items2_first,
                                       items2_last,
                                       result,
                                       compare);));
  return result;
}

// Overload with the default comparator: operator< of the value type.
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt>
OutputIt __host__ __device__
set_intersection(execution_policy<Derived> &policy,
                 ItemsIt1                   items1_first,
                 ItemsIt1                   items1_last,
                 ItemsIt2                   items2_first,
                 ItemsIt2                   items2_last,
                 OutputIt                   result)
{
  typedef typename thrust::iterator_value<ItemsIt1>::type value_type;
  return cuda_cub::set_intersection(policy,
                                    items1_first,
                                    items1_last,
                                    items2_first,
                                    items2_last,
                                    result,
                                    less<value_type>());
}
1485
+
1486
+
1487
+ /*****************************/
1488
+
1489
__thrust_exec_check_disable__
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt,
          class CompareOp>
OutputIt __host__ __device__
set_symmetric_difference(execution_policy<Derived> &policy,
                         ItemsIt1                   items1_first,
                         ItemsIt1                   items1_last,
                         ItemsIt2                   items2_first,
                         ItemsIt2                   items2_last,
                         OutputIt                   result,
                         CompareOp                  compare)
{
  // Device path: run the parallel set-op driver (keys only, no values).
  // Sequential path: fall back to the host implementation.
  THRUST_CDP_DISPATCH(
    (using items1_t = thrust::iterator_value_t<ItemsIt1>;
     items1_t *null_ = nullptr;
     auto tmp = __set_operations::set_operations<thrust::detail::false_type>(
       policy,
       items1_first,
       items1_last,
       items2_first,
       items2_last,
       null_,
       null_,
       result,
       null_,
       compare,
       __set_operations::serial_set_symmetric_difference());
     result = tmp.first;),
    (result = thrust::set_symmetric_difference(cvt_to_seq(derived_cast(policy)),
                                               items1_first,
                                               items1_last,
                                               items2_first,
                                               items2_last,
                                               result,
                                               compare);));
  return result;
}

// Overload with the default comparator: operator< of the value type.
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt>
OutputIt __host__ __device__
set_symmetric_difference(execution_policy<Derived> &policy,
                         ItemsIt1                   items1_first,
                         ItemsIt1                   items1_last,
                         ItemsIt2                   items2_first,
                         ItemsIt2                   items2_last,
                         OutputIt                   result)
{
  typedef typename thrust::iterator_value<ItemsIt1>::type value_type;
  return cuda_cub::set_symmetric_difference(policy,
                                            items1_first,
                                            items1_last,
                                            items2_first,
                                            items2_last,
                                            result,
                                            less<value_type>());
}
1551
+
1552
+ /*****************************/
1553
+
1554
__thrust_exec_check_disable__
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt,
          class CompareOp>
OutputIt __host__ __device__
set_union(execution_policy<Derived> &policy,
          ItemsIt1                   items1_first,
          ItemsIt1                   items1_last,
          ItemsIt2                   items2_first,
          ItemsIt2                   items2_last,
          OutputIt                   result,
          CompareOp                  compare)
{
  // Device path: run the parallel set-op driver (keys only, no values).
  // Sequential path: fall back to the host implementation.
  THRUST_CDP_DISPATCH(
    (using items1_t = thrust::iterator_value_t<ItemsIt1>;
     items1_t *null_ = nullptr;
     auto tmp = __set_operations::set_operations<thrust::detail::false_type>(
       policy,
       items1_first,
       items1_last,
       items2_first,
       items2_last,
       null_,
       null_,
       result,
       null_,
       compare,
       __set_operations::serial_set_union());
     result = tmp.first;),
    (result = thrust::set_union(cvt_to_seq(derived_cast(policy)),
                                items1_first,
                                items1_last,
                                items2_first,
                                items2_last,
                                result,
                                compare);));
  return result;
}

// Overload with the default comparator: operator< of the value type.
template <class Derived,
          class ItemsIt1,
          class ItemsIt2,
          class OutputIt>
OutputIt __host__ __device__
set_union(execution_policy<Derived> &policy,
          ItemsIt1                   items1_first,
          ItemsIt1                   items1_last,
          ItemsIt2                   items2_first,
          ItemsIt2                   items2_last,
          OutputIt                   result)
{
  typedef typename thrust::iterator_value<ItemsIt1>::type value_type;
  return cuda_cub::set_union(policy,
                             items1_first,
                             items1_last,
                             items2_first,
                             items2_last,
                             result,
                             less<value_type>());
}
1616
+
1617
+
1618
+ /*****************************/
1619
+ /*****************************/
1620
+ /***** *_by_key *****/
1621
+ /*****************************/
1622
+ /*****************************/
1623
+
1624
+ /*****************************/
1625
+
1626
__thrust_exec_check_disable__
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt,
          class CompareOp>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_difference_by_key(execution_policy<Derived> &policy,
                      KeysIt1                    keys1_first,
                      KeysIt1                    keys1_last,
                      KeysIt2                    keys2_first,
                      KeysIt2                    keys2_last,
                      ItemsIt1                   items1_first,
                      ItemsIt2                   items2_first,
                      KeysOutputIt               keys_result,
                      ItemsOutputIt              items_result,
                      CompareOp                  compare_op)
{
  // Device path: run the parallel set-op driver with values enabled.
  // Sequential path: fall back to the host implementation.
  auto ret = thrust::make_pair(keys_result, items_result);
  THRUST_CDP_DISPATCH(
    (ret = __set_operations::set_operations<thrust::detail::true_type>(
       policy,
       keys1_first,
       keys1_last,
       keys2_first,
       keys2_last,
       items1_first,
       items2_first,
       keys_result,
       items_result,
       compare_op,
       __set_operations::serial_set_difference());),
    (ret = thrust::set_difference_by_key(cvt_to_seq(derived_cast(policy)),
                                         keys1_first,
                                         keys1_last,
                                         keys2_first,
                                         keys2_last,
                                         items1_first,
                                         items2_first,
                                         keys_result,
                                         items_result,
                                         compare_op);));
  return ret;
}

// Overload with the default comparator: operator< of the key type.
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_difference_by_key(execution_policy<Derived> &policy,
                      KeysIt1                    keys1_first,
                      KeysIt1                    keys1_last,
                      KeysIt2                    keys2_first,
                      KeysIt2                    keys2_last,
                      ItemsIt1                   items1_first,
                      ItemsIt2                   items2_first,
                      KeysOutputIt               keys_result,
                      ItemsOutputIt              items_result)
{
  typedef typename thrust::iterator_value<KeysIt1>::type value_type;
  return cuda_cub::set_difference_by_key(policy,
                                         keys1_first,
                                         keys1_last,
                                         keys2_first,
                                         keys2_last,
                                         items1_first,
                                         items2_first,
                                         keys_result,
                                         items_result,
                                         less<value_type>());
}
1704
+
1705
+ /*****************************/
1706
+
1707
__thrust_exec_check_disable__
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt,
          class CompareOp>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_intersection_by_key(execution_policy<Derived> &policy,
                        KeysIt1                    keys1_first,
                        KeysIt1                    keys1_last,
                        KeysIt2                    keys2_first,
                        KeysIt2                    keys2_last,
                        ItemsIt1                   items1_first,
                        KeysOutputIt               keys_result,
                        ItemsOutputIt              items_result,
                        CompareOp                  compare_op)
{
  // Device path: run the parallel set-op driver with values enabled.
  // Intersection only ever emits items from the first range, so
  // items1_first is passed for both value inputs.
  auto ret = thrust::make_pair(keys_result, items_result);
  THRUST_CDP_DISPATCH(
    (ret = __set_operations::set_operations<thrust::detail::true_type>(
       policy,
       keys1_first,
       keys1_last,
       keys2_first,
       keys2_last,
       items1_first,
       items1_first,
       keys_result,
       items_result,
       compare_op,
       __set_operations::serial_set_intersection());),
    (ret = thrust::set_intersection_by_key(cvt_to_seq(derived_cast(policy)),
                                           keys1_first,
                                           keys1_last,
                                           keys2_first,
                                           keys2_last,
                                           items1_first,
                                           keys_result,
                                           items_result,
                                           compare_op);));
  return ret;
}

// Overload with the default comparator: operator< of the key type.
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_intersection_by_key(execution_policy<Derived> &policy,
                        KeysIt1                    keys1_first,
                        KeysIt1                    keys1_last,
                        KeysIt2                    keys2_first,
                        KeysIt2                    keys2_last,
                        ItemsIt1                   items1_first,
                        KeysOutputIt               keys_result,
                        ItemsOutputIt              items_result)
{
  typedef typename thrust::iterator_value<KeysIt1>::type value_type;
  return cuda_cub::set_intersection_by_key(policy,
                                           keys1_first,
                                           keys1_last,
                                           keys2_first,
                                           keys2_last,
                                           items1_first,
                                           keys_result,
                                           items_result,
                                           less<value_type>());
}
1781
+
1782
+ /*****************************/
1783
+
1784
__thrust_exec_check_disable__
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt,
          class CompareOp>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_symmetric_difference_by_key(execution_policy<Derived> &policy,
                                KeysIt1                    keys1_first,
                                KeysIt1                    keys1_last,
                                KeysIt2                    keys2_first,
                                KeysIt2                    keys2_last,
                                ItemsIt1                   items1_first,
                                ItemsIt2                   items2_first,
                                KeysOutputIt               keys_result,
                                ItemsOutputIt              items_result,
                                CompareOp                  compare_op)
{
  // Device path: run the parallel set-op driver with values enabled.
  // Sequential path: fall back to the host implementation.
  auto ret = thrust::make_pair(keys_result, items_result);
  THRUST_CDP_DISPATCH(
    (ret = __set_operations::set_operations<thrust::detail::true_type>(
       policy,
       keys1_first,
       keys1_last,
       keys2_first,
       keys2_last,
       items1_first,
       items2_first,
       keys_result,
       items_result,
       compare_op,
       __set_operations::serial_set_symmetric_difference());),
    (ret =
       thrust::set_symmetric_difference_by_key(cvt_to_seq(derived_cast(policy)),
                                               keys1_first,
                                               keys1_last,
                                               keys2_first,
                                               keys2_last,
                                               items1_first,
                                               items2_first,
                                               keys_result,
                                               items_result,
                                               compare_op);));
  return ret;
}

// Overload with the default comparator: operator< of the key type.
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_symmetric_difference_by_key(execution_policy<Derived> &policy,
                                KeysIt1                    keys1_first,
                                KeysIt1                    keys1_last,
                                KeysIt2                    keys2_first,
                                KeysIt2                    keys2_last,
                                ItemsIt1                   items1_first,
                                ItemsIt2                   items2_first,
                                KeysOutputIt               keys_result,
                                ItemsOutputIt              items_result)
{
  typedef typename thrust::iterator_value<KeysIt1>::type value_type;
  return cuda_cub::set_symmetric_difference_by_key(policy,
                                                   keys1_first,
                                                   keys1_last,
                                                   keys2_first,
                                                   keys2_last,
                                                   items1_first,
                                                   items2_first,
                                                   keys_result,
                                                   items_result,
                                                   less<value_type>());
}
1863
+
1864
+ /*****************************/
1865
+
1866
__thrust_exec_check_disable__
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt,
          class CompareOp>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_union_by_key(execution_policy<Derived> &policy,
                 KeysIt1                    keys1_first,
                 KeysIt1                    keys1_last,
                 KeysIt2                    keys2_first,
                 KeysIt2                    keys2_last,
                 ItemsIt1                   items1_first,
                 ItemsIt2                   items2_first,
                 KeysOutputIt               keys_result,
                 ItemsOutputIt              items_result,
                 CompareOp                  compare_op)
{
  // Device path: run the parallel set-op driver with values enabled.
  // Sequential path: fall back to the host implementation.
  auto ret = thrust::make_pair(keys_result, items_result);
  THRUST_CDP_DISPATCH(
    (ret = __set_operations::set_operations<thrust::detail::true_type>(
       policy,
       keys1_first,
       keys1_last,
       keys2_first,
       keys2_last,
       items1_first,
       items2_first,
       keys_result,
       items_result,
       compare_op,
       __set_operations::serial_set_union());),
    (ret = thrust::set_union_by_key(cvt_to_seq(derived_cast(policy)),
                                    keys1_first,
                                    keys1_last,
                                    keys2_first,
                                    keys2_last,
                                    items1_first,
                                    items2_first,
                                    keys_result,
                                    items_result,
                                    compare_op);));
  return ret;
}

// Overload with the default comparator: operator< of the key type.
template <class Derived,
          class KeysIt1,
          class KeysIt2,
          class ItemsIt1,
          class ItemsIt2,
          class KeysOutputIt,
          class ItemsOutputIt>
pair<KeysOutputIt, ItemsOutputIt> __host__ __device__
set_union_by_key(execution_policy<Derived> &policy,
                 KeysIt1                    keys1_first,
                 KeysIt1                    keys1_last,
                 KeysIt2                    keys2_first,
                 KeysIt2                    keys2_last,
                 ItemsIt1                   items1_first,
                 ItemsIt2                   items2_first,
                 KeysOutputIt               keys_result,
                 ItemsOutputIt              items_result)
{
  typedef typename thrust::iterator_value<KeysIt1>::type value_type;
  return cuda_cub::set_union_by_key(policy,
                                    keys1_first,
                                    keys1_last,
                                    keys2_first,
                                    keys2_last,
                                    items1_first,
                                    items2_first,
                                    keys_result,
                                    items_result,
                                    less<value_type>());
}
1944
+
1945
+ } // namespace cuda_cub
1946
+ THRUST_NAMESPACE_END
1947
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/sort.h ADDED
@@ -0,0 +1,636 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <thrust/distance.h>
41
+ #include <thrust/extrema.h>
42
+ #include <thrust/sequence.h>
43
+ #include <thrust/sort.h>
44
+
45
+ #include <thrust/system/cuda/config.h>
46
+ #include <thrust/system/cuda/detail/cdp_dispatch.h>
47
+ #include <thrust/system/cuda/detail/core/agent_launcher.h>
48
+ #include <thrust/system/cuda/detail/core/util.h>
49
+ #include <thrust/system/cuda/detail/execution_policy.h>
50
+ #include <thrust/system/cuda/detail/par_to_seq.h>
51
+ #include <thrust/system/cuda/detail/util.h>
52
+
53
+ #include <thrust/detail/alignment.h>
54
+ #include <thrust/detail/cstdint.h>
55
+ #include <thrust/detail/integer_math.h>
56
+ #include <thrust/detail/temporary_array.h>
57
+ #include <thrust/detail/trivial_sequence.h>
58
+
59
+ #include <thrust/type_traits/is_contiguous_iterator.h>
60
+
61
+ #include <cub/device/device_radix_sort.cuh>
62
+ #include <cub/device/device_merge_sort.cuh>
63
+
64
+ THRUST_NAMESPACE_BEGIN
65
+ namespace cuda_cub {
66
+
67
+ namespace __merge_sort {
68
+
69
// Keys-only merge sort step: sizes the temporary storage when
// d_temp_storage is null, otherwise launches the sort. The items
// iterator is ignored; cub::NullType* signals "no values" to CUB.
template <class KeysIt,
          class ItemsIt,
          class Size,
          class CompareOp>
THRUST_RUNTIME_FUNCTION cudaError_t
doit_step(void *d_temp_storage,
          size_t &temp_storage_bytes,
          KeysIt keys,
          ItemsIt /* items: unused in the keys-only path */,
          Size keys_count,
          CompareOp compare_op,
          cudaStream_t stream,
          thrust::detail::integral_constant<bool, false> /* sort_keys */)
{
  using NullItemsIt = cub::NullType *;
  using MergeSortDispatchT = cub::DispatchMergeSort<KeysIt,
                                                    NullItemsIt,
                                                    KeysIt,
                                                    NullItemsIt,
                                                    Size,
                                                    CompareOp>;

  NullItemsIt no_items = nullptr;

  return MergeSortDispatchT::Dispatch(d_temp_storage,
                                      temp_storage_bytes,
                                      keys,
                                      no_items,
                                      keys,
                                      no_items,
                                      keys_count,
                                      compare_op,
                                      stream);
}
104
+
105
// Key-value merge sort step: sizes the temporary storage when
// d_temp_storage is null, otherwise launches the sort over both
// key and item sequences (sorted in place).
template <class KeysIt,
          class ItemsIt,
          class Size,
          class CompareOp>
THRUST_RUNTIME_FUNCTION cudaError_t
doit_step(void *d_temp_storage,
          size_t &temp_storage_bytes,
          KeysIt keys,
          ItemsIt items,
          Size keys_count,
          CompareOp compare_op,
          cudaStream_t stream,
          thrust::detail::integral_constant<bool, true> /* sort_items */)
{
  using MergeSortDispatchT = cub::DispatchMergeSort<KeysIt,
                                                    ItemsIt,
                                                    KeysIt,
                                                    ItemsIt,
                                                    Size,
                                                    CompareOp>;

  return MergeSortDispatchT::Dispatch(d_temp_storage,
                                      temp_storage_bytes,
                                      keys,
                                      items,
                                      keys,
                                      items,
                                      keys_count,
                                      compare_op,
                                      stream);
}
132
+
133
// Entry point for a merge-sort step: early-outs on an empty input and
// otherwise tag-dispatches to the keys-only or key-value overload based
// on SORT_ITEMS::value.
template <class SORT_ITEMS,
          class /* STABLE */,
          class KeysIt,
          class ItemsIt,
          class Size,
          class CompareOp>
THRUST_RUNTIME_FUNCTION cudaError_t
doit_step(void *d_temp_storage,
          size_t &temp_storage_bytes,
          KeysIt keys,
          ItemsIt items,
          Size keys_count,
          CompareOp compare_op,
          cudaStream_t stream)
{
  if (keys_count == 0)
  {
    // Nothing to sort; also requests zero temporary storage.
    return cudaSuccess;
  }

  thrust::detail::integral_constant<bool, SORT_ITEMS::value> sort_items_tag{};
  return doit_step(d_temp_storage,
                   temp_storage_bytes,
                   keys,
                   items,
                   keys_count,
                   compare_op,
                   stream,
                   sort_items_tag);
}
164
+
165
+ template <typename SORT_ITEMS,
166
+ typename STABLE,
167
+ typename Derived,
168
+ typename KeysIt,
169
+ typename ItemsIt,
170
+ typename CompareOp>
171
+ THRUST_RUNTIME_FUNCTION
172
+ void merge_sort(execution_policy<Derived>& policy,
173
+ KeysIt keys_first,
174
+ KeysIt keys_last,
175
+ ItemsIt items_first,
176
+ CompareOp compare_op)
177
+
178
+ {
179
+ typedef typename iterator_traits<KeysIt>::difference_type size_type;
180
+
181
+ size_type count = static_cast<size_type>(thrust::distance(keys_first, keys_last));
182
+
183
+ size_t storage_size = 0;
184
+ cudaStream_t stream = cuda_cub::stream(policy);
185
+
186
+ cudaError_t status;
187
+ status = doit_step<SORT_ITEMS, STABLE>(NULL,
188
+ storage_size,
189
+ keys_first,
190
+ items_first,
191
+ count,
192
+ compare_op,
193
+ stream);
194
+ cuda_cub::throw_on_error(status, "merge_sort: failed on 1st step");
195
+
196
+ // Allocate temporary storage.
197
+ thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
198
+ tmp(policy, storage_size);
199
+ void *ptr = static_cast<void*>(tmp.data().get());
200
+
201
+ status = doit_step<SORT_ITEMS, STABLE>(ptr,
202
+ storage_size,
203
+ keys_first,
204
+ items_first,
205
+ count,
206
+ compare_op,
207
+ stream);
208
+ cuda_cub::throw_on_error(status, "merge_sort: failed on 2nd step");
209
+
210
+ status = cuda_cub::synchronize_optional(policy);
211
+ cuda_cub::throw_on_error(status, "merge_sort: failed to synchronize");
212
+ }
213
+ } // namespace __merge_sort
214
+
215
+ namespace __radix_sort {
216
+
217
// Compile-time selection of the CUB radix-sort entry point from
// (sort values?, comparator). Only thrust::less / thrust::greater are
// supported here; the full key width [0, 8*sizeof(Key)) is sorted.
template <class SORT_ITEMS, class Comparator>
struct dispatch;

// Keys only, ascending.
template <class K>
struct dispatch<thrust::detail::false_type, thrust::less<K> >
{
  template <class Key, class Item, class Size>
  THRUST_RUNTIME_FUNCTION static cudaError_t
  doit(void   *d_temp_storage,
       size_t &temp_storage_bytes,
       cub::DoubleBuffer<Key> &keys_buffer,
       cub::DoubleBuffer<Item> & /*items_buffer*/,
       Size count,
       cudaStream_t stream)
  {
    return cub::DeviceRadixSort::SortKeys(d_temp_storage,
                                          temp_storage_bytes,
                                          keys_buffer,
                                          static_cast<int>(count),
                                          0,
                                          static_cast<int>(sizeof(Key) * 8),
                                          stream);
  }
}; // keys only, ascending

// Keys only, descending.
template <class K>
struct dispatch<thrust::detail::false_type, thrust::greater<K> >
{
  template <class Key, class Item, class Size>
  THRUST_RUNTIME_FUNCTION static cudaError_t
  doit(void   *d_temp_storage,
       size_t &temp_storage_bytes,
       cub::DoubleBuffer<Key> &keys_buffer,
       cub::DoubleBuffer<Item> & /*items_buffer*/,
       Size count,
       cudaStream_t stream)
  {
    return cub::DeviceRadixSort::SortKeysDescending(
      d_temp_storage,
      temp_storage_bytes,
      keys_buffer,
      static_cast<int>(count),
      0,
      static_cast<int>(sizeof(Key) * 8),
      stream);
  }
}; // keys only, descending

// Key-value pairs, ascending.
template <class K>
struct dispatch<thrust::detail::true_type, thrust::less<K> >
{
  template <class Key, class Item, class Size>
  THRUST_RUNTIME_FUNCTION static cudaError_t
  doit(void   *d_temp_storage,
       size_t &temp_storage_bytes,
       cub::DoubleBuffer<Key>  &keys_buffer,
       cub::DoubleBuffer<Item> &items_buffer,
       Size count,
       cudaStream_t stream)
  {
    return cub::DeviceRadixSort::SortPairs(d_temp_storage,
                                           temp_storage_bytes,
                                           keys_buffer,
                                           items_buffer,
                                           static_cast<int>(count),
                                           0,
                                           static_cast<int>(sizeof(Key) * 8),
                                           stream);
  }
}; // pairs, ascending

// Key-value pairs, descending.
template <class K>
struct dispatch<thrust::detail::true_type, thrust::greater<K> >
{
  template <class Key, class Item, class Size>
  THRUST_RUNTIME_FUNCTION static cudaError_t
  doit(void   *d_temp_storage,
       size_t &temp_storage_bytes,
       cub::DoubleBuffer<Key>  &keys_buffer,
       cub::DoubleBuffer<Item> &items_buffer,
       Size count,
       cudaStream_t stream)
  {
    return cub::DeviceRadixSort::SortPairsDescending(
      d_temp_storage,
      temp_storage_bytes,
      keys_buffer,
      items_buffer,
      static_cast<int>(count),
      0,
      static_cast<int>(sizeof(Key) * 8),
      stream);
  }
}; // pairs, descending
313
+
314
+ template <typename SORT_ITEMS,
315
+ typename Derived,
316
+ typename Key,
317
+ typename Item,
318
+ typename Size,
319
+ typename CompareOp>
320
+ THRUST_RUNTIME_FUNCTION
321
+ void radix_sort(execution_policy<Derived>& policy,
322
+ Key* keys,
323
+ Item* items,
324
+ Size count,
325
+ CompareOp)
326
+ {
327
+ size_t temp_storage_bytes = 0;
328
+ cudaStream_t stream = cuda_cub::stream(policy);
329
+
330
+ cub::DoubleBuffer<Key> keys_buffer(keys, NULL);
331
+ cub::DoubleBuffer<Item> items_buffer(items, NULL);
332
+
333
+ Size keys_count = count;
334
+ Size items_count = SORT_ITEMS::value ? count : 0;
335
+
336
+ cudaError_t status;
337
+
338
+ status = dispatch<SORT_ITEMS, CompareOp>::doit(NULL,
339
+ temp_storage_bytes,
340
+ keys_buffer,
341
+ items_buffer,
342
+ keys_count,
343
+ stream);
344
+ cuda_cub::throw_on_error(status, "radix_sort: failed on 1st step");
345
+
346
+ size_t keys_temp_storage = core::align_to(sizeof(Key) * keys_count, 128);
347
+ size_t items_temp_storage = core::align_to(sizeof(Item) * items_count, 128);
348
+
349
+ size_t storage_size = keys_temp_storage
350
+ + items_temp_storage
351
+ + temp_storage_bytes;
352
+
353
+ // Allocate temporary storage.
354
+ thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
355
+ tmp(policy, storage_size);
356
+
357
+ keys_buffer.d_buffers[1] = thrust::detail::aligned_reinterpret_cast<Key*>(
358
+ tmp.data().get()
359
+ );
360
+ items_buffer.d_buffers[1] = thrust::detail::aligned_reinterpret_cast<Item*>(
361
+ tmp.data().get() + keys_temp_storage
362
+ );
363
+ void *ptr = static_cast<void*>(
364
+ tmp.data().get() + keys_temp_storage + items_temp_storage
365
+ );
366
+
367
+ status = dispatch<SORT_ITEMS, CompareOp>::doit(ptr,
368
+ temp_storage_bytes,
369
+ keys_buffer,
370
+ items_buffer,
371
+ keys_count,
372
+ stream);
373
+ cuda_cub::throw_on_error(status, "radix_sort: failed on 2nd step");
374
+
375
+ if (keys_buffer.selector != 0)
376
+ {
377
+ Key* temp_ptr = reinterpret_cast<Key*>(keys_buffer.d_buffers[1]);
378
+ cuda_cub::copy_n(policy, temp_ptr, keys_count, keys);
379
+ }
380
+ THRUST_IF_CONSTEXPR(SORT_ITEMS::value)
381
+ {
382
+ if (items_buffer.selector != 0)
383
+ {
384
+ Item *temp_ptr = reinterpret_cast<Item *>(items_buffer.d_buffers[1]);
385
+ cuda_cub::copy_n(policy, temp_ptr, items_count, items);
386
+ }
387
+ }
388
+ }
389
+ } // __radix_sort
390
+
391
+ //---------------------------------------------------------------------
392
+ // Smart sort picks at compile-time whether to dispatch radix or merge sort
393
+ //---------------------------------------------------------------------
394
+
395
+ namespace __smart_sort {
396
+
397
// Radix sort is applicable only when the key is an arithmetic type and the
// comparator is exactly thrust::less<Key> or thrust::greater<Key>.
template <class Key, class CompareOp>
struct can_use_primitive_sort
    : thrust::detail::and_<
        thrust::detail::is_arithmetic<Key>,
        thrust::detail::or_<
          thrust::detail::is_same<CompareOp, thrust::less<Key> >,
          thrust::detail::is_same<CompareOp, thrust::greater<Key> > > > {};

// SFINAE helper: enabled when the (key, comparator) pair qualifies for
// radix sort.
template <class Iterator, class CompareOp>
struct enable_if_primitive_sort
    : thrust::detail::enable_if<
        can_use_primitive_sort<typename iterator_value<Iterator>::type,
                               CompareOp>::value> {};

// SFINAE helper: the complement — enabled when merge (comparison) sort
// must be used instead.
template <class Iterator, class CompareOp>
struct enable_if_comparison_sort
    : thrust::detail::disable_if<
        can_use_primitive_sort<typename iterator_value<Iterator>::type,
                               CompareOp>::value> {};
416
+
417
+
418
+ template <class SORT_ITEMS,
419
+ class STABLE,
420
+ class Policy,
421
+ class KeysIt,
422
+ class ItemsIt,
423
+ class CompareOp>
424
+ THRUST_RUNTIME_FUNCTION typename enable_if_comparison_sort<KeysIt, CompareOp>::type
425
+ smart_sort(Policy& policy,
426
+ KeysIt keys_first,
427
+ KeysIt keys_last,
428
+ ItemsIt items_first,
429
+ CompareOp compare_op)
430
+ {
431
+ __merge_sort::merge_sort<SORT_ITEMS, STABLE>(policy,
432
+ keys_first,
433
+ keys_last,
434
+ items_first,
435
+ compare_op);
436
+
437
+ }
438
+
439
+ template <class SORT_ITEMS,
440
+ class STABLE,
441
+ class Policy,
442
+ class KeysIt,
443
+ class ItemsIt,
444
+ class CompareOp>
445
+ THRUST_RUNTIME_FUNCTION typename enable_if_primitive_sort<KeysIt, CompareOp>::type
446
+ smart_sort(execution_policy<Policy>& policy,
447
+ KeysIt keys_first,
448
+ KeysIt keys_last,
449
+ ItemsIt items_first,
450
+ CompareOp compare_op)
451
+ {
452
+ // ensure sequences have trivial iterators
453
+ thrust::detail::trivial_sequence<KeysIt, Policy>
454
+ keys(policy, keys_first, keys_last);
455
+
456
+ if (SORT_ITEMS::value)
457
+ {
458
+ thrust::detail::trivial_sequence<ItemsIt, Policy>
459
+ values(policy, items_first, items_first + (keys_last - keys_first));
460
+
461
+ __radix_sort::radix_sort<SORT_ITEMS>(
462
+ policy,
463
+ thrust::raw_pointer_cast(&*keys.begin()),
464
+ thrust::raw_pointer_cast(&*values.begin()),
465
+ keys_last - keys_first,
466
+ compare_op);
467
+
468
+ if (!is_contiguous_iterator<ItemsIt>::value)
469
+ {
470
+ cuda_cub::copy(policy, values.begin(), values.end(), items_first);
471
+ }
472
+ }
473
+ else
474
+ {
475
+ __radix_sort::radix_sort<SORT_ITEMS>(
476
+ policy,
477
+ thrust::raw_pointer_cast(&*keys.begin()),
478
+ thrust::raw_pointer_cast(&*keys.begin()),
479
+ keys_last - keys_first,
480
+ compare_op);
481
+ }
482
+
483
+ // copy results back, if necessary
484
+ if (!is_contiguous_iterator<KeysIt>::value)
485
+ {
486
+ cuda_cub::copy(policy, keys.begin(), keys.end(), keys_first);
487
+ }
488
+
489
+ cuda_cub::throw_on_error(
490
+ cuda_cub::synchronize_optional(policy),
491
+ "smart_sort: failed to synchronize");
492
+ }
493
+ } // namespace __smart_sort
494
+
495
+
496
+ //-------------------------
497
+ // Thrust API entry points
498
+ //-------------------------
499
+
500
+
501
// Unstable sort with a user comparator. On-device (CDP) path dispatches
// to smart_sort (no values, not stable); the fallback runs the sequential
// implementation.
__thrust_exec_check_disable__
template <class Derived, class ItemsIt, class CompareOp>
void __host__ __device__
sort(execution_policy<Derived> &policy,
     ItemsIt first,
     ItemsIt last,
     CompareOp compare_op)
{
  THRUST_CDP_DISPATCH(
    (using item_t = thrust::iterator_value_t<ItemsIt>;
     item_t *no_items = nullptr;
     __smart_sort::smart_sort<thrust::detail::false_type,
                              thrust::detail::false_type>(policy,
                                                          first,
                                                          last,
                                                          no_items,
                                                          compare_op);),
    (thrust::sort(cvt_to_seq(derived_cast(policy)), first, last, compare_op);));
}
519
+
520
// Stable sort with a user comparator: same dispatch as sort(), but the
// STABLE flag is true_type.
__thrust_exec_check_disable__
template <class Derived, class ItemsIt, class CompareOp>
void __host__ __device__
stable_sort(execution_policy<Derived> &policy,
            ItemsIt first,
            ItemsIt last,
            CompareOp compare_op)
{
  THRUST_CDP_DISPATCH(
    (using item_t = thrust::iterator_value_t<ItemsIt>;
     item_t *no_items = nullptr;
     __smart_sort::smart_sort<thrust::detail::false_type,
                              thrust::detail::true_type>(policy,
                                                         first,
                                                         last,
                                                         no_items,
                                                         compare_op);),
    (thrust::stable_sort(cvt_to_seq(derived_cast(policy)),
                         first,
                         last,
                         compare_op);));
}
541
+
542
// Unstable key-value sort with a user comparator. SORT_ITEMS is true_type
// so values are permuted along with the keys.
__thrust_exec_check_disable__
template <class Derived, class KeysIt, class ValuesIt, class CompareOp>
void __host__ __device__
sort_by_key(execution_policy<Derived> &policy,
            KeysIt keys_first,
            KeysIt keys_last,
            ValuesIt values,
            CompareOp compare_op)
{
  THRUST_CDP_DISPATCH(
    (__smart_sort::smart_sort<thrust::detail::true_type,
                              thrust::detail::false_type>(policy,
                                                          keys_first,
                                                          keys_last,
                                                          values,
                                                          compare_op);),
    (thrust::sort_by_key(cvt_to_seq(derived_cast(policy)),
                         keys_first,
                         keys_last,
                         values,
                         compare_op);));
}
564
+
565
// Stable key-value sort with a user comparator: SORT_ITEMS and STABLE
// are both true_type.
__thrust_exec_check_disable__
template <class Derived,
          class KeysIt,
          class ValuesIt,
          class CompareOp>
void __host__ __device__
stable_sort_by_key(execution_policy<Derived> &policy,
                   KeysIt keys_first,
                   KeysIt keys_last,
                   ValuesIt values,
                   CompareOp compare_op)
{
  THRUST_CDP_DISPATCH(
    (__smart_sort::smart_sort<thrust::detail::true_type,
                              thrust::detail::true_type>(policy,
                                                         keys_first,
                                                         keys_last,
                                                         values,
                                                         compare_op);),
    (thrust::stable_sort_by_key(cvt_to_seq(derived_cast(policy)),
                                keys_first,
                                keys_last,
                                values,
                                compare_op);));
}
590
+
591
+ // API with default comparator
592
+
593
// sort() without a comparator: defaults to ascending order via
// thrust::less on the element type.
template <class Derived, class ItemsIt>
void __host__ __device__
sort(execution_policy<Derived> &policy,
     ItemsIt first,
     ItemsIt last)
{
  using item_type = typename thrust::iterator_value<ItemsIt>::type;
  cuda_cub::sort(policy, first, last, less<item_type>());
}
602
+
603
// stable_sort() without a comparator: defaults to ascending order via
// thrust::less on the element type.
template <class Derived, class ItemsIt>
void __host__ __device__
stable_sort(execution_policy<Derived> &policy,
            ItemsIt first,
            ItemsIt last)
{
  using item_type = typename thrust::iterator_value<ItemsIt>::type;
  cuda_cub::stable_sort(policy, first, last, less<item_type>());
}
612
+
613
// sort_by_key() without a comparator: defaults to ascending key order.
template <class Derived, class KeysIt, class ValuesIt>
void __host__ __device__
sort_by_key(execution_policy<Derived> &policy,
            KeysIt keys_first,
            KeysIt keys_last,
            ValuesIt values)
{
  using key_type = typename thrust::iterator_value<KeysIt>::type;
  cuda_cub::sort_by_key(policy, keys_first, keys_last, values, less<key_type>());
}
623
+
624
// stable_sort_by_key() without a comparator: defaults to ascending
// key order.
template <class Derived, class KeysIt, class ValuesIt>
void __host__ __device__
stable_sort_by_key(execution_policy<Derived> &policy,
                   KeysIt keys_first,
                   KeysIt keys_last,
                   ValuesIt values)
{
  using key_type = typename thrust::iterator_value<KeysIt>::type;
  cuda_cub::stable_sort_by_key(policy, keys_first, keys_last, values, less<key_type>());
}
632
+
633
+
634
+ } // namespace cuda_cub
635
+ THRUST_NAMESPACE_END
636
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/swap_ranges.h ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <iterator>
41
+ #include <thrust/system/cuda/detail/transform.h>
42
+ #include <thrust/system/cuda/detail/par_to_seq.h>
43
+ #include <thrust/swap.h>
44
+ #include <thrust/system/cuda/detail/parallel_for.h>
45
+ #include <thrust/distance.h>
46
+
47
+ THRUST_NAMESPACE_BEGIN
48
+
49
+ namespace cuda_cub {
50
+
51
+ namespace __swap_ranges {
52
+
53
+
54
// Per-index functor for parallel_for: exchanges items1[idx] and
// items2[idx]. Both elements are copied into locals, swapped there, and
// written back — thrust::swap applied directly to proxy references is
// unsafe (see original note), so this avoids swapping through references.
// *todo* specialize on real & proxy references.
template <class ItemsIt1, class ItemsIt2>
struct swap_f
{
  ItemsIt1 items1;
  ItemsIt2 items2;

  using value1_type = typename iterator_traits<ItemsIt1>::value_type;
  using value2_type = typename iterator_traits<ItemsIt2>::value_type;

  THRUST_FUNCTION
  swap_f(ItemsIt1 first1, ItemsIt2 first2)
      : items1(first1)
      , items2(first2)
  {}

  template <class Size>
  void THRUST_DEVICE_FUNCTION operator()(Size idx)
  {
    // Copy out, swap locally, write back.
    value1_type lhs = items1[idx];
    value2_type rhs = items2[idx];
    using thrust::swap;
    swap(lhs, rhs);
    items1[idx] = lhs;
    items2[idx] = rhs;
  }
};
83
+ } // namespace __swap_ranges
84
+
85
+ template <class Derived,
86
+ class ItemsIt1,
87
+ class ItemsIt2>
88
+ ItemsIt2 __host__ __device__
89
+ swap_ranges(execution_policy<Derived> &policy,
90
+ ItemsIt1 first1,
91
+ ItemsIt1 last1,
92
+ ItemsIt2 first2)
93
+ {
94
+ typedef typename iterator_traits<ItemsIt1>::difference_type size_type;
95
+
96
+ size_type num_items = static_cast<size_type>(thrust::distance(first1, last1));
97
+
98
+ cuda_cub::parallel_for(policy,
99
+ __swap_ranges::swap_f<ItemsIt1,
100
+ ItemsIt2>(first1, first2),
101
+ num_items);
102
+
103
+ return first2 + num_items;
104
+ }
105
+
106
+
107
+ } // namespace cuda_cub
108
+
109
+ THRUST_NAMESPACE_END
110
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/tabulate.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <thrust/distance.h>
41
+ #include <thrust/system/cuda/config.h>
42
+ #include <thrust/system/cuda/execution_policy.h>
43
+ #include <thrust/system/cuda/detail/parallel_for.h>
44
+ #include <thrust/distance.h>
45
+
46
+ THRUST_NAMESPACE_BEGIN
47
+ namespace cuda_cub {
48
+
49
+ namespace __tabulate {
50
+
51
// Per-index functor for parallel_for: stores op(idx) at items[idx].
template <class Iterator, class TabulateOp, class Size>
struct functor
{
  Iterator   items;
  TabulateOp op;

  __host__ __device__
  functor(Iterator first, TabulateOp tabulate_op)
      : items(first)
      , op(tabulate_op)
  {}

  void __device__ operator()(Size idx)
  {
    items[idx] = op(idx);
  }
}; // struct functor
66
+
67
+ } // namespace __tabulate
68
+
69
// Fills [first, last) with tabulate_op(i) for each index i, in parallel.
template <class Derived,
          class Iterator,
          class TabulateOp>
void __host__ __device__
tabulate(execution_policy<Derived> &policy,
         Iterator first,
         Iterator last,
         TabulateOp tabulate_op)
{
  using size_type = typename iterator_traits<Iterator>::difference_type;
  using functor_t = __tabulate::functor<Iterator, TabulateOp, size_type>;

  size_type const count = thrust::distance(first, last);

  cuda_cub::parallel_for(policy, functor_t(first, tabulate_op), count);
}
88
+
89
+ } // namespace cuda_cub
90
+ THRUST_NAMESPACE_END
91
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/temporary_buffer.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2016 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // this system has no special temporary buffer functions
30
+
miniCUDA124/include/thrust/system/cuda/detail/terminate.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ #pragma once
29
+
30
+ #include <thrust/detail/config.h>
31
+
32
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
33
+ # pragma GCC system_header
34
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
35
+ # pragma clang system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
37
+ # pragma system_header
38
+ #endif // no system header
39
+ #include <thrust/system/cuda/detail/util.h>
40
+ #include <cstdio>
41
+
42
+ THRUST_NAMESPACE_BEGIN
43
+ namespace system
44
+ {
45
+ namespace cuda
46
+ {
47
+ namespace detail
48
+ {
49
+
50
+
51
// Device-side termination hook for this backend: forwards to
// thrust::cuda_cub::terminate().
inline __device__
void terminate()
{
  thrust::cuda_cub::terminate();
}
56
+
57
+
58
// Prints the given message (newline-terminated) and then terminates
// execution via thrust::cuda_cub::terminate(). Usable from host and device.
inline __host__ __device__
void terminate_with_message(const char* message)
{
  printf("%s\n", message);
  thrust::cuda_cub::terminate();
}
64
+
65
+
66
+ } // end detail
67
+ } // end cuda
68
+ } // end system
69
+ THRUST_NAMESPACE_END
70
+
miniCUDA124/include/thrust/system/cuda/detail/transform.h ADDED
@@ -0,0 +1,424 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <thrust/system/cuda/config.h>
41
+
42
+ #include <thrust/system/cuda/detail/util.h>
43
+ #include <thrust/detail/type_traits/result_of_adaptable_function.h>
44
+ #include <thrust/system/cuda/detail/parallel_for.h>
45
+ #include <thrust/distance.h>
46
+
47
+ THRUST_NAMESPACE_BEGIN
48
+
49
+ namespace cuda_cub {
50
+
51
+
52
+ namespace __transform {
53
+
54
// Tag type: passed in place of a stencil iterator to select the
// stencil-free specializations of the transform functors below.
struct no_stencil_tag
{
};
57
+
58
// Predicate that accepts every element; used to express unconditional
// transform() in terms of transform_if().
struct always_true_predicate
{
  template <class T>
  bool THRUST_DEVICE_FUNCTION operator()(T const &) const
  {
    return true;
  }
};
66
+
67
// Per-element functor for the stencilled unary transform: for each index
// idx, writes op(input[idx]) to output[idx], but only when
// pred(stencil[idx]) holds.
template <class InputIt,
          class OutputIt,
          class StencilIt,
          class TransformOp,
          class Predicate>
struct unary_transform_f
{
  InputIt   input;
  OutputIt  output;
  StencilIt stencil;
  TransformOp op;
  Predicate   pred;

  THRUST_FUNCTION
  unary_transform_f(InputIt input_,
                    OutputIt output_,
                    StencilIt stencil_,
                    TransformOp op_,
                    Predicate pred_)
      : input(input_),
        output(output_),
        stencil(stencil_),
        op(op_),
        pred(pred_) {}

  // raw_reference_cast strips Thrust's wrapped-reference types before
  // handing the values to the user-supplied predicate / operator.
  template<class Size>
  void THRUST_DEVICE_FUNCTION operator()(Size idx)
  {
    if (pred(raw_reference_cast(stencil[idx])))
      output[idx] = op(raw_reference_cast(input[idx]));
  }
}; // struct unary_transform_f
99
+
100
// Stencil-free specialization: the predicate is evaluated on the input
// element itself instead of a separate stencil sequence.
template <class InputIt,
          class OutputIt,
          class TransformOp,
          class Predicate>
struct unary_transform_f<InputIt,
                         OutputIt,
                         no_stencil_tag,
                         TransformOp,
                         Predicate>
{
  InputIt  input;
  OutputIt output;
  TransformOp op;
  Predicate   pred;

  THRUST_FUNCTION
  unary_transform_f(InputIt input_,
                    OutputIt output_,
                    no_stencil_tag,
                    TransformOp op_,
                    Predicate pred_)
      : input(input_), output(output_), op(op_), pred(pred_) {}

  // Writes op(input[idx]) to output[idx] when pred(input[idx]) holds.
  template<class Size>
  void THRUST_DEVICE_FUNCTION operator()(Size idx)
  {
    if (pred(raw_reference_cast(input[idx])))
      output[idx] = op(raw_reference_cast(input[idx]));
  }
}; // struct unary_transform_f
130
+
131
// Per-element functor for the stencilled binary transform: for each index
// idx, writes op(input1[idx], input2[idx]) to output[idx], but only when
// pred(stencil[idx]) holds.
template <class InputIt1,
          class InputIt2,
          class OutputIt,
          class StencilIt,
          class TransformOp,
          class Predicate>
struct binary_transform_f
{
  InputIt1  input1;
  InputIt2  input2;
  OutputIt  output;
  StencilIt stencil;
  TransformOp op;
  Predicate   pred;

  THRUST_FUNCTION
  binary_transform_f(InputIt1 input1_,
                     InputIt2 input2_,
                     OutputIt output_,
                     StencilIt stencil_,
                     TransformOp op_,
                     Predicate pred_)
      : input1(input1_),
        input2(input2_),
        output(output_),
        stencil(stencil_),
        op(op_),
        pred(pred_) {}

  // raw_reference_cast strips Thrust's wrapped-reference types before
  // handing the values to the user-supplied predicate / operator.
  template<class Size>
  void THRUST_DEVICE_FUNCTION operator()(Size idx)
  {
    if (pred(raw_reference_cast(stencil[idx])))
      output[idx] = op(raw_reference_cast(input1[idx]),
                       raw_reference_cast(input2[idx]));
  }
}; // struct binary_transform_f
168
+
169
// Stencil-free specialization: the predicate is evaluated on the first
// input element instead of a separate stencil sequence.
template <class InputIt1,
          class InputIt2,
          class OutputIt,
          class TransformOp,
          class Predicate>
struct binary_transform_f<InputIt1,
                          InputIt2,
                          OutputIt,
                          no_stencil_tag,
                          TransformOp,
                          Predicate>
{
  InputIt1 input1;
  InputIt2 input2;
  OutputIt output;
  TransformOp op;
  Predicate   pred;

  THRUST_FUNCTION
  binary_transform_f(InputIt1 input1_,
                     InputIt2 input2_,
                     OutputIt output_,
                     no_stencil_tag ,
                     TransformOp op_,
                     Predicate pred_)
      : input1(input1_),
        input2(input2_),
        output(output_),
        op(op_),
        pred(pred_) {}

  // Writes op(input1[idx], input2[idx]) to output[idx] when
  // pred(input1[idx]) holds.
  template<class Size>
  void THRUST_DEVICE_FUNCTION operator()(Size idx)
  {
    if (pred(raw_reference_cast(input1[idx])))
      output[idx] = op(raw_reference_cast(input1[idx]),
                       raw_reference_cast(input2[idx]));
  }
}; // struct binary_transform_f
208
+
209
+ template <class Policy,
210
+ class InputIt,
211
+ class Size,
212
+ class OutputIt,
213
+ class StencilIt,
214
+ class TransformOp,
215
+ class Predicate>
216
+ OutputIt THRUST_FUNCTION
217
+ unary(Policy & policy,
218
+ InputIt items,
219
+ OutputIt result,
220
+ Size num_items,
221
+ StencilIt stencil,
222
+ TransformOp transform_op,
223
+ Predicate predicate)
224
+ {
225
+ if (num_items == 0)
226
+ return result;
227
+
228
+ typedef unary_transform_f<InputIt,
229
+ OutputIt,
230
+ StencilIt,
231
+ TransformOp,
232
+ Predicate>
233
+ unary_transform_t;
234
+
235
+ cuda_cub::parallel_for(policy,
236
+ unary_transform_t(items,
237
+ result,
238
+ stencil,
239
+ transform_op,
240
+ predicate),
241
+ num_items);
242
+
243
+ return result + num_items;
244
+ }
245
+
246
+ template <class Policy,
247
+ class InputIt1,
248
+ class InputIt2,
249
+ class Size,
250
+ class OutputIt,
251
+ class StencilIt,
252
+ class TransformOp,
253
+ class Predicate>
254
+ OutputIt THRUST_FUNCTION
255
+ binary(Policy & policy,
256
+ InputIt1 items1,
257
+ InputIt2 items2,
258
+ OutputIt result,
259
+ Size num_items,
260
+ StencilIt stencil,
261
+ TransformOp transform_op,
262
+ Predicate predicate)
263
+ {
264
+ if (num_items == 0)
265
+ return result;
266
+
267
+ typedef binary_transform_f<InputIt1,
268
+ InputIt2,
269
+ OutputIt,
270
+ StencilIt,
271
+ TransformOp,
272
+ Predicate>
273
+ binary_transform_t;
274
+
275
+ cuda_cub::parallel_for(policy,
276
+ binary_transform_t(items1,
277
+ items2,
278
+ result,
279
+ stencil,
280
+ transform_op,
281
+ predicate),
282
+ num_items);
283
+
284
+ return result + num_items;
285
+ }
286
+
287
+ } // namespace __transform
288
+
289
+ //-------------------------
290
+ // Thrust API entry points
291
+ //-------------------------
292
+
293
+ //-------------------------
294
+ // one input data stream
295
+ //-------------------------
296
+
297
+ template <class Derived,
298
+ class InputIt,
299
+ class OutputIt,
300
+ class StencilInputIt,
301
+ class TransformOp,
302
+ class Predicate>
303
+ OutputIt THRUST_FUNCTION
304
+ transform_if(execution_policy<Derived> &policy,
305
+ InputIt first,
306
+ InputIt last,
307
+ StencilInputIt stencil,
308
+ OutputIt result,
309
+ TransformOp transform_op,
310
+ Predicate predicate)
311
+ {
312
+ typedef typename iterator_traits<InputIt>::difference_type size_type;
313
+ size_type num_items = static_cast<size_type>(thrust::distance(first, last));
314
+ return __transform::unary(policy,
315
+ first,
316
+ result,
317
+ num_items,
318
+ stencil,
319
+ transform_op,
320
+ predicate);
321
+ } // func transform_if
322
+
323
+ template <class Derived,
324
+ class InputIt,
325
+ class OutputIt,
326
+ class TransformOp,
327
+ class Predicate>
328
+ OutputIt THRUST_FUNCTION
329
+ transform_if(execution_policy<Derived> &policy,
330
+ InputIt first,
331
+ InputIt last,
332
+ OutputIt result,
333
+ TransformOp transform_op,
334
+ Predicate predicate)
335
+ {
336
+ return cuda_cub::transform_if(policy,
337
+ first,
338
+ last,
339
+ __transform::no_stencil_tag(),
340
+ result,
341
+ transform_op,
342
+ predicate);
343
+ } // func transform_if
344
+
345
+ template <class Derived,
346
+ class InputIt,
347
+ class OutputIt,
348
+ class TransformOp>
349
+ OutputIt THRUST_FUNCTION
350
+ transform(execution_policy<Derived> &policy,
351
+ InputIt first,
352
+ InputIt last,
353
+ OutputIt result,
354
+ TransformOp transform_op)
355
+ {
356
+ return cuda_cub::transform_if(policy,
357
+ first,
358
+ last,
359
+ result,
360
+ transform_op,
361
+ __transform::always_true_predicate());
362
+ } // func transform
363
+
364
+ //-------------------------
365
+ // two input data streams
366
+ //-------------------------
367
+
368
+
369
+ template <class Derived,
370
+ class InputIt1,
371
+ class InputIt2,
372
+ class StencilInputIt,
373
+ class OutputIt,
374
+ class TransformOp,
375
+ class Predicate>
376
+ OutputIt THRUST_FUNCTION
377
+ transform_if(execution_policy<Derived> &policy,
378
+ InputIt1 first1,
379
+ InputIt1 last1,
380
+ InputIt2 first2,
381
+ StencilInputIt stencil,
382
+ OutputIt result,
383
+ TransformOp transform_op,
384
+ Predicate predicate)
385
+ {
386
+ typedef typename iterator_traits<InputIt1>::difference_type size_type;
387
+ size_type num_items = static_cast<size_type>(thrust::distance(first1, last1));
388
+ return __transform::binary(policy,
389
+ first1,
390
+ first2,
391
+ result,
392
+ num_items,
393
+ stencil,
394
+ transform_op,
395
+ predicate);
396
+ } // func transform_if
397
+
398
+ template <class Derived,
399
+ class InputIt1,
400
+ class InputIt2,
401
+ class OutputIt,
402
+ class TransformOp>
403
+ OutputIt THRUST_FUNCTION
404
+ transform(execution_policy<Derived> &policy,
405
+ InputIt1 first1,
406
+ InputIt1 last1,
407
+ InputIt2 first2,
408
+ OutputIt result,
409
+ TransformOp transform_op)
410
+ {
411
+ return cuda_cub::transform_if(policy,
412
+ first1,
413
+ last1,
414
+ first2,
415
+ __transform::no_stencil_tag(),
416
+ result,
417
+ transform_op,
418
+ __transform::always_true_predicate());
419
+ } // func transform
420
+
421
+ } // namespace cuda_cub
422
+
423
+ THRUST_NAMESPACE_END
424
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/transform_reduce.h ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <iterator>
41
+ #include <thrust/system/cuda/detail/reduce.h>
42
+ #include <thrust/distance.h>
43
+
44
+ THRUST_NAMESPACE_BEGIN
45
+ namespace cuda_cub {
46
+
47
+ template <class Derived,
48
+ class InputIt,
49
+ class TransformOp,
50
+ class T,
51
+ class ReduceOp>
52
+ T __host__ __device__
53
+ transform_reduce(execution_policy<Derived> &policy,
54
+ InputIt first,
55
+ InputIt last,
56
+ TransformOp transform_op,
57
+ T init,
58
+ ReduceOp reduce_op)
59
+ {
60
+ typedef typename iterator_traits<InputIt>::difference_type size_type;
61
+ size_type num_items = static_cast<size_type>(thrust::distance(first, last));
62
+ typedef transform_input_iterator_t<T,
63
+ InputIt,
64
+ TransformOp>
65
+ transformed_iterator_t;
66
+
67
+ return cuda_cub::reduce_n(policy,
68
+ transformed_iterator_t(first, transform_op),
69
+ num_items,
70
+ init,
71
+ reduce_op);
72
+ }
73
+
74
+ } // namespace cuda_cub
75
+ THRUST_NAMESPACE_END
76
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/transform_scan.h ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <iterator>
41
+ #include <thrust/detail/type_traits.h>
42
+ #include <thrust/distance.h>
43
+ #include <thrust/system/cuda/detail/scan.h>
44
+
45
+ THRUST_NAMESPACE_BEGIN
46
+
47
+ namespace cuda_cub {
48
+
49
// Fused transform + inclusive scan: wraps the input in a transforming
// iterator and dispatches to inclusive_scan_n. Returns the end of the
// written output range.
template <class Derived,
          class InputIt,
          class OutputIt,
          class TransformOp,
          class ScanOp>
OutputIt __host__ __device__
transform_inclusive_scan(execution_policy<Derived> &policy,
                         InputIt first,
                         InputIt last,
                         OutputIt result,
                         TransformOp transform_op,
                         ScanOp scan_op)
{
  // Use the transformed input iterator's value type per https://wg21.link/P0571
  // (i.e. the scan accumulates in the result type of transform_op, not the
  // raw input value type).
  using input_type = typename thrust::iterator_value<InputIt>::type;
  using result_type = thrust::detail::invoke_result_t<TransformOp, input_type>;
  using value_type = thrust::remove_cvref_t<result_type>;

  typedef typename iterator_traits<InputIt>::difference_type size_type;
  size_type num_items = static_cast<size_type>(thrust::distance(first, last));
  typedef transform_input_iterator_t<value_type,
                                     InputIt,
                                     TransformOp>
      transformed_iterator_t;

  return cuda_cub::inclusive_scan_n(policy,
                                    transformed_iterator_t(first, transform_op),
                                    num_items,
                                    result,
                                    scan_op);
}
80
+
81
// Fused transform + exclusive scan: wraps the input in a transforming
// iterator and dispatches to exclusive_scan_n with the supplied initial
// value. Returns the end of the written output range.
template <class Derived,
          class InputIt,
          class OutputIt,
          class TransformOp,
          class InitialValueType,
          class ScanOp>
OutputIt __host__ __device__
transform_exclusive_scan(execution_policy<Derived> &policy,
                         InputIt first,
                         InputIt last,
                         OutputIt result,
                         TransformOp transform_op,
                         InitialValueType init,
                         ScanOp scan_op)
{
  // Use the initial value type per https://wg21.link/P0571
  // (the scan accumulates in the type of `init`).
  using result_type = thrust::remove_cvref_t<InitialValueType>;

  typedef typename iterator_traits<InputIt>::difference_type size_type;
  size_type num_items = static_cast<size_type>(thrust::distance(first, last));
  typedef transform_input_iterator_t<result_type,
                                     InputIt,
                                     TransformOp>
      transformed_iterator_t;

  return cuda_cub::exclusive_scan_n(policy,
                                    transformed_iterator_t(first, transform_op),
                                    num_items,
                                    result,
                                    init,
                                    scan_op);
}
113
+
114
+ } // namespace cuda_cub
115
+
116
+ THRUST_NAMESPACE_END
117
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/uninitialized_copy.h ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <iterator>
41
+ #include <thrust/distance.h>
42
+ #include <thrust/system/cuda/detail/execution_policy.h>
43
+ #include <thrust/system/cuda/detail/util.h>
44
+ #include <thrust/system/cuda/detail/parallel_for.h>
45
+
46
+ THRUST_NAMESPACE_BEGIN
47
+
48
+ namespace cuda_cub {
49
+
50
+ namespace __uninitialized_copy {
51
+
52
// Per-element functor: placement-constructs output[idx] from input[idx],
// treating the output storage as uninitialized.
template <class InputIt, class OutputIt>
struct functor
{
  InputIt input;
  OutputIt output;

  typedef typename iterator_traits<InputIt>::value_type InputType;
  typedef typename iterator_traits<OutputIt>::value_type OutputType;

  THRUST_FUNCTION
  functor(InputIt input_, OutputIt output_)
      : input(input_), output(output_) {}

  template<class Size>
  void THRUST_DEVICE_FUNCTION operator()(Size idx)
  {
    // raw_reference_cast yields plain references into (raw) storage.
    InputType const &in = raw_reference_cast(input[idx]);
    OutputType & out = raw_reference_cast(output[idx]);

#if defined(__CUDA__) && defined(__clang__)
    // XXX unsafe (assignment into uninitialized storage instead of
    // construction), but clang is seemingly unable to call in-place new
    out = in;
#else
    // Placement new: construct (not assign) into the raw output storage.
    ::new (static_cast<void *>(&out)) OutputType(in);
#endif
  }
}; // struct functor
79
+
80
+ } // namespace __uninitialized_copy
81
+
82
+ template <class Derived,
83
+ class InputIt,
84
+ class Size,
85
+ class OutputIt>
86
+ OutputIt __host__ __device__
87
+ uninitialized_copy_n(execution_policy<Derived> &policy,
88
+ InputIt first,
89
+ Size count,
90
+ OutputIt result)
91
+ {
92
+ typedef __uninitialized_copy::functor<InputIt,OutputIt> functor_t;
93
+
94
+ cuda_cub::parallel_for(policy,
95
+ functor_t(first, result),
96
+ count);
97
+
98
+ return result + count;
99
+ }
100
+
101
+ template <class Derived,
102
+ class InputIt,
103
+ class OutputIt>
104
+ OutputIt __host__ __device__
105
+ uninitialized_copy(execution_policy<Derived>& policy,
106
+ InputIt first,
107
+ InputIt last,
108
+ OutputIt result)
109
+ {
110
+ return cuda_cub::uninitialized_copy_n(policy,
111
+ first,
112
+ thrust::distance(first, last),
113
+ result);
114
+ }
115
+
116
+ } // namespace cuda_cub
117
+
118
+ THRUST_NAMESPACE_END
119
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/uninitialized_fill.h ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+ #include <iterator>
41
+ #include <thrust/distance.h>
42
+ #include <thrust/system/cuda/detail/execution_policy.h>
43
+ #include <thrust/system/cuda/detail/util.h>
44
+ #include <thrust/system/cuda/detail/parallel_for.h>
45
+
46
+ THRUST_NAMESPACE_BEGIN
47
+
48
+ namespace cuda_cub {
49
+
50
+ namespace __uninitialized_fill {
51
+
52
// Per-element functor: placement-constructs items[idx] from `value`,
// treating the destination storage as uninitialized.
template <class Iterator, class T>
struct functor
{
  Iterator items;
  T value;

  typedef typename iterator_traits<Iterator>::value_type value_type;

  THRUST_FUNCTION
  functor(Iterator items_, T const& value_)
      : items(items_), value(value_) {}

  template<class Size>
  void THRUST_DEVICE_FUNCTION operator()(Size idx)
  {
    // raw_reference_cast yields a plain reference into (raw) storage.
    value_type& out = raw_reference_cast(items[idx]);

#if defined(__CUDA__) && defined(__clang__)
    // XXX unsafe (assignment into uninitialized storage instead of
    // construction). cuda-clang is seemingly unable to call ::new in device code
    out = value;
#else
    // Placement new: construct (not assign) into the raw storage.
    ::new (static_cast<void *>(&out)) value_type(value);
#endif
  }
}; // struct functor
77
+
78
+ } // namespace __uninitialized_fill
79
+
80
+ template <class Derived,
81
+ class Iterator,
82
+ class Size,
83
+ class T>
84
+ Iterator __host__ __device__
85
+ uninitialized_fill_n(execution_policy<Derived>& policy,
86
+ Iterator first,
87
+ Size count,
88
+ T const& x)
89
+ {
90
+ typedef __uninitialized_fill::functor<Iterator,T> functor_t;
91
+
92
+ cuda_cub::parallel_for(policy,
93
+ functor_t(first, x),
94
+ count);
95
+
96
+ return first + count;
97
+ }
98
+
99
+ template <class Derived,
100
+ class Iterator,
101
+ class T>
102
+ void __host__ __device__
103
+ uninitialized_fill(execution_policy<Derived>& policy,
104
+ Iterator first,
105
+ Iterator last,
106
+ T const& x)
107
+ {
108
+ cuda_cub::uninitialized_fill_n(policy,
109
+ first,
110
+ thrust::distance(first, last),
111
+ x);
112
+ }
113
+
114
+ } // namespace cuda_cub
115
+
116
+ THRUST_NAMESPACE_END
117
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/unique.h ADDED
@@ -0,0 +1,829 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+
41
+ #include <thrust/advance.h>
42
+ #include <thrust/detail/cstdint.h>
43
+ #include <thrust/detail/minmax.h>
44
+ #include <thrust/distance.h>
45
+ #include <thrust/functional.h>
46
+ #include <thrust/system/cuda/config.h>
47
+ #include <thrust/system/cuda/detail/cdp_dispatch.h>
48
+ #include <thrust/system/cuda/detail/core/agent_launcher.h>
49
+ #include <thrust/system/cuda/detail/get_value.h>
50
+ #include <thrust/system/cuda/detail/par_to_seq.h>
51
+ #include <thrust/system/cuda/detail/util.h>
52
+
53
+ #include <cub/device/device_select.cuh>
54
+ #include <cub/util_math.cuh>
55
+
56
+ THRUST_NAMESPACE_BEGIN
57
+
58
// Forward declarations of the generic Thrust entry points; the CUDA backend
// below calls back into these for the sequential (non-CDP) fallback paths.
template <typename DerivedPolicy,
          typename ForwardIterator,
          typename BinaryPredicate>
__host__ __device__ ForwardIterator
unique(
    const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
    ForwardIterator first,
    ForwardIterator last,
    BinaryPredicate binary_pred);

template <typename DerivedPolicy,
          typename InputIterator,
          typename OutputIterator,
          typename BinaryPredicate>
__host__ __device__ OutputIterator
unique_copy(
    const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
    InputIterator first,
    InputIterator last,
    OutputIterator result,
    BinaryPredicate binary_pred);

template <typename DerivedPolicy,
          typename ForwardIterator,
          typename BinaryPredicate>
__host__ __device__ typename thrust::iterator_traits<ForwardIterator>::difference_type
unique_count(
    const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
    ForwardIterator first,
    ForwardIterator last,
    BinaryPredicate binary_pred);
89
+
90
+ namespace cuda_cub {
91
+
92
+ // XXX it should be possible to unify unique & unique_by_key into a single
93
+ // agent with various specializations, similar to what is done
94
+ // with partition
95
+ namespace __unique {
96
+
97
// Compile-time kernel configuration: block size, per-thread work, and the
// CUB load/scan algorithm variants selected by the per-arch Tuning below.
template <int _BLOCK_THREADS,
          int _ITEMS_PER_THREAD = 1,
          cub::BlockLoadAlgorithm _LOAD_ALGORITHM = cub::BLOCK_LOAD_DIRECT,
          cub::CacheLoadModifier _LOAD_MODIFIER = cub::LOAD_LDG,
          cub::BlockScanAlgorithm _SCAN_ALGORITHM = cub::BLOCK_SCAN_WARP_SCANS>
struct PtxPolicy
{
  enum
  {
    BLOCK_THREADS    = _BLOCK_THREADS,
    ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
    // Total elements processed by one thread block per tile.
    ITEMS_PER_TILE   = _BLOCK_THREADS * _ITEMS_PER_THREAD,
  };
  static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM;
  static const cub::CacheLoadModifier  LOAD_MODIFIER  = _LOAD_MODIFIER;
  static const cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM;
}; // struct PtxPolicy
114
+
115
// Primary template; specialized per SM architecture below.
template<class,class>
struct Tuning;

namespace mpl = thrust::detail::mpl::math;

// Scale a nominal per-thread item count (tuned for 4-byte elements) by the
// actual element size, clamped to [1, NOMINAL_4B_ITEMS_PER_THREAD].
template<class T, int NOMINAL_4B_ITEMS_PER_THREAD>
struct items_per_thread
{
  enum
  {
    value = mpl::min<
        int,
        NOMINAL_4B_ITEMS_PER_THREAD,
        mpl::max<int,
                 1,
                 static_cast<int>(NOMINAL_4B_ITEMS_PER_THREAD * 4 /
                                  sizeof(T))>::value>::value
  };
};
134
+
135
// Per-architecture tunings: block size, nominal per-thread items, and
// load modifier chosen for each SM generation.

template<class T>
struct Tuning<sm52,T>
{
  const static int INPUT_SIZE = sizeof(T);
  enum
  {
    NOMINAL_4B_ITEMS_PER_THREAD = 11,
    //
    ITEMS_PER_THREAD = items_per_thread<T,
                                        NOMINAL_4B_ITEMS_PER_THREAD>::value
  };

  typedef PtxPolicy<64,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_LDG,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning for sm52


template <class T>
struct Tuning<sm35, T>
{
  const static int INPUT_SIZE = sizeof(T);
  enum
  {
    NOMINAL_4B_ITEMS_PER_THREAD = 9,
    //
    ITEMS_PER_THREAD = items_per_thread<T,
                                        NOMINAL_4B_ITEMS_PER_THREAD>::value
  };

  typedef PtxPolicy<128,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_LDG,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning for sm35

template<class T>
struct Tuning<sm30,T>
{
  const static int INPUT_SIZE = sizeof(T);
  enum
  {
    NOMINAL_4B_ITEMS_PER_THREAD = 7,
    //
    ITEMS_PER_THREAD = items_per_thread<T,
                                        NOMINAL_4B_ITEMS_PER_THREAD>::value
  };

  // sm30 predates read-only cache loads being profitable here: LOAD_DEFAULT.
  typedef PtxPolicy<128,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_DEFAULT,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning for sm30
195
+
196
+ template <class ItemsIt,
197
+ class ItemsOutputIt,
198
+ class BinaryPred,
199
+ class Size,
200
+ class NumSelectedOutIt>
201
// Device-side agent implementing stream compaction of duplicate-adjacent
// elements (unique). Each thread block processes one tile: flag run heads
// with BlockDiscontinuity, number the survivors with a tile-chained
// exclusive prefix sum (decoupled look-back via ScanTileState), then
// compact them through shared memory into the output.
struct UniqueAgent
{
  typedef typename iterator_traits<ItemsIt>::value_type item_type;

  typedef cub::ScanTileState<Size> ScanTileState;

  template <class Arch>
  struct PtxPlan : Tuning<Arch, item_type>::type
  {
    typedef Tuning<Arch, item_type> tuning;

    typedef typename core::LoadIterator<PtxPlan, ItemsIt>::type ItemsLoadIt;

    typedef typename core::BlockLoad<PtxPlan, ItemsLoadIt>::type BlockLoadItems;

    typedef cub::BlockDiscontinuity<item_type,
                                    PtxPlan::BLOCK_THREADS,
                                    1,
                                    1,
                                    Arch::ver>
        BlockDiscontinuityItems;

    typedef cub::TilePrefixCallbackOp<Size,
                                      cub::Sum,
                                      ScanTileState,
                                      Arch::ver>
        TilePrefixCallback;
    typedef cub::BlockScan<Size,
                           PtxPlan::BLOCK_THREADS,
                           PtxPlan::SCAN_ALGORITHM,
                           1,
                           1,
                           Arch::ver>
        BlockScan;

    // Staging buffer for compacting selected items within the tile.
    typedef core::uninitialized_array<item_type, PtxPlan::ITEMS_PER_TILE>
        shared_items_t;

    // Phases never overlap, so storage for load / scan / scatter is unioned.
    union TempStorage
    {
      struct ScanStorage
      {
        typename BlockScan::TempStorage               scan;
        typename TilePrefixCallback::TempStorage      prefix;
        typename BlockDiscontinuityItems::TempStorage discontinuity;
      } scan_storage;

      typename BlockLoadItems::TempStorage load_items;
      shared_items_t                       shared_items;

    }; // union TempStorage
  }; // struct PtxPlan

  typedef typename core::specialize_plan_msvc10_war<PtxPlan>::type::type ptx_plan;

  typedef typename ptx_plan::ItemsLoadIt             ItemsLoadIt;
  typedef typename ptx_plan::BlockLoadItems          BlockLoadItems;
  typedef typename ptx_plan::BlockDiscontinuityItems BlockDiscontinuityItems;
  typedef typename ptx_plan::TilePrefixCallback      TilePrefixCallback;
  typedef typename ptx_plan::BlockScan               BlockScan;
  typedef typename ptx_plan::shared_items_t          shared_items_t;
  typedef typename ptx_plan::TempStorage             TempStorage;

  enum
  {
    BLOCK_THREADS    = ptx_plan::BLOCK_THREADS,
    ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD,
    ITEMS_PER_TILE   = ptx_plan::ITEMS_PER_TILE
  };

  struct impl
  {
    //---------------------------------------------------------------------
    // Per-thread fields
    //---------------------------------------------------------------------

    TempStorage &                     temp_storage;
    ScanTileState &                   tile_state;
    ItemsLoadIt                       items_in;
    ItemsOutputIt                     items_out;
    // Wraps the user's equality predicate so it flags INequality (run heads).
    cub::InequalityWrapper<BinaryPred> predicate;
    Size                              num_items;

    //---------------------------------------------------------------------
    // Utility functions
    //---------------------------------------------------------------------

    THRUST_DEVICE_FUNCTION
    shared_items_t &get_shared()
    {
      return temp_storage.shared_items;
    }

    // Two-phase scatter: compact selected items into shared memory at their
    // tile-local rank, then stream them to global memory coalesced.
    void THRUST_DEVICE_FUNCTION
    scatter(item_type (&items)[ITEMS_PER_THREAD],
            Size (&selection_flags)[ITEMS_PER_THREAD],
            Size (&selection_indices)[ITEMS_PER_THREAD],
            int  /*num_tile_items*/,
            int  num_tile_selections,
            Size num_selections_prefix,
            Size /*num_selections*/)
    {
      using core::sync_threadblock;

#pragma unroll
      for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
      {
        int local_scatter_offset = selection_indices[ITEM] -
                                   num_selections_prefix;
        if (selection_flags[ITEM])
        {
          get_shared()[local_scatter_offset] = items[ITEM];
        }
      }

      sync_threadblock();

      // Coalesced write of the compacted tile to its global destination.
      for (int item = threadIdx.x;
           item < num_tile_selections;
           item += BLOCK_THREADS)
      {
        items_out[num_selections_prefix + item] = get_shared()[item];
      }

      // Shared memory is unioned with scan storage; resynchronize before
      // the next tile phase reuses it.
      sync_threadblock();
    }

    //---------------------------------------------------------------------
    // Tile processing
    //---------------------------------------------------------------------

    // Process one tile; returns the running total of selections up to and
    // including this tile.
    template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
    Size THRUST_DEVICE_FUNCTION
    consume_tile_impl(int  num_tile_items,
                      int  tile_idx,
                      Size tile_base)
    {
      using core::sync_threadblock;
      using core::uninitialized_array;

      item_type items_loc[ITEMS_PER_THREAD];
      Size      selection_flags[ITEMS_PER_THREAD];
      Size      selection_idx[ITEMS_PER_THREAD];

      if (IS_LAST_TILE)
      {
        // Partial tile: pad out-of-bounds slots with the first tile element.
        BlockLoadItems(temp_storage.load_items)
            .Load(items_in + tile_base,
                  items_loc,
                  num_tile_items,
                  *(items_in + tile_base));
      }
      else
      {
        BlockLoadItems(temp_storage.load_items)
            .Load(items_in + tile_base, items_loc);
      }


      sync_threadblock();

      if (IS_FIRST_TILE)
      {
        BlockDiscontinuityItems(temp_storage.scan_storage.discontinuity)
            .FlagHeads(selection_flags, items_loc, predicate);
      }
      else
      {
        // Compare the tile's first element against the last element of the
        // previous tile so runs spanning tile boundaries are not split.
        item_type tile_predecessor = items_in[tile_base - 1];
        BlockDiscontinuityItems(temp_storage.scan_storage.discontinuity)
            .FlagHeads(selection_flags, items_loc, predicate, tile_predecessor);
      }

#pragma unroll
      for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
      {
        // Set selection_flags for out-of-bounds items
        // (their count is subtracted again after the scan below).
        if ((IS_LAST_TILE) &&
            (Size(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items))
          selection_flags[ITEM] = 1;
      }

      sync_threadblock();

      Size num_tile_selections   = 0;
      Size num_selections        = 0;
      Size num_selections_prefix = 0;
      if (IS_FIRST_TILE)
      {
        BlockScan(temp_storage.scan_storage.scan)
            .ExclusiveSum(selection_flags,
                          selection_idx,
                          num_tile_selections);

        if (threadIdx.x == 0)
        {
          // Update tile status if this is not the last tile
          if (!IS_LAST_TILE)
            tile_state.SetInclusive(0, num_tile_selections);
        }

        // Do not count any out-of-bounds selections
        if (IS_LAST_TILE)
        {
          int num_discount = ITEMS_PER_TILE - num_tile_items;
          num_tile_selections -= num_discount;
        }
        num_selections = num_tile_selections;
      }
      else
      {
        // Non-first tile: chain the scan across tiles via decoupled
        // look-back on ScanTileState.
        TilePrefixCallback prefix_cb(tile_state,
                                     temp_storage.scan_storage.prefix,
                                     cub::Sum(),
                                     tile_idx);
        BlockScan(temp_storage.scan_storage.scan)
            .ExclusiveSum(selection_flags,
                          selection_idx,
                          prefix_cb);

        num_selections        = prefix_cb.GetInclusivePrefix();
        num_tile_selections   = prefix_cb.GetBlockAggregate();
        num_selections_prefix = prefix_cb.GetExclusivePrefix();

        if (IS_LAST_TILE)
        {
          int num_discount = ITEMS_PER_TILE - num_tile_items;
          num_tile_selections -= num_discount;
          num_selections -= num_discount;
        }
      }

      sync_threadblock();

      scatter(items_loc,
              selection_flags,
              selection_idx,
              num_tile_items,
              num_tile_selections,
              num_selections_prefix,
              num_selections);

      return num_selections;
    }


    // Dispatch on whether this is the first tile (no predecessor element,
    // no look-back needed).
    template <bool IS_LAST_TILE>
    Size THRUST_DEVICE_FUNCTION
    consume_tile(int  num_tile_items,
                 int  tile_idx,
                 Size tile_base)
    {
      if (tile_idx == 0)
      {
        return consume_tile_impl<IS_LAST_TILE, true>(num_tile_items,
                                                     tile_idx,
                                                     tile_base);
      }
      else
      {
        return consume_tile_impl<IS_LAST_TILE, false>(num_tile_items,
                                                      tile_idx,
                                                      tile_base);
      }
    }

    //---------------------------------------------------------------------
    // Constructor
    //---------------------------------------------------------------------

    // One thread block == one tile; the last tile also publishes the grand
    // total of selected items through num_selected_out.
    THRUST_DEVICE_FUNCTION
    impl(TempStorage &    temp_storage_,
         ScanTileState &  tile_state_,
         ItemsLoadIt      items_in_,
         ItemsOutputIt    items_out_,
         BinaryPred       binary_pred_,
         Size             num_items_,
         int              num_tiles,
         NumSelectedOutIt num_selected_out)
        : temp_storage(temp_storage_),
          tile_state(tile_state_),
          items_in(items_in_),
          items_out(items_out_),
          predicate(binary_pred_),
          num_items(num_items_)
    {
      int  tile_idx  = blockIdx.x;
      Size tile_base = tile_idx * ITEMS_PER_TILE;

      if (tile_idx < num_tiles - 1)
      {
        consume_tile<false>(ITEMS_PER_TILE,
                            tile_idx,
                            tile_base);
      }
      else
      {
        int  num_remaining  = static_cast<int>(num_items - tile_base);
        Size num_selections = consume_tile<true>(num_remaining,
                                                 tile_idx,
                                                 tile_base);
        if (threadIdx.x == 0)
        {
          *num_selected_out = num_selections;
        }
      }
    }
  }; // struct impl

  //---------------------------------------------------------------------
  // Agent entry point
  //---------------------------------------------------------------------

  THRUST_AGENT_ENTRY(ItemsIt          items_in,
                     ItemsOutputIt    items_out,
                     BinaryPred       binary_pred,
                     NumSelectedOutIt num_selected_out,
                     Size             num_items,
                     ScanTileState    tile_state,
                     int              num_tiles,
                     char *           shmem)
  {
    TempStorage &storage = *reinterpret_cast<TempStorage *>(shmem);

    impl(storage,
         tile_state,
         core::make_load_iterator(ptx_plan(), items_in),
         items_out,
         binary_pred,
         num_items,
         num_tiles,
         num_selected_out);
  }
}; // struct UniqueAgent
535
+
536
+ template <class ScanTileState,
537
+ class NumSelectedIt,
538
+ class Size>
539
+ struct InitAgent
540
+ {
541
+ template <class Arch>
542
+ struct PtxPlan : PtxPolicy<128> {};
543
+ typedef core::specialize_plan<PtxPlan> ptx_plan;
544
+
545
+ //---------------------------------------------------------------------
546
+ // Agent entry point
547
+ //---------------------------------------------------------------------
548
+
549
+ THRUST_AGENT_ENTRY(ScanTileState tile_state,
550
+ Size num_tiles,
551
+ NumSelectedIt num_selected_out,
552
+ char * /*shmem*/)
553
+ {
554
+ tile_state.InitializeStatus(num_tiles);
555
+ if (blockIdx.x == 0 && threadIdx.x == 0)
556
+ *num_selected_out = 0;
557
+ }
558
+
559
+ }; // struct InitAgent
560
+
561
+ template <class ItemsInputIt,
562
+ class ItemsOutputIt,
563
+ class BinaryPred,
564
+ class Size,
565
+ class NumSelectedOutIt>
566
+ static cudaError_t THRUST_RUNTIME_FUNCTION
567
+ doit_step(void * d_temp_storage,
568
+ size_t & temp_storage_bytes,
569
+ ItemsInputIt items_in,
570
+ ItemsOutputIt items_out,
571
+ BinaryPred binary_pred,
572
+ NumSelectedOutIt num_selected_out,
573
+ Size num_items,
574
+ cudaStream_t stream)
575
+ {
576
+ using core::AgentLauncher;
577
+ using core::AgentPlan;
578
+ using core::get_agent_plan;
579
+
580
+ typedef AgentLauncher<
581
+ UniqueAgent<ItemsInputIt,
582
+ ItemsOutputIt,
583
+ BinaryPred,
584
+ Size,
585
+ NumSelectedOutIt> >
586
+ unique_agent;
587
+
588
+ typedef typename unique_agent::ScanTileState ScanTileState;
589
+
590
+ typedef AgentLauncher<
591
+ InitAgent<ScanTileState, NumSelectedOutIt, Size> >
592
+ init_agent;
593
+
594
+ using core::get_plan;
595
+ typename get_plan<init_agent>::type init_plan = init_agent::get_plan();
596
+ typename get_plan<unique_agent>::type unique_plan = unique_agent::get_plan(stream);
597
+
598
+
599
+ int tile_size = unique_plan.items_per_tile;
600
+ size_t num_tiles = cub::DivideAndRoundUp(num_items, tile_size);
601
+
602
+ size_t vshmem_size = core::vshmem_size(unique_plan.shared_memory_size,
603
+ num_tiles);
604
+
605
+ cudaError_t status = cudaSuccess;
606
+ size_t allocation_sizes[2] = {0, vshmem_size};
607
+ status = ScanTileState::AllocationSize(static_cast<int>(num_tiles), allocation_sizes[0]);
608
+ CUDA_CUB_RET_IF_FAIL(status);
609
+
610
+ void *allocations[2] = {NULL, NULL};
611
+ //
612
+ status = cub::AliasTemporaries(d_temp_storage,
613
+ temp_storage_bytes,
614
+ allocations,
615
+ allocation_sizes);
616
+ CUDA_CUB_RET_IF_FAIL(status);
617
+
618
+ if (d_temp_storage == NULL)
619
+ {
620
+ return status;
621
+ }
622
+
623
+ ScanTileState tile_status;
624
+ status = tile_status.Init(static_cast<int>(num_tiles), allocations[0], allocation_sizes[0]);
625
+ CUDA_CUB_RET_IF_FAIL(status);
626
+
627
+ num_tiles = max<size_t>(1,num_tiles);
628
+ init_agent ia(init_plan, num_tiles, stream, "unique_by_key::init_agent");
629
+ ia.launch(tile_status, num_tiles, num_selected_out);
630
+ CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
631
+
632
+ if (num_items == 0) { return status; }
633
+
634
+ char *vshmem_ptr = vshmem_size > 0 ? (char *)allocations[1] : NULL;
635
+
636
+ unique_agent ua(unique_plan, num_items, stream, vshmem_ptr, "unique_by_key::unique_agent");
637
+ ua.launch(items_in,
638
+ items_out,
639
+ binary_pred,
640
+ num_selected_out,
641
+ num_items,
642
+ tile_status,
643
+ num_tiles);
644
+ CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
645
+ return status;
646
+ }
647
+
648
// Host-side driver: two-pass doit_step protocol (size query, then run),
// temporary storage allocated through the execution policy, and the final
// selected count read back to compute the returned end iterator.
template <typename Derived,
          typename ItemsInputIt,
          typename ItemsOutputIt,
          typename BinaryPred>
THRUST_RUNTIME_FUNCTION
ItemsOutputIt unique(execution_policy<Derived>& policy,
                     ItemsInputIt  items_first,
                     ItemsInputIt  items_last,
                     ItemsOutputIt items_result,
                     BinaryPred    binary_pred)
{
  // typedef typename iterator_traits<ItemsInputIt>::difference_type size_type;
  // NOTE: counts are truncated to int, matching the agent's int tile math.
  typedef int size_type;

  size_type    num_items = static_cast<size_type>(thrust::distance(items_first, items_last));
  size_t       temp_storage_bytes = 0;
  cudaStream_t stream = cuda_cub::stream(policy);

  cudaError_t status;
  // Pass 1: NULL storage pointer -> only computes temp_storage_bytes.
  status = doit_step(NULL,
                     temp_storage_bytes,
                     items_first,
                     items_result,
                     binary_pred,
                     reinterpret_cast<size_type*>(NULL),
                     num_items,
                     stream);
  cuda_cub::throw_on_error(status, "unique: failed on 1st step");

  // Slot 0: device-side selected-count; slot 1: algorithm temp storage.
  size_t allocation_sizes[2] = {sizeof(size_type), temp_storage_bytes};
  void * allocations[2]      = {NULL, NULL};

  size_t storage_size = 0;
  status = core::alias_storage(NULL,
                               storage_size,
                               allocations,
                               allocation_sizes);
  cuda_cub::throw_on_error(status, "unique: failed on 1st step");

  // Allocate temporary storage.
  thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
      tmp(policy, storage_size);
  void *ptr = static_cast<void*>(tmp.data().get());

  status = core::alias_storage(ptr,
                               storage_size,
                               allocations,
                               allocation_sizes);
  cuda_cub::throw_on_error(status, "unique: failed on 2nd step");

  size_type* d_num_selected_out
      = thrust::detail::aligned_reinterpret_cast<size_type*>(allocations[0]);

  // Pass 2: actually run the algorithm.
  status = doit_step(allocations[1],
                     temp_storage_bytes,
                     items_first,
                     items_result,
                     binary_pred,
                     d_num_selected_out,
                     num_items,
                     stream);
  cuda_cub::throw_on_error(status, "unique: failed on 2nd step");

  status = cuda_cub::synchronize(policy);
  cuda_cub::throw_on_error(status, "unique: failed to synchronize");

  // Read back the device count to form the returned end iterator.
  size_type num_selected = get_value(policy, d_num_selected_out);

  return items_result + num_selected;
}
718
+ } // namespace __unique
719
+
720
+ //-------------------------
721
+ // Thrust API entry points
722
+ //-------------------------
723
+
724
// thrust::unique_copy entry point for the CUDA backend: parallel path via
// __unique::unique when device-side dispatch is available, otherwise the
// sequential fallback.
__thrust_exec_check_disable__
template <class Derived,
          class InputIt,
          class OutputIt,
          class BinaryPred>
OutputIt __host__ __device__
unique_copy(execution_policy<Derived> &policy,
            InputIt    first,
            InputIt    last,
            OutputIt   result,
            BinaryPred binary_pred)
{
  THRUST_CDP_DISPATCH(
    (result = __unique::unique(policy, first, last, result, binary_pred);),
    (result = thrust::unique_copy(cvt_to_seq(derived_cast(policy)),
                                  first,
                                  last,
                                  result,
                                  binary_pred);));
  return result;
}
745
+
746
+ template <class Derived,
747
+ class InputIt,
748
+ class OutputIt>
749
+ OutputIt __host__ __device__
750
+ unique_copy(execution_policy<Derived> &policy,
751
+ InputIt first,
752
+ InputIt last,
753
+ OutputIt result)
754
+ {
755
+ typedef typename iterator_traits<InputIt>::value_type input_type;
756
+ return cuda_cub::unique_copy(policy, first, last, result, equal_to<input_type>());
757
+ }
758
+
759
+
760
+
761
// thrust::unique entry point: in-place unique is implemented as a
// unique_copy onto the input range itself; sequential fallback otherwise.
__thrust_exec_check_disable__
template <class Derived,
          class ForwardIt,
          class BinaryPred>
ForwardIt __host__ __device__
unique(execution_policy<Derived> &policy,
       ForwardIt  first,
       ForwardIt  last,
       BinaryPred binary_pred)
{
  ForwardIt ret = first;
  THRUST_CDP_DISPATCH(
    (ret = cuda_cub::unique_copy(policy, first, last, first, binary_pred);),
    (ret = thrust::unique(cvt_to_seq(derived_cast(policy)),
                          first,
                          last,
                          binary_pred);));
  return ret;
}
780
+
781
+ template <class Derived,
782
+ class ForwardIt>
783
+ ForwardIt __host__ __device__
784
+ unique(execution_policy<Derived> &policy,
785
+ ForwardIt first,
786
+ ForwardIt last)
787
+ {
788
+ typedef typename iterator_traits<ForwardIt>::value_type input_type;
789
+ return cuda_cub::unique(policy, first, last, equal_to<input_type>());
790
+ }
791
+
792
+
793
// Adapts a binary equality predicate to a unary predicate over a zipped
// pair of adjacent elements: true when the pair differs (a new run starts).
template <typename BinaryPred>
struct zip_adj_not_predicate {
  template <typename TupleType>
  bool __host__ __device__ operator()(TupleType&& tuple) {
    return !binary_pred(thrust::get<0>(tuple), thrust::get<1>(tuple));
  }

  BinaryPred binary_pred;
};
802
+
803
+
804
// Counts the number of "unique runs" without materializing output:
// zips each element with its successor, counts adjacent pairs that differ,
// and adds 1 for the first run.
__thrust_exec_check_disable__
template <class Derived,
          class ForwardIt,
          class BinaryPred>
typename thrust::iterator_traits<ForwardIt>::difference_type
__host__ __device__
unique_count(execution_policy<Derived> &policy,
             ForwardIt  first,
             ForwardIt  last,
             BinaryPred binary_pred)
{
  if (first == last) {
    return 0;
  }
  auto size = thrust::distance(first, last);
  // size - 1 adjacent pairs: (first, first+1), ..., (last-2, last-1).
  auto it = thrust::make_zip_iterator(thrust::make_tuple(first, thrust::next(first)));
  return 1 + thrust::count_if(policy, it, thrust::next(it, size - 1), zip_adj_not_predicate<BinaryPred>{binary_pred});
}
822
+
823
+ } // namespace cuda_cub
824
+ THRUST_NAMESPACE_END
825
+
826
+ //
827
+ #include <thrust/memory.h>
828
+ #include <thrust/unique.h>
829
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/unique_by_key.h ADDED
@@ -0,0 +1,927 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
40
+
41
+ #include <thrust/detail/alignment.h>
42
+ #include <thrust/detail/cstdint.h>
43
+ #include <thrust/detail/temporary_array.h>
44
+ #include <thrust/detail/minmax.h>
45
+ #include <thrust/detail/mpl/math.h>
46
+ #include <thrust/distance.h>
47
+ #include <thrust/functional.h>
48
+ #include <thrust/pair.h>
49
+ #include <thrust/system/cuda/config.h>
50
+ #include <thrust/system/cuda/detail/cdp_dispatch.h>
51
+ #include <thrust/system/cuda/detail/core/agent_launcher.h>
52
+ #include <thrust/system/cuda/detail/get_value.h>
53
+ #include <thrust/system/cuda/detail/par_to_seq.h>
54
+ #include <thrust/system/cuda/detail/util.h>
55
+
56
+ #include <cub/device/device_select.cuh>
57
+ #include <cub/util_math.cuh>
58
+
59
+ THRUST_NAMESPACE_BEGIN
60
+
61
// Forward declarations of the generic Thrust entry points used by the CUDA
// backend's sequential fallback paths below.
template <typename DerivedPolicy,
          typename ForwardIterator1,
          typename ForwardIterator2>
__host__ __device__ thrust::pair<ForwardIterator1, ForwardIterator2>
unique_by_key(
    const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
    ForwardIterator1 keys_first,
    ForwardIterator1 keys_last,
    ForwardIterator2 values_first);
template <typename DerivedPolicy,
          typename InputIterator1,
          typename InputIterator2,
          typename OutputIterator1,
          typename OutputIterator2>
__host__ __device__ thrust::pair<OutputIterator1, OutputIterator2>
unique_by_key_copy(
    const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
    InputIterator1  keys_first,
    InputIterator1  keys_last,
    InputIterator2  values_first,
    OutputIterator1 keys_result,
    OutputIterator2 values_result);
83
+
84
+
85
+ namespace cuda_cub {
86
+
87
+ // XXX it should be possible to unify unique & unique_by_key into a single
88
+ // agent with various specializations, similar to what is done
89
+ // with partition
90
+ namespace __unique_by_key {
91
+
92
// Compile-time kernel configuration bundle for the unique_by_key agents.
// Collects the block size, per-thread work amount, and the CUB collective
// algorithms/load modifier a given architecture tuning selects.
template <int _BLOCK_THREADS,
          int _ITEMS_PER_THREAD = 1,
          cub::BlockLoadAlgorithm _LOAD_ALGORITHM  = cub::BLOCK_LOAD_DIRECT,
          cub::CacheLoadModifier  _LOAD_MODIFIER   = cub::LOAD_LDG,
          cub::BlockScanAlgorithm _SCAN_ALGORITHM  = cub::BLOCK_SCAN_WARP_SCANS>
struct PtxPolicy
{
  enum
  {
    BLOCK_THREADS    = _BLOCK_THREADS,     // threads per thread block
    ITEMS_PER_THREAD = _ITEMS_PER_THREAD,  // elements each thread processes
    ITEMS_PER_TILE   = _BLOCK_THREADS * _ITEMS_PER_THREAD,  // elements per block per pass
  };
  static const cub::BlockLoadAlgorithm LOAD_ALGORITHM  = _LOAD_ALGORITHM;
  static const cub::CacheLoadModifier  LOAD_MODIFIER   = _LOAD_MODIFIER;
  static const cub::BlockScanAlgorithm SCAN_ALGORITHM  = _SCAN_ALGORITHM;
}; // struct PtxPolicy
109
+
110
// Primary template; specialized per SM architecture below.
template<class,class>
struct Tuning;

namespace mpl = thrust::detail::mpl::math;

// Scales a nominal items-per-thread count (calibrated for 4-byte elements)
// to the actual element size: wider types get proportionally fewer items,
// clamped to at least 1 and at most the nominal count.
template<class T, size_t NOMINAL_4B_ITEMS_PER_THREAD>
struct items_per_thread
{
  enum
  {
    value = mpl::min<
        int,
        static_cast<int>(NOMINAL_4B_ITEMS_PER_THREAD),
        mpl::max<int,
                 1,
                 static_cast<int>(NOMINAL_4B_ITEMS_PER_THREAD * 4 /
                                  sizeof(T))>::value>::value
  };
};
129
+
130
+
131
// Per-architecture launch tunings, keyed by the key type T.
// The nominal items-per-thread numbers are empirically chosen; LOAD_LDG
// is used where the read-only data cache is beneficial (sm35+).

template<class T>
struct Tuning<sm52,T>
{
  const static int INPUT_SIZE = sizeof(T);
  enum
  {
    NOMINAL_4B_ITEMS_PER_THREAD = 11,
    //
    ITEMS_PER_THREAD = items_per_thread<T,
                                        NOMINAL_4B_ITEMS_PER_THREAD>::value
  };

  typedef PtxPolicy<64,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_LDG,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning for sm52

template<class T>
struct Tuning<sm35,T>
{
  const static int INPUT_SIZE = sizeof(T);
  enum
  {
    NOMINAL_4B_ITEMS_PER_THREAD = 9,
    //
    ITEMS_PER_THREAD = items_per_thread<T,
                                        NOMINAL_4B_ITEMS_PER_THREAD>::value
  };

  typedef PtxPolicy<128,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_LDG,
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning for sm35

template<class T>
struct Tuning<sm30,T>
{
  const static int INPUT_SIZE = sizeof(T);
  enum
  {
    NOMINAL_4B_ITEMS_PER_THREAD = 7,
    //
    ITEMS_PER_THREAD = items_per_thread<T,
                                        NOMINAL_4B_ITEMS_PER_THREAD>::value
  };

  typedef PtxPolicy<128,
                    ITEMS_PER_THREAD,
                    cub::BLOCK_LOAD_WARP_TRANSPOSE,
                    cub::LOAD_DEFAULT,  // sm30 has no LDG path
                    cub::BLOCK_SCAN_WARP_SCANS>
      type;
}; // Tuning for sm30
190
+
191
// Kernel agent implementing unique_by_key as a single-pass, tile-based
// stream compaction: each thread block loads one tile of keys/values,
// flags the first key of every run of "equal" keys (per BinaryPred),
// computes global output offsets with CUB's decoupled-lookback scan
// across tiles, and scatters the selected key/value pairs to the output.
template <class KeyInputIt,
          class ValInputIt,
          class KeyOutputIt,
          class ValOutputIt,
          class BinaryPred,
          class Size,
          class NumSelectedOutIt>
struct UniqueByKeyAgent
{
  typedef typename iterator_traits<KeyInputIt>::value_type key_type;
  typedef typename iterator_traits<ValInputIt>::value_type value_type;

  // Per-tile running-total state used by the decoupled-lookback scan.
  typedef cub::ScanTileState<Size> ScanTileState;

  // Architecture-specific plan: inherits BLOCK_THREADS/ITEMS_PER_THREAD
  // etc. from Tuning and derives the CUB collective types used below.
  template <class Arch>
  struct PtxPlan : Tuning<Arch, key_type>::type
  {
    typedef Tuning<Arch, key_type> tuning;

    typedef typename core::LoadIterator<PtxPlan, KeyInputIt>::type KeyLoadIt;
    typedef typename core::LoadIterator<PtxPlan, ValInputIt>::type ValLoadIt;

    typedef typename core::BlockLoad<PtxPlan, KeyLoadIt>::type BlockLoadKeys;
    typedef typename core::BlockLoad<PtxPlan, ValLoadIt>::type BlockLoadValues;

    // Flags the first element of each run of equal keys within a tile.
    typedef cub::BlockDiscontinuity<key_type,
                                    PtxPlan::BLOCK_THREADS,
                                    1,
                                    1,
                                    Arch::ver>
        BlockDiscontinuityKeys;

    // Carries the exclusive prefix of selected-item counts across tiles.
    typedef cub::TilePrefixCallbackOp<Size,
                                      cub::Sum,
                                      ScanTileState,
                                      Arch::ver>
        TilePrefixCallback;
    typedef cub::BlockScan<Size,
                           PtxPlan::BLOCK_THREADS,
                           PtxPlan::SCAN_ALGORITHM,
                           1,
                           1,
                           Arch::ver>
        BlockScan;

    typedef core::uninitialized_array<key_type, PtxPlan::ITEMS_PER_TILE>
        shared_keys_t;
    typedef core::uninitialized_array<value_type, PtxPlan::ITEMS_PER_TILE>
        shared_values_t;

    // All collectives alias one shared-memory union; the phases that use
    // different members are separated by __syncthreads() barriers below.
    union TempStorage
    {
      struct ScanStorage
      {
        typename BlockScan::TempStorage              scan;
        typename TilePrefixCallback::TempStorage     prefix;
        typename BlockDiscontinuityKeys::TempStorage discontinuity;
      } scan_storage;

      typename BlockLoadKeys::TempStorage   load_keys;
      typename BlockLoadValues::TempStorage load_values;

      shared_keys_t   shared_keys;
      shared_values_t shared_values;
    }; // union TempStorage
  };   // struct PtxPlan

  typedef typename core::specialize_plan_msvc10_war<PtxPlan>::type::type ptx_plan;

  typedef typename ptx_plan::KeyLoadIt              KeyLoadIt;
  typedef typename ptx_plan::ValLoadIt              ValLoadIt;
  typedef typename ptx_plan::BlockLoadKeys          BlockLoadKeys;
  typedef typename ptx_plan::BlockLoadValues        BlockLoadValues;
  typedef typename ptx_plan::BlockDiscontinuityKeys BlockDiscontinuityKeys;
  typedef typename ptx_plan::TilePrefixCallback     TilePrefixCallback;
  typedef typename ptx_plan::BlockScan              BlockScan;
  typedef typename ptx_plan::TempStorage            TempStorage;
  typedef typename ptx_plan::shared_keys_t          shared_keys_t;
  typedef typename ptx_plan::shared_values_t        shared_values_t;

  enum
  {
    BLOCK_THREADS    = ptx_plan::BLOCK_THREADS,
    ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD,
    ITEMS_PER_TILE   = ptx_plan::ITEMS_PER_TILE
  };

  struct impl
  {
    //---------------------------------------------------------------------
    // Per-thread fields
    //---------------------------------------------------------------------

    TempStorage &                      temp_storage;
    ScanTileState &                    tile_state;
    KeyLoadIt                          keys_in;
    ValLoadIt                          values_in;
    KeyOutputIt                        keys_out;
    ValOutputIt                        values_out;
    // InequalityWrapper inverts the user's equality predicate: a "selected"
    // item is one that differs from its predecessor.
    cub::InequalityWrapper<BinaryPred> predicate;
    Size                               num_items;

    //---------------------------------------------------------------------
    // Utility functions
    //---------------------------------------------------------------------

    // Tag dispatch selects which half of the shared-memory union scatter()
    // stages through (keys vs. values).
    struct key_tag {};
    struct value_tag {};

    THRUST_DEVICE_FUNCTION
    shared_keys_t &get_shared(key_tag)
    {
      return temp_storage.shared_keys;
    }
    THRUST_DEVICE_FUNCTION
    shared_values_t &get_shared(value_tag)
    {
      return temp_storage.shared_values;
    }

    // Two-phase scatter: first compact this tile's selected items into
    // shared memory at their tile-local offsets, then copy them out to
    // global memory with coalesced, block-strided writes.
    template <class Tag,
              class OutputIt,
              class T>
    void THRUST_DEVICE_FUNCTION
    scatter(Tag      tag,
            OutputIt items_out,
            T (&items)[ITEMS_PER_THREAD],
            Size (&selection_flags)[ITEMS_PER_THREAD],
            Size (&selection_indices)[ITEMS_PER_THREAD],
            int /*num_tile_items*/,
            int  num_tile_selections,
            Size num_selections_prefix,
            Size /*num_selections*/)
    {
      using core::sync_threadblock;

#pragma unroll
      for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
      {
        int local_scatter_offset = selection_indices[ITEM] -
                                   num_selections_prefix;
        if (selection_flags[ITEM])
        {
          get_shared(tag)[local_scatter_offset] = items[ITEM];
        }
      }

      sync_threadblock();

      for (int item = threadIdx.x;
           item < num_tile_selections;
           item += BLOCK_THREADS)
      {
        items_out[num_selections_prefix + item] = get_shared(tag)[item];
      }

      sync_threadblock();
    }

    //---------------------------------------------------------------------
    // Tile processing
    //---------------------------------------------------------------------

    // Processes one tile: load keys+values, flag run heads, exclusive-scan
    // the flags into output offsets (seeding or consuming the cross-tile
    // prefix), then scatter keys and values. Returns the inclusive number
    // of selections up to and including this tile.
    template <bool IS_LAST_TILE, bool IS_FIRST_TILE>
    Size THRUST_DEVICE_FUNCTION
    consume_tile_impl(int  num_tile_items,
                      int  tile_idx,
                      Size tile_base)
    {
      using core::sync_threadblock;

      key_type keys[ITEMS_PER_THREAD];
      Size     selection_flags[ITEMS_PER_THREAD];
      Size     selection_idx[ITEMS_PER_THREAD];

      if (IS_LAST_TILE)
      {
        // Fill last elements with the first element
        // because collectives are not suffix guarded
        BlockLoadKeys(temp_storage.load_keys)
            .Load(keys_in + tile_base,
                  keys,
                  num_tile_items,
                  *(keys_in + tile_base));
      }
      else
      {
        BlockLoadKeys(temp_storage.load_keys).Load(keys_in + tile_base, keys);
      }

      sync_threadblock();

      value_type values[ITEMS_PER_THREAD];
      if (IS_LAST_TILE)
      {
        // Fill last elements with the first element
        // because collectives are not suffix guarded
        BlockLoadValues(temp_storage.load_values)
            .Load(values_in + tile_base,
                  values,
                  num_tile_items,
                  *(values_in + tile_base));
      }
      else
      {
        BlockLoadValues(temp_storage.load_values)
            .Load(values_in + tile_base, values);
      }

      sync_threadblock();

      if (IS_FIRST_TILE)
      {
        BlockDiscontinuityKeys(temp_storage.scan_storage.discontinuity)
            .FlagHeads(selection_flags, keys, predicate);
      }
      else
      {
        // Compare the tile's first key against the last key of the
        // previous tile so runs spanning tile boundaries are not split.
        key_type tile_predecessor = keys_in[tile_base - 1];
        BlockDiscontinuityKeys(temp_storage.scan_storage.discontinuity)
            .FlagHeads(selection_flags, keys, predicate, tile_predecessor);
      }
#pragma unroll
      for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
      {
        // Set selection_flags for out-of-bounds items
        // (counted here, then subtracted back out after the scan).
        if ((IS_LAST_TILE) && (Size(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items))
          selection_flags[ITEM] = 1;
      }

      sync_threadblock();

      Size num_tile_selections   = 0;
      Size num_selections        = 0;
      Size num_selections_prefix = 0;
      if (IS_FIRST_TILE)
      {
        // Tile 0 has no predecessor: plain block-wide exclusive scan.
        BlockScan(temp_storage.scan_storage.scan)
            .ExclusiveSum(selection_flags,
                          selection_idx,
                          num_tile_selections);

        if (threadIdx.x == 0)
        {
          // Update tile status if this is not the last tile
          if (!IS_LAST_TILE)
            tile_state.SetInclusive(0, num_tile_selections);
        }

        // Do not count any out-of-bounds selections
        if (IS_LAST_TILE)
        {
          int num_discount = ITEMS_PER_TILE - num_tile_items;
          num_tile_selections -= num_discount;
        }
        num_selections = num_tile_selections;
      }
      else
      {
        // Later tiles chain their scan onto the running total of all
        // preceding tiles via the decoupled-lookback prefix callback.
        TilePrefixCallback prefix_cb(tile_state,
                                     temp_storage.scan_storage.prefix,
                                     cub::Sum(),
                                     tile_idx);
        BlockScan(temp_storage.scan_storage.scan)
            .ExclusiveSum(selection_flags,
                          selection_idx,
                          prefix_cb);

        num_selections        = prefix_cb.GetInclusivePrefix();
        num_tile_selections   = prefix_cb.GetBlockAggregate();
        num_selections_prefix = prefix_cb.GetExclusivePrefix();

        if (IS_LAST_TILE)
        {
          int num_discount = ITEMS_PER_TILE - num_tile_items;
          num_tile_selections -= num_discount;
          num_selections -= num_discount;
        }
      }

      sync_threadblock();

      scatter(key_tag(),
              keys_out,
              keys,
              selection_flags,
              selection_idx,
              num_tile_items,
              num_tile_selections,
              num_selections_prefix,
              num_selections);

      sync_threadblock();

      scatter(value_tag(),
              values_out,
              values,
              selection_flags,
              selection_idx,
              num_tile_items,
              num_tile_selections,
              num_selections_prefix,
              num_selections);

      return num_selections;
    }


    // Dispatches to consume_tile_impl with IS_FIRST_TILE resolved at
    // compile time (tile 0 seeds the cross-tile prefix chain).
    template <bool IS_LAST_TILE>
    Size THRUST_DEVICE_FUNCTION
    consume_tile(int  num_tile_items,
                 int  tile_idx,
                 Size tile_base)
    {
      if (tile_idx == 0)
      {
        return consume_tile_impl<IS_LAST_TILE, true>(num_tile_items,
                                                     tile_idx,
                                                     tile_base);
      }
      else
      {
        return consume_tile_impl<IS_LAST_TILE, false>(num_tile_items,
                                                      tile_idx,
                                                      tile_base);
      }
    }

    //---------------------------------------------------------------------
    // Constructor
    //---------------------------------------------------------------------

    // One block per tile; the last tile also writes the total selection
    // count through num_selected_out.
    THRUST_DEVICE_FUNCTION
    impl(TempStorage &    temp_storage_,
         ScanTileState &  tile_state_,
         KeyLoadIt        keys_in_,
         ValLoadIt        values_in_,
         KeyOutputIt      keys_out_,
         ValOutputIt      values_out_,
         BinaryPred       binary_pred_,
         Size             num_items_,
         int              num_tiles,
         NumSelectedOutIt num_selected_out)
        // member initializers
        : temp_storage(temp_storage_),
          tile_state(tile_state_),
          keys_in(keys_in_),
          values_in(values_in_),
          keys_out(keys_out_),
          values_out(values_out_),
          predicate(binary_pred_),
          num_items(num_items_)
    {
      int tile_idx = blockIdx.x;
      // NOTE(review): tile_idx * ITEMS_PER_TILE is an int*int product
      // converted to Size afterwards — assumes it fits in int (the caller
      // uses a 32-bit size_type, so num_items <= INT_MAX); confirm if Size
      // is ever widened.
      Size tile_base = tile_idx * ITEMS_PER_TILE;

      if (tile_idx < num_tiles - 1)
      {
        consume_tile<false>(ITEMS_PER_TILE,
                            tile_idx,
                            tile_base);
      }
      else
      {
        int num_remaining   = static_cast<int>(num_items - tile_base);
        Size num_selections = consume_tile<true>(num_remaining,
                                                 tile_idx,
                                                 tile_base);
        if (threadIdx.x == 0)
        {
          *num_selected_out = num_selections;
        }
      }
    }
  }; // struct impl

  //---------------------------------------------------------------------
  // Agent entry point
  //---------------------------------------------------------------------

  // Kernel body: binds the (possibly virtual) shared memory to TempStorage
  // and runs impl for this block's tile.
  THRUST_AGENT_ENTRY(KeyInputIt       keys_in,
                     ValInputIt       values_in,
                     KeyOutputIt      keys_out,
                     ValOutputIt      values_out,
                     BinaryPred       binary_pred,
                     NumSelectedOutIt num_selected_out,
                     Size             num_items,
                     ScanTileState    tile_state,
                     int              num_tiles,
                     char *           shmem)
  {
    TempStorage &storage = *reinterpret_cast<TempStorage *>(shmem);

    impl(storage,
         tile_state,
         core::make_load_iterator(ptx_plan(), keys_in),
         core::make_load_iterator(ptx_plan(), values_in),
         keys_out,
         values_out,
         binary_pred,
         num_items,
         num_tiles,
         num_selected_out);
  }
}; // struct UniqueByKeyAgent
599
+
600
+
601
// Small kernel agent that prepares the decoupled-lookback tile state and
// zeroes the device-side selected-item counter before the main pass runs.
template <class ScanTileState,
          class NumSelectedIt,
          class Size>
struct InitAgent
{
  template <class Arch>
  struct PtxPlan : PtxPolicy<128> {};

  typedef core::specialize_plan<PtxPlan> ptx_plan;

  //---------------------------------------------------------------------
  // Agent entry point
  //---------------------------------------------------------------------

  THRUST_AGENT_ENTRY(ScanTileState tile_state,
                     Size          num_tiles,
                     NumSelectedIt num_selected_out,
                     char *        /*shmem*/)
  {
    // Mark every tile's status as "not yet computed".
    tile_state.InitializeStatus(num_tiles);
    // A single thread resets the output count so a zero-item input still
    // reports zero selections.
    if (blockIdx.x == 0 && threadIdx.x == 0)
      *num_selected_out = 0;
  }

}; // struct InitAgent
626
+
627
+
628
// Launch driver for unique_by_key. Follows the standard CUB two-phase
// protocol: when d_temp_storage is NULL it only computes the required
// temp_storage_bytes and returns; otherwise it carves the provided buffer
// into (tile state, virtual shared memory), launches the init kernel, and
// then the main unique-by-key kernel.
template <class KeyInputIt,
          class ValInputIt,
          class KeyOutputIt,
          class ValOutputIt,
          class BinaryPred,
          class Size,
          class NumSelectedOutIt>
static cudaError_t THRUST_RUNTIME_FUNCTION
doit_step(void *           d_temp_storage,
          size_t &         temp_storage_bytes,
          KeyInputIt       keys_in,
          ValInputIt       values_in,
          KeyOutputIt      keys_out,
          ValOutputIt      values_out,
          BinaryPred       binary_pred,
          NumSelectedOutIt num_selected_out,
          Size             num_items,
          cudaStream_t     stream)
{
  using core::AgentLauncher;
  using core::AgentPlan;
  using core::get_agent_plan;

  typedef AgentLauncher<
      UniqueByKeyAgent<KeyInputIt,
                       ValInputIt,
                       KeyOutputIt,
                       ValOutputIt,
                       BinaryPred,
                       Size,
                       NumSelectedOutIt> >
      unique_agent;

  typedef typename unique_agent::ScanTileState ScanTileState;

  typedef AgentLauncher<
      InitAgent<ScanTileState, NumSelectedOutIt, Size> >
      init_agent;

  using core::get_plan;
  typename get_plan<init_agent>::type   init_plan   = init_agent::get_plan();
  typename get_plan<unique_agent>::type unique_plan = unique_agent::get_plan(stream);


  int    tile_size = unique_plan.items_per_tile;
  size_t num_tiles = cub::DivideAndRoundUp(num_items, tile_size);

  // Virtual shared memory: used only when the plan's shared-memory demand
  // exceeds what the device can provide per block.
  size_t vshmem_size = core::vshmem_size(unique_plan.shared_memory_size,
                                         num_tiles);

  cudaError_t status               = cudaSuccess;
  size_t      allocation_sizes[2]  = {0, vshmem_size};
  status = ScanTileState::AllocationSize(static_cast<int>(num_tiles), allocation_sizes[0]);
  CUDA_CUB_RET_IF_FAIL(status);

  void *allocations[2] = {NULL, NULL};
  // Partition the single user buffer into the two allocations; on the
  // size-query pass this only computes temp_storage_bytes.
  status = cub::AliasTemporaries(d_temp_storage,
                                 temp_storage_bytes,
                                 allocations,
                                 allocation_sizes);
  CUDA_CUB_RET_IF_FAIL(status);

  if (d_temp_storage == NULL)
  {
    // Size-query pass: temp_storage_bytes is set, nothing to launch.
    return status;
  }

  ScanTileState tile_status;
  status = tile_status.Init(static_cast<int>(num_tiles), allocations[0], allocation_sizes[0]);
  CUDA_CUB_RET_IF_FAIL(status);

  // Launch at least one init block even for empty inputs so the
  // num_selected_out counter is still zeroed.
  num_tiles = max<size_t>(1,num_tiles);
  init_agent ia(init_plan, num_tiles, stream, "unique_by_key::init_agent");
  ia.launch(tile_status, num_tiles, num_selected_out);
  CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());

  if (num_items == 0) { return status; }

  char *vshmem_ptr = vshmem_size > 0 ? (char *)allocations[1] : NULL;

  unique_agent ua(unique_plan, num_items, stream, vshmem_ptr, "unique_by_key::unique_agent");
  ua.launch(keys_in,
            values_in,
            keys_out,
            values_out,
            binary_pred,
            num_selected_out,
            num_items,
            tile_status,
            num_tiles);
  CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError());
  return status;
}
722
+
723
// Host-side implementation: queries the temporary-storage requirement,
// allocates one buffer holding both the device-side selection counter and
// the algorithm's scratch space, runs the kernels, synchronizes, and
// returns iterators past the last unique key/value written.
//
// Note: size_type is int, so this path supports at most INT_MAX elements.
template <typename Derived,
          typename KeyInputIt,
          typename ValInputIt,
          typename KeyOutputIt,
          typename ValOutputIt,
          typename BinaryPred>
THRUST_RUNTIME_FUNCTION
pair<KeyOutputIt, ValOutputIt>
unique_by_key(execution_policy<Derived>& policy,
              KeyInputIt                 keys_first,
              KeyInputIt                 keys_last,
              ValInputIt                 values_first,
              KeyOutputIt                keys_result,
              ValOutputIt                values_result,
              BinaryPred                 binary_pred)
{

  typedef int size_type;

  size_type num_items
      = static_cast<size_type>(thrust::distance(keys_first, keys_last));

  size_t       temp_storage_bytes = 0;
  cudaStream_t stream             = cuda_cub::stream(policy);

  // First pass (NULL storage): only computes temp_storage_bytes.
  cudaError_t status;
  status = __unique_by_key::doit_step(NULL,
                                      temp_storage_bytes,
                                      keys_first,
                                      values_first,
                                      keys_result,
                                      values_result,
                                      binary_pred,
                                      reinterpret_cast<size_type*>(NULL),
                                      num_items,
                                      stream);
  cuda_cub::throw_on_error(status, "unique_by_key: failed on 1st step");

  // One buffer, two sub-allocations: [0] the device counter, [1] scratch.
  size_t allocation_sizes[2] = {sizeof(size_type), temp_storage_bytes};
  void * allocations[2]      = {NULL, NULL};

  size_t storage_size = 0;
  status = core::alias_storage(NULL,
                               storage_size,
                               allocations,
                               allocation_sizes);
  cuda_cub::throw_on_error(status, "unique_by_key failed on 1st alias_storage");

  // Allocate temporary storage.
  thrust::detail::temporary_array<thrust::detail::uint8_t, Derived>
      tmp(policy, storage_size);
  void *ptr = static_cast<void*>(tmp.data().get());

  status = core::alias_storage(ptr,
                               storage_size,
                               allocations,
                               allocation_sizes);
  cuda_cub::throw_on_error(status, "unique_by_key failed on 2nd alias_storage");

  size_type* d_num_selected_out
      = thrust::detail::aligned_reinterpret_cast<size_type*>(allocations[0]);

  // Second pass: actually runs the kernels.
  status = __unique_by_key::doit_step(allocations[1],
                                      temp_storage_bytes,
                                      keys_first,
                                      values_first,
                                      keys_result,
                                      values_result,
                                      binary_pred,
                                      d_num_selected_out,
                                      num_items,
                                      stream);
  cuda_cub::throw_on_error(status, "unique_by_key: failed on 2nd step");

  status = cuda_cub::synchronize(policy);
  cuda_cub::throw_on_error(status, "unique_by_key: failed to synchronize");

  // Read the selection count back to the host.
  size_type num_selected = get_value(policy, d_num_selected_out);

  return thrust::make_pair(
    keys_result + num_selected,
    values_result + num_selected
  );
}
807
+
808
+ } // namespace __unique_by_key
809
+
810
+
811
+ //-------------------------
812
+ // Thrust API entry points
813
+ //-------------------------
814
+
815
+
816
// Thrust API entry point for unique_by_key_copy with a user predicate.
// Dispatches to the parallel CUB-based path where a device launch is
// possible, otherwise falls back to the sequential implementation
// (CUDA dynamic parallelism not available).
__thrust_exec_check_disable__
template <class Derived,
          class KeyInputIt,
          class ValInputIt,
          class KeyOutputIt,
          class ValOutputIt,
          class BinaryPred>
pair<KeyOutputIt, ValOutputIt> __host__ __device__
unique_by_key_copy(execution_policy<Derived> &policy,
                   KeyInputIt                 keys_first,
                   KeyInputIt                 keys_last,
                   ValInputIt                 values_first,
                   KeyOutputIt                keys_result,
                   ValOutputIt                values_result,
                   BinaryPred                 binary_pred)
{
  auto ret = thrust::make_pair(keys_result, values_result);
  THRUST_CDP_DISPATCH(
    (ret = __unique_by_key::unique_by_key(policy,
                                          keys_first,
                                          keys_last,
                                          values_first,
                                          keys_result,
                                          values_result,
                                          binary_pred);),
    (ret = thrust::unique_by_key_copy(cvt_to_seq(derived_cast(policy)),
                                      keys_first,
                                      keys_last,
                                      values_first,
                                      keys_result,
                                      values_result,
                                      binary_pred);));
  return ret;
}
850
+
851
+ template <class Derived,
852
+ class KeyInputIt,
853
+ class ValInputIt,
854
+ class KeyOutputIt,
855
+ class ValOutputIt>
856
+ pair<KeyOutputIt, ValOutputIt> __host__ __device__
857
+ unique_by_key_copy(execution_policy<Derived> &policy,
858
+ KeyInputIt keys_first,
859
+ KeyInputIt keys_last,
860
+ ValInputIt values_first,
861
+ KeyOutputIt keys_result,
862
+ ValOutputIt values_result)
863
+ {
864
+ typedef typename iterator_traits<KeyInputIt>::value_type key_type;
865
+ return cuda_cub::unique_by_key_copy(policy,
866
+ keys_first,
867
+ keys_last,
868
+ values_first,
869
+ keys_result,
870
+ values_result,
871
+ equal_to<key_type>());
872
+ }
873
+
874
// Thrust API entry point for in-place unique_by_key with a user predicate.
// Implemented as a copy onto the input ranges themselves on the parallel
// path; falls back to the sequential unique_by_key otherwise.
template <class Derived,
          class KeyInputIt,
          class ValInputIt,
          class BinaryPred>
pair<KeyInputIt, ValInputIt> __host__ __device__
unique_by_key(execution_policy<Derived> &policy,
              KeyInputIt                 keys_first,
              KeyInputIt                 keys_last,
              ValInputIt                 values_first,
              BinaryPred                 binary_pred)
{
  auto ret = thrust::make_pair(keys_first, values_first);
  THRUST_CDP_DISPATCH(
    (ret = cuda_cub::unique_by_key_copy(policy,
                                        keys_first,
                                        keys_last,
                                        values_first,
                                        keys_first,
                                        values_first,
                                        binary_pred);),
    (ret = thrust::unique_by_key(cvt_to_seq(derived_cast(policy)),
                                 keys_first,
                                 keys_last,
                                 values_first,
                                 binary_pred);));
  return ret;
}
901
+
902
+ template <class Derived,
903
+ class KeyInputIt,
904
+ class ValInputIt>
905
+ pair<KeyInputIt, ValInputIt> __host__ __device__
906
+ unique_by_key(execution_policy<Derived> &policy,
907
+ KeyInputIt keys_first,
908
+ KeyInputIt keys_last,
909
+ ValInputIt values_first)
910
+ {
911
+ typedef typename iterator_traits<KeyInputIt>::value_type key_type;
912
+ return cuda_cub::unique_by_key(policy,
913
+ keys_first,
914
+ keys_last,
915
+ values_first,
916
+ equal_to<key_type>());
917
+ }
918
+
919
+
920
+
921
+ } // namespace cuda_cub
922
+ THRUST_NAMESPACE_END
923
+
924
+ #include <thrust/memory.h>
925
+ #include <thrust/unique.h>
926
+
927
+ #endif
miniCUDA124/include/thrust/system/cuda/detail/util.h ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+ #pragma once
28
+
29
+ #include <thrust/detail/config.h>
30
+
31
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
32
+ # pragma GCC system_header
33
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
34
+ # pragma clang system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
36
+ # pragma system_header
37
+ #endif // no system header
38
+
39
+ #include <cstdio>
40
+ #include <exception>
41
+ #include <thrust/iterator/iterator_traits.h>
42
+ #include <thrust/system/cuda/detail/execution_policy.h>
43
+ #include <thrust/system_error.h>
44
+ #include <thrust/system/cuda/error.h>
45
+
46
+ #include <cub/detail/device_synchronize.cuh>
47
+ #include <cub/config.cuh>
48
+ #include <cub/util_device.cuh>
49
+
50
+ #include <nv/target>
51
+
52
+ THRUST_NAMESPACE_BEGIN
53
+ namespace cuda_cub {
54
+
55
+ inline __host__ __device__
56
+ cudaStream_t
57
+ default_stream()
58
+ {
59
+ #ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
60
+ return cudaStreamPerThread;
61
+ #else
62
+ return cudaStreamLegacy;
63
+ #endif
64
+ }
65
+
66
+ // Fallback implementation of the customization point.
67
+ template <class Derived>
68
+ __host__ __device__
69
+ cudaStream_t
70
+ get_stream(execution_policy<Derived> &)
71
+ {
72
+ return default_stream();
73
+ }
74
+
75
+ // Entry point/interface.
76
+ template <class Derived>
77
+ __host__ __device__ cudaStream_t
78
+ stream(execution_policy<Derived> &policy)
79
+ {
80
+ return get_stream(derived_cast(policy));
81
+ }
82
+
83
+
84
+ // Fallback implementation of the customization point.
85
+ template <class Derived>
86
+ __host__ __device__
87
+ bool
88
+ must_perform_optional_stream_synchronization(execution_policy<Derived> &)
89
+ {
90
+ return true;
91
+ }
92
+
93
+ // Entry point/interface.
94
+ template <class Derived>
95
+ __host__ __device__ bool
96
+ must_perform_optional_synchronization(execution_policy<Derived> &policy)
97
+ {
98
+ return must_perform_optional_stream_synchronization(derived_cast(policy));
99
+ }
100
+
101
+
102
+ // Fallback implementation of the customization point.
103
+ __thrust_exec_check_disable__
104
+ template <class Derived>
105
+ __host__ __device__
106
+ cudaError_t
107
+ synchronize_stream(execution_policy<Derived> &policy)
108
+ {
109
+ return cub::SyncStream(stream(policy));
110
+ }
111
+
112
+ // Entry point/interface.
113
+ template <class Policy>
114
+ __host__ __device__
115
+ cudaError_t
116
+ synchronize(Policy &policy)
117
+ {
118
+ return synchronize_stream(derived_cast(policy));
119
+ }
120
+
121
+ // Fallback implementation of the customization point.
122
+ __thrust_exec_check_disable__
123
+ template <class Derived>
124
+ __host__ __device__
125
+ cudaError_t
126
+ synchronize_stream_optional(execution_policy<Derived> &policy)
127
+ {
128
+ cudaError_t result;
129
+
130
+ if (must_perform_optional_synchronization(policy))
131
+ {
132
+ result = synchronize_stream(policy);
133
+ }
134
+ else
135
+ {
136
+ result = cudaSuccess;
137
+ }
138
+
139
+ return result;
140
+ }
141
+
142
+ // Entry point/interface.
143
+ template <class Policy>
144
+ __host__ __device__
145
+ cudaError_t
146
+ synchronize_optional(Policy &policy)
147
+ {
148
+ return synchronize_stream_optional(derived_cast(policy));
149
+ }
150
+
151
+ template <class Type>
152
+ THRUST_HOST_FUNCTION cudaError_t
153
+ trivial_copy_from_device(Type * dst,
154
+ Type const * src,
155
+ size_t count,
156
+ cudaStream_t stream)
157
+ {
158
+ cudaError status = cudaSuccess;
159
+ if (count == 0) return status;
160
+
161
+ status = ::cudaMemcpyAsync(dst,
162
+ src,
163
+ sizeof(Type) * count,
164
+ cudaMemcpyDeviceToHost,
165
+ stream);
166
+ cudaStreamSynchronize(stream);
167
+ return status;
168
+ }
169
+
170
+ template <class Type>
171
+ THRUST_HOST_FUNCTION cudaError_t
172
+ trivial_copy_to_device(Type * dst,
173
+ Type const * src,
174
+ size_t count,
175
+ cudaStream_t stream)
176
+ {
177
+ cudaError status = cudaSuccess;
178
+ if (count == 0) return status;
179
+
180
+ status = ::cudaMemcpyAsync(dst,
181
+ src,
182
+ sizeof(Type) * count,
183
+ cudaMemcpyHostToDevice,
184
+ stream);
185
+ cudaStreamSynchronize(stream);
186
+ return status;
187
+ }
188
+
189
+ template <class Policy, class Type>
190
+ __host__ __device__ cudaError_t
191
+ trivial_copy_device_to_device(Policy & policy,
192
+ Type * dst,
193
+ Type const *src,
194
+ size_t count)
195
+ {
196
+ cudaError_t status = cudaSuccess;
197
+ if (count == 0) return status;
198
+
199
+ cudaStream_t stream = cuda_cub::stream(policy);
200
+ //
201
+ status = ::cudaMemcpyAsync(dst,
202
+ src,
203
+ sizeof(Type) * count,
204
+ cudaMemcpyDeviceToDevice,
205
+ stream);
206
+ cuda_cub::synchronize(policy);
207
+ return status;
208
+ }
209
+
210
// Abort execution: std::terminate on the host, a trap instruction on the
// device (device code cannot call std::terminate). Used after reporting an
// unrecoverable CUDA error from device code.
inline void __host__ __device__
terminate()
{
  NV_IF_TARGET(NV_IS_HOST, (std::terminate();), (asm("trap;");));
}
215
+
216
// Clear any sticky CUDA error state and, if `status` indicates failure,
// report it: on the host by throwing thrust::system_error, on the device by
// printing a diagnostic and trapping (device code cannot throw).
__host__ __device__
inline void throw_on_error(cudaError_t status)
{
  // Clear the global CUDA error state which may have been set by the last
  // call. Otherwise, errors may "leak" to unrelated kernel launches.
#ifdef THRUST_RDC_ENABLED
  cudaGetLastError();
#else
  NV_IF_TARGET(NV_IS_HOST, (cudaGetLastError();));
#endif

  if (cudaSuccess != status)
  {

    // Can't use #if inside NV_IF_TARGET, use a temp macro to hoist the device
    // instructions out of the target logic.
#ifdef THRUST_RDC_ENABLED

    // With RDC the device runtime is available, so the error can be named.
#define THRUST_TEMP_DEVICE_CODE \
  printf("Thrust CUDA backend error: %s: %s\n", \
         cudaGetErrorName(status), \
         cudaGetErrorString(status))

#else

    // Without RDC only the numeric error code can be printed on the device.
#define THRUST_TEMP_DEVICE_CODE \
  printf("Thrust CUDA backend error: %d\n", \
         static_cast<int>(status))

#endif

    NV_IF_TARGET(NV_IS_HOST, (
      throw thrust::system_error(status, thrust::cuda_category());
    ), (
      THRUST_TEMP_DEVICE_CODE;
      cuda_cub::terminate();
    ));

#undef THRUST_TEMP_DEVICE_CODE

  }
}
258
+
259
// Overload of throw_on_error that attaches a caller-supplied message `msg`
// to the reported error. Host: throws thrust::system_error with the message;
// device: prints the message alongside the error and traps.
__host__ __device__
inline void throw_on_error(cudaError_t status, char const *msg)
{
  // Clear the global CUDA error state which may have been set by the last
  // call. Otherwise, errors may "leak" to unrelated kernel launches.
#ifdef THRUST_RDC_ENABLED
  cudaGetLastError();
#else
  NV_IF_TARGET(NV_IS_HOST, (cudaGetLastError();));
#endif

  if (cudaSuccess != status)
  {
    // Can't use #if inside NV_IF_TARGET, use a temp macro to hoist the device
    // instructions out of the target logic.
#ifdef THRUST_RDC_ENABLED

    // With RDC the device runtime is available, so the error can be named.
#define THRUST_TEMP_DEVICE_CODE \
  printf("Thrust CUDA backend error: %s: %s: %s\n", \
         cudaGetErrorName(status), \
         cudaGetErrorString(status),\
         msg)

#else

    // Without RDC only the numeric error code can be printed on the device.
#define THRUST_TEMP_DEVICE_CODE \
  printf("Thrust CUDA backend error: %d: %s\n", \
         static_cast<int>(status), \
         msg)

#endif

    NV_IF_TARGET(NV_IS_HOST, (
      throw thrust::system_error(status, thrust::cuda_category(), msg);
    ), (
      THRUST_TEMP_DEVICE_CODE;
      cuda_cub::terminate();
    ));

#undef THRUST_TEMP_DEVICE_CODE

  }
}
302
+
303
+ // FIXME: Move the iterators elsewhere.
304
+
305
// Random-access input iterator adaptor that applies `UnaryOp` to each element
// read from the wrapped `InputIt`. Dereferencing yields `op(*input)` by
// value; the adaptor is read-only (`reference` is `value_type`).
template <class ValueType,
          class InputIt,
          class UnaryOp>
struct transform_input_iterator_t
{
  typedef transform_input_iterator_t                         self_t;
  typedef typename iterator_traits<InputIt>::difference_type difference_type;
  typedef ValueType                                          value_type;
  typedef void                                               pointer;
  typedef value_type                                         reference;
  typedef std::random_access_iterator_tag                    iterator_category;

  InputIt         input;
  // mutable so the const dereference operators can invoke a non-const op.
  mutable UnaryOp op;

  __host__ __device__ __forceinline__
  transform_input_iterator_t(InputIt input, UnaryOp op)
      : input(input), op(op) {}

#if THRUST_CPP_DIALECT >= 2011
  transform_input_iterator_t(const self_t &) = default;
#endif

  // UnaryOp might not be copy assignable, such as when it is a lambda. Define
  // an explicit copy assignment operator that doesn't try to assign it.
  __host__ __device__
  self_t& operator=(const self_t& o)
  {
    input = o.input;
    return *this;
  }

  /// Postfix increment
  __host__ __device__ __forceinline__ self_t operator++(int)
  {
    self_t retval = *this;
    ++input;
    return retval;
  }

  /// Prefix increment
  __host__ __device__ __forceinline__ self_t operator++()
  {
    ++input;
    return *this;
  }

  /// Indirection
  __host__ __device__ __forceinline__ reference operator*() const
  {
    typename thrust::iterator_value<InputIt>::type x = *input;
    return op(x);
  }
  /// Indirection
  __host__ __device__ __forceinline__ reference operator*()
  {
    typename thrust::iterator_value<InputIt>::type x = *input;
    return op(x);
  }

  /// Addition
  __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
  {
    return self_t(input + n, op);
  }

  /// Addition assignment
  __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
  {
    input += n;
    return *this;
  }

  /// Subtraction
  __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
  {
    return self_t(input - n, op);
  }

  /// Subtraction assignment
  __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
  {
    input -= n;
    return *this;
  }

  /// Distance
  __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
  {
    return input - other.input;
  }

  /// Array subscript
  __host__ __device__ __forceinline__ reference operator[](difference_type n) const
  {
    return op(input[n]);
  }

  /// Equal to
  __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
  {
    return (input == rhs.input);
  }

  /// Not equal to
  __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
  {
    return (input != rhs.input);
  }
}; // struct transform_input_iterator_t
415
+
416
// Random-access input iterator adaptor over two parallel sequences: both
// underlying iterators advance in lock-step and dereferencing yields
// `op(*input1, *input2)` by value. Distance and comparisons are defined in
// terms of `input1` (and both iterators for (in)equality).
template <class ValueType,
          class InputIt1,
          class InputIt2,
          class BinaryOp>
struct transform_pair_of_input_iterators_t
{
  typedef transform_pair_of_input_iterators_t                 self_t;
  typedef typename iterator_traits<InputIt1>::difference_type difference_type;
  typedef ValueType                                           value_type;
  typedef void                                                pointer;
  typedef value_type                                          reference;
  typedef std::random_access_iterator_tag                     iterator_category;

  InputIt1         input1;
  InputIt2         input2;
  // mutable so the const dereference operators can invoke a non-const op.
  mutable BinaryOp op;

  __host__ __device__ __forceinline__
  transform_pair_of_input_iterators_t(InputIt1 input1_,
                                      InputIt2 input2_,
                                      BinaryOp op_)
      : input1(input1_), input2(input2_), op(op_) {}

#if THRUST_CPP_DIALECT >= 2011
  transform_pair_of_input_iterators_t(const self_t &) = default;
#endif

  // BinaryOp might not be copy assignable, such as when it is a lambda.
  // Define an explicit copy assignment operator that doesn't try to assign it.
  __host__ __device__
  self_t& operator=(const self_t& o)
  {
    input1 = o.input1;
    input2 = o.input2;
    return *this;
  }

  /// Postfix increment
  __host__ __device__ __forceinline__ self_t operator++(int)
  {
    self_t retval = *this;
    ++input1;
    ++input2;
    return retval;
  }

  /// Prefix increment
  __host__ __device__ __forceinline__ self_t operator++()
  {
    ++input1;
    ++input2;
    return *this;
  }

  /// Indirection
  __host__ __device__ __forceinline__ reference operator*() const
  {
    return op(*input1, *input2);
  }
  /// Indirection
  __host__ __device__ __forceinline__ reference operator*()
  {
    return op(*input1, *input2);
  }

  /// Addition
  __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
  {
    return self_t(input1 + n, input2 + n, op);
  }

  /// Addition assignment
  __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
  {
    input1 += n;
    input2 += n;
    return *this;
  }

  /// Subtraction
  __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
  {
    return self_t(input1 - n, input2 - n, op);
  }

  /// Subtraction assignment
  __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
  {
    input1 -= n;
    input2 -= n;
    return *this;
  }

  /// Distance (measured on the first iterator only)
  __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
  {
    return input1 - other.input1;
  }

  /// Array subscript
  __host__ __device__ __forceinline__ reference operator[](difference_type n) const
  {
    return op(input1[n], input2[n]);
  }

  /// Equal to
  __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
  {
    return (input1 == rhs.input1) && (input2 == rhs.input2);
  }

  /// Not equal to
  __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
  {
    return (input1 != rhs.input1) || (input2 != rhs.input2);
  }

}; // struct transform_pair_of_input_iterators_t
534
+
535
+
536
+ struct identity
537
+ {
538
+ template <class T>
539
+ __host__ __device__ T const &
540
+ operator()(T const &t) const
541
+ {
542
+ return t;
543
+ }
544
+
545
+ template <class T>
546
+ __host__ __device__ T &
547
+ operator()(T &t) const
548
+ {
549
+ return t;
550
+ }
551
+ };
552
+
553
+
554
// Random-access iterator over the arithmetic sequence `count, count+1, ...`.
// Dereferencing returns the current counter value by value; the iterator
// owns no storage beyond the counter itself.
template <class T>
struct counting_iterator_t
{
  typedef counting_iterator_t             self_t;
  typedef T                               difference_type;
  typedef T                               value_type;
  typedef void                            pointer;
  typedef T                               reference;
  typedef std::random_access_iterator_tag iterator_category;

  T count;

  __host__ __device__ __forceinline__
  counting_iterator_t(T count_) : count(count_) {}

  /// Postfix increment
  __host__ __device__ __forceinline__ self_t operator++(int)
  {
    self_t retval = *this;
    ++count;
    return retval;
  }

  /// Prefix increment
  __host__ __device__ __forceinline__ self_t operator++()
  {
    ++count;
    return *this;
  }

  /// Indirection
  __host__ __device__ __forceinline__ reference operator*() const
  {
    return count;
  }

  /// Indirection
  __host__ __device__ __forceinline__ reference operator*()
  {
    return count;
  }

  /// Addition
  __host__ __device__ __forceinline__ self_t operator+(difference_type n) const
  {
    return self_t(count + n);
  }

  /// Addition assignment
  __host__ __device__ __forceinline__ self_t &operator+=(difference_type n)
  {
    count += n;
    return *this;
  }

  /// Subtraction
  __host__ __device__ __forceinline__ self_t operator-(difference_type n) const
  {
    return self_t(count - n);
  }

  /// Subtraction assignment
  __host__ __device__ __forceinline__ self_t &operator-=(difference_type n)
  {
    count -= n;
    return *this;
  }

  /// Distance
  __host__ __device__ __forceinline__ difference_type operator-(self_t other) const
  {
    return count - other.count;
  }

  /// Array subscript
  __host__ __device__ __forceinline__ reference operator[](difference_type n) const
  {
    return count + n;
  }

  /// Equal to
  __host__ __device__ __forceinline__ bool operator==(const self_t &rhs) const
  {
    return (count == rhs.count);
  }

  /// Not equal to
  __host__ __device__ __forceinline__ bool operator!=(const self_t &rhs) const
  {
    return (count != rhs.count);
  }

}; // struct counting_iterator_t
647
+
648
+ } // cuda_
649
+
650
+ THRUST_NAMESPACE_END
miniCUDA124/include/thrust/system/detail/adl/adjacent_difference.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the adjacent_difference.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch adjacent_difference
32
+
33
+ #include <thrust/system/detail/sequential/adjacent_difference.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/adjacent_difference.h>
40
+ #include <thrust/system/cuda/detail/adjacent_difference.h>
41
+ #include <thrust/system/omp/detail/adjacent_difference.h>
42
+ #include <thrust/system/tbb/detail/adjacent_difference.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_ADJACENT_DIFFERENCE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/adjacent_difference.h>
46
+ #include __THRUST_HOST_SYSTEM_ADJACENT_DIFFERENCE_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_ADJACENT_DIFFERENCE_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_ADJACENT_DIFFERENCE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/adjacent_difference.h>
50
+ #include __THRUST_DEVICE_SYSTEM_ADJACENT_DIFFERENCE_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_ADJACENT_DIFFERENCE_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/assign_value.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the assign_value.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch assign_value
32
+
33
+ #include <thrust/system/detail/sequential/assign_value.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/assign_value.h>
40
+ #include <thrust/system/cuda/detail/assign_value.h>
41
+ #include <thrust/system/omp/detail/assign_value.h>
42
+ #include <thrust/system/tbb/detail/assign_value.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_ASSIGN_VALUE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/assign_value.h>
46
+ #include __THRUST_HOST_SYSTEM_ASSIGN_VALUE_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_ASSIGN_VALUE_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_ASSIGN_VALUE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/assign_value.h>
50
+ #include __THRUST_DEVICE_SYSTEM_ASSIGN_VALUE_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_ASSIGN_VALUE_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/binary_search.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the binary_search.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch binary_search
32
+
33
+ #include <thrust/system/detail/sequential/binary_search.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/binary_search.h>
40
+ #include <thrust/system/cuda/detail/binary_search.h>
41
+ #include <thrust/system/omp/detail/binary_search.h>
42
+ #include <thrust/system/tbb/detail/binary_search.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_BINARY_SEARCH_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/binary_search.h>
46
+ #include __THRUST_HOST_SYSTEM_BINARY_SEARCH_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_BINARY_SEARCH_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_BINARY_SEARCH_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/binary_search.h>
50
+ #include __THRUST_DEVICE_SYSTEM_BINARY_SEARCH_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_BINARY_SEARCH_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/copy.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the copy.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch copy
32
+
33
+ #include <thrust/system/detail/sequential/copy.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/copy.h>
40
+ #include <thrust/system/cuda/detail/copy.h>
41
+ #include <thrust/system/omp/detail/copy.h>
42
+ #include <thrust/system/tbb/detail/copy.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_COPY_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/copy.h>
46
+ #include __THRUST_HOST_SYSTEM_COPY_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_COPY_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_COPY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/copy.h>
50
+ #include __THRUST_DEVICE_SYSTEM_COPY_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_COPY_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/copy_if.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the copy_if.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch copy_if
32
+
33
+ #include <thrust/system/detail/sequential/copy_if.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/copy_if.h>
40
+ #include <thrust/system/cuda/detail/copy_if.h>
41
+ #include <thrust/system/omp/detail/copy_if.h>
42
+ #include <thrust/system/tbb/detail/copy_if.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_COPY_IF_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/copy_if.h>
46
+ #include __THRUST_HOST_SYSTEM_COPY_IF_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_COPY_IF_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_COPY_IF_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/copy_if.h>
50
+ #include __THRUST_DEVICE_SYSTEM_COPY_IF_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_COPY_IF_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/count.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the count.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch count
32
+
33
+ #include <thrust/system/detail/sequential/count.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/count.h>
40
+ #include <thrust/system/cuda/detail/count.h>
41
+ #include <thrust/system/omp/detail/count.h>
42
+ #include <thrust/system/tbb/detail/count.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_COUNT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/count.h>
46
+ #include __THRUST_HOST_SYSTEM_COUNT_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_COUNT_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_COUNT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/count.h>
50
+ #include __THRUST_DEVICE_SYSTEM_COUNT_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_COUNT_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/equal.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the equal.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch equal
32
+
33
+ #include <thrust/system/detail/sequential/equal.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/equal.h>
40
+ #include <thrust/system/cuda/detail/equal.h>
41
+ #include <thrust/system/omp/detail/equal.h>
42
+ #include <thrust/system/tbb/detail/equal.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_EQUAL_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/equal.h>
46
+ #include __THRUST_HOST_SYSTEM_EQUAL_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_EQUAL_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_EQUAL_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/equal.h>
50
+ #include __THRUST_DEVICE_SYSTEM_EQUAL_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_EQUAL_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/extrema.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the extrema.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch extrema
32
+
33
+ #include <thrust/system/detail/sequential/extrema.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/extrema.h>
40
+ #include <thrust/system/cuda/detail/extrema.h>
41
+ #include <thrust/system/omp/detail/extrema.h>
42
+ #include <thrust/system/tbb/detail/extrema.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_EXTREMA_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/extrema.h>
46
+ #include __THRUST_HOST_SYSTEM_EXTREMA_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_EXTREMA_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_EXTREMA_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/extrema.h>
50
+ #include __THRUST_DEVICE_SYSTEM_EXTREMA_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_EXTREMA_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/fill.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the fill.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch fill
32
+
33
+ #include <thrust/system/detail/sequential/fill.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/fill.h>
40
+ #include <thrust/system/cuda/detail/fill.h>
41
+ #include <thrust/system/omp/detail/fill.h>
42
+ #include <thrust/system/tbb/detail/fill.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_FILL_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/fill.h>
46
+ #include __THRUST_HOST_SYSTEM_FILL_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_FILL_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_FILL_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/fill.h>
50
+ #include __THRUST_DEVICE_SYSTEM_FILL_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_FILL_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/find.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the find.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch find
32
+
33
+ #include <thrust/system/detail/sequential/find.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/find.h>
40
+ #include <thrust/system/cuda/detail/find.h>
41
+ #include <thrust/system/omp/detail/find.h>
42
+ #include <thrust/system/tbb/detail/find.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_FIND_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/find.h>
46
+ #include __THRUST_HOST_SYSTEM_FIND_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_FIND_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_FIND_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/find.h>
50
+ #include __THRUST_DEVICE_SYSTEM_FIND_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_FIND_HEADER
52
+
miniCUDA124/include/thrust/system/detail/adl/for_each.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ // the purpose of this header is to #include the for_each.h header
30
+ // of the sequential, host, and device systems. It should be #included in any
31
+ // code which uses adl to dispatch for_each
32
+
33
+ #include <thrust/system/detail/sequential/for_each.h>
34
+
35
+ // SCons can't see through the #defines below to figure out what this header
36
+ // includes, so we fake it out by specifying all possible files we might end up
37
+ // including inside an #if 0.
38
+ #if 0
39
+ #include <thrust/system/cpp/detail/for_each.h>
40
+ #include <thrust/system/cuda/detail/for_each.h>
41
+ #include <thrust/system/omp/detail/for_each.h>
42
+ #include <thrust/system/tbb/detail/for_each.h>
43
+ #endif
44
+
45
+ #define __THRUST_HOST_SYSTEM_FOR_EACH_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/for_each.h>
46
+ #include __THRUST_HOST_SYSTEM_FOR_EACH_HEADER
47
+ #undef __THRUST_HOST_SYSTEM_FOR_EACH_HEADER
48
+
49
+ #define __THRUST_DEVICE_SYSTEM_FOR_EACH_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/for_each.h>
50
+ #include __THRUST_DEVICE_SYSTEM_FOR_EACH_HEADER
51
+ #undef __THRUST_DEVICE_SYSTEM_FOR_EACH_HEADER
52
+