ZTWHHH committed on
Commit
a9376b4
·
verified ·
1 Parent(s): 96fa03e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/algorithm_wrapper.h +27 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/copy_construct_range.h +46 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/copy_construct_range.inl +310 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/default_construct_range.h +36 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/destroy_range.inl +167 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/fill_construct_range.h +35 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/no_throw_allocator.h +72 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/tagged_allocator.inl +104 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/binary_search.inl +480 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config.h +24 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/compiler.h +189 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/compiler_fence.h +62 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/config.h +40 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/cpp_compatibility.h +101 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/cpp_dialect.h +140 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/deprecated.h +42 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/device_system.h +44 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/exec_check_disable.h +43 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/forceinline.h +36 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/global_workarounds.h +27 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/host_device.h +44 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/host_system.h +41 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/memory_resource.h +35 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/namespace.h +120 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/simple_defines.h +30 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/contiguous_storage.h +235 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/contiguous_storage.inl +550 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/copy.h +90 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/copy_if.h +71 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/copy_if.inl +107 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/count.h +60 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/dependencies_aware_execution_policy.h +106 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/device_malloc.inl +53 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/device_ptr.inl +64 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/distance.inl +35 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/execute_with_allocator.h +149 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/execute_with_dependencies.h +267 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/extrema.inl +169 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/arithmetic_operators.h +436 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/assignment_operator.h +79 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/bitwise_operators.h +338 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/compound_assignment_operators.h +512 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/logical_operators.h +143 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/relational_operators.h +322 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/gather.inl +161 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/integer_math.h +152 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/integer_traits.h +130 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/logical.inl +95 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/memory_algorithms.h +237 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/memory_wrapper.h +30 -0
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/algorithm_wrapper.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Copyright 2020 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Wrapper around <algorithm> used by Thrust internals instead of a direct
// include.

#pragma once

// When a compiler uses Thrust as part of its implementation of Standard C++
// algorithms, a cycle of included files may result when Thrust code tries to
// use a standard algorithm. Having a macro that is defined only when Thrust
// is including an algorithms-related header gives the compiler a chance to
// detect and break the cycle of includes.

#define THRUST_INCLUDING_ALGORITHMS_HEADER
#include <algorithm>
#undef THRUST_INCLUDING_ALGORITHMS_HEADER
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/copy_construct_range.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Copyright 2008-2013 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/detail/execution_policy.h>

THRUST_NAMESPACE_BEGIN
namespace detail
{

// Copy-constructs the elements of [first, last) into the uninitialized
// storage beginning at result. The implementation (see
// copy_construct_range.inl) routes through the allocator's construct()
// member only when the allocator or the element type requires it.
// Returns the end of the constructed output range.
template<typename System, typename Allocator, typename InputIterator, typename Pointer>
__host__ __device__
  Pointer copy_construct_range(thrust::execution_policy<System> &from_system,
                               Allocator &a,
                               InputIterator first,
                               InputIterator last,
                               Pointer result);

// As copy_construct_range, but the input range is described by first and a
// count n rather than an iterator pair.
template<typename System, typename Allocator, typename InputIterator, typename Size, typename Pointer>
__host__ __device__
  Pointer copy_construct_range_n(thrust::execution_policy<System> &from_system,
                                 Allocator &a,
                                 InputIterator first,
                                 Size n,
                                 Pointer result);

} // end detail
THRUST_NAMESPACE_END

#include <thrust/detail/allocator/copy_construct_range.inl>
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/copy_construct_range.inl ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Copyright 2008-2013 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/detail/allocator/allocator_traits.h>
#include <thrust/detail/type_traits/pointer_traits.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/detail/copy.h>
#include <thrust/tuple.h>
#include <thrust/advance.h>
#include <thrust/distance.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/for_each.h>
#include <thrust/detail/memory_wrapper.h>

THRUST_NAMESPACE_BEGIN
namespace detail
{
namespace allocator_traits_detail
{


// Functor applied element-wise through for_each: copy-constructs one output
// element from one input element via allocator_traits<Allocator>::construct.
// The tuple argument pairs (input element, uninitialized output element).
template<typename Allocator, typename InputType, typename OutputType>
  struct copy_construct_with_allocator
{
  // reference, not a copy: construct() may depend on allocator state
  Allocator &a;

  __host__ __device__
  copy_construct_with_allocator(Allocator &a)
    : a(a)
  {}

  template<typename Tuple>
  inline __host__ __device__
  void operator()(Tuple t)
  {
    const InputType &in = thrust::get<0>(t);
    OutputType &out = thrust::get<1>(t);

    allocator_traits<Allocator>::construct(a, &out, in);
  }
};


// we need to use allocator_traits<Allocator>::construct() to
// copy construct a T if either:
// 1. Allocator has a 2-argument construct() member or
// 2. T has a non-trivial copy constructor
template<typename Allocator, typename T>
  struct needs_copy_construct_via_allocator
    : integral_constant<
        bool,
        (has_member_construct2<Allocator,T,T>::value || !has_trivial_copy_constructor<T>::value)
      >
{};


// we know that std::allocator::construct's only effect is to call T's
// copy constructor, so we needn't consider or use its construct() member for copy construction
template<typename U, typename T>
  struct needs_copy_construct_via_allocator<std::allocator<U>, T>
    : integral_constant<
        bool,
        !has_trivial_copy_constructor<T>::value
      >
{};


// XXX it's regrettable that this implementation is copied almost
//     exactly from system::detail::generic::uninitialized_copy
//     perhaps generic::uninitialized_copy could call this routine
//     with a default allocator
//
// This overload is selected when FromSystem converts to ToSystem, i.e. the
// input range is directly accessible from the allocator's system.
template<typename Allocator, typename FromSystem, typename ToSystem, typename InputIterator, typename Pointer>
__host__ __device__
  typename enable_if_convertible<
    FromSystem,
    ToSystem,
    Pointer
  >::type
    uninitialized_copy_with_allocator(Allocator &a,
                                      const thrust::execution_policy<FromSystem> &,
                                      const thrust::execution_policy<ToSystem> &to_system,
                                      InputIterator first,
                                      InputIterator last,
                                      Pointer result)
{
  // zip up the iterators
  typedef thrust::tuple<InputIterator,Pointer> IteratorTuple;
  typedef thrust::zip_iterator<IteratorTuple> ZipIterator;

  ZipIterator begin = thrust::make_zip_iterator(thrust::make_tuple(first,result));
  ZipIterator end = begin;

  // get a zip_iterator pointing to the end
  const typename thrust::iterator_difference<InputIterator>::type n = thrust::distance(first,last);
  thrust::advance(end,n);

  // create a functor
  typedef typename iterator_traits<InputIterator>::value_type InputType;
  typedef typename iterator_traits<Pointer>::value_type OutputType;

  // do the for_each
  // note we use to_system to dispatch the for_each
  thrust::for_each(to_system, begin, end, copy_construct_with_allocator<Allocator,InputType,OutputType>(a));

  // return the end of the output range
  return thrust::get<1>(end.get_iterator_tuple());
}


// XXX it's regrettable that this implementation is copied almost
//     exactly from system::detail::generic::uninitialized_copy_n
//     perhaps generic::uninitialized_copy_n could call this routine
//     with a default allocator
//
// Counted variant of the overload above.
template<typename Allocator, typename FromSystem, typename ToSystem, typename InputIterator, typename Size, typename Pointer>
__host__ __device__
  typename enable_if_convertible<
    FromSystem,
    ToSystem,
    Pointer
  >::type
    uninitialized_copy_with_allocator_n(Allocator &a,
                                        const thrust::execution_policy<FromSystem> &,
                                        const thrust::execution_policy<ToSystem> &to_system,
                                        InputIterator first,
                                        Size n,
                                        Pointer result)
{
  // zip up the iterators
  typedef thrust::tuple<InputIterator,Pointer> IteratorTuple;
  typedef thrust::zip_iterator<IteratorTuple> ZipIterator;

  ZipIterator begin = thrust::make_zip_iterator(thrust::make_tuple(first,result));

  // create a functor
  typedef typename iterator_traits<InputIterator>::value_type InputType;
  typedef typename iterator_traits<Pointer>::value_type OutputType;

  // do the for_each_n
  // note we use to_system to dispatch the for_each_n
  ZipIterator end = thrust::for_each_n(to_system, begin, n, copy_construct_with_allocator<Allocator,InputType,OutputType>(a));

  // return the end of the output range
  return thrust::get<1>(end.get_iterator_tuple());
}


// Fallback overload when the two systems are not convertible: the allocator's
// construct() cannot be applied across systems, so fall back to a plain
// cross-system copy (which copy-constructs via assignment semantics).
template<typename Allocator, typename FromSystem, typename ToSystem, typename InputIterator, typename Pointer>
__host__ __device__
  typename disable_if_convertible<
    FromSystem,
    ToSystem,
    Pointer
  >::type
    uninitialized_copy_with_allocator(Allocator &,
                                      const thrust::execution_policy<FromSystem> &from_system,
                                      const thrust::execution_policy<ToSystem> &to_system,
                                      InputIterator first,
                                      InputIterator last,
                                      Pointer result)
{
  // the systems aren't trivially interoperable
  // just call two_system_copy and hope for the best
  return thrust::detail::two_system_copy(from_system, to_system, first, last, result);
} // end uninitialized_copy_with_allocator()


// Counted variant of the cross-system fallback above.
template<typename Allocator, typename FromSystem, typename ToSystem, typename InputIterator, typename Size, typename Pointer>
__host__ __device__
  typename disable_if_convertible<
    FromSystem,
    ToSystem,
    Pointer
  >::type
    uninitialized_copy_with_allocator_n(Allocator &,
                                        const thrust::execution_policy<FromSystem> &from_system,
                                        const thrust::execution_policy<ToSystem> &to_system,
                                        InputIterator first,
                                        Size n,
                                        Pointer result)
{
  // the systems aren't trivially interoperable
  // just call two_system_copy_n and hope for the best
  return thrust::detail::two_system_copy_n(from_system, to_system, first, n, result);
} // end uninitialized_copy_with_allocator_n()


// Fast path: the allocator need not participate in copy construction
// (trivial copy constructor, no special construct() member), so an ordinary
// two-system copy suffices.
template<typename FromSystem, typename Allocator, typename InputIterator, typename Pointer>
__host__ __device__
  typename disable_if<
    needs_copy_construct_via_allocator<
      Allocator,
      typename pointer_element<Pointer>::type
    >::value,
    Pointer
  >::type
    copy_construct_range(thrust::execution_policy<FromSystem> &from_system,
                         Allocator &a,
                         InputIterator first,
                         InputIterator last,
                         Pointer result)
{
  // just call two_system_copy
  return thrust::detail::two_system_copy(from_system, allocator_system<Allocator>::get(a), first, last, result);
}


// Counted variant of the fast path above.
template<typename FromSystem, typename Allocator, typename InputIterator, typename Size, typename Pointer>
__host__ __device__
  typename disable_if<
    needs_copy_construct_via_allocator<
      Allocator,
      typename pointer_element<Pointer>::type
    >::value,
    Pointer
  >::type
    copy_construct_range_n(thrust::execution_policy<FromSystem> &from_system,
                           Allocator &a,
                           InputIterator first,
                           Size n,
                           Pointer result)
{
  // just call two_system_copy_n
  return thrust::detail::two_system_copy_n(from_system, allocator_system<Allocator>::get(a), first, n, result);
}


// Slow path: the allocator (or the element type) requires construct() to be
// called per element, so dispatch to uninitialized_copy_with_allocator.
template<typename FromSystem, typename Allocator, typename InputIterator, typename Pointer>
__host__ __device__
  typename enable_if<
    needs_copy_construct_via_allocator<
      Allocator,
      typename pointer_element<Pointer>::type
    >::value,
    Pointer
  >::type
    copy_construct_range(thrust::execution_policy<FromSystem> &from_system,
                         Allocator &a,
                         InputIterator first,
                         InputIterator last,
                         Pointer result)
{
  return uninitialized_copy_with_allocator(a, from_system, allocator_system<Allocator>::get(a), first, last, result);
}


// Counted variant of the slow path above.
template<typename FromSystem, typename Allocator, typename InputIterator, typename Size, typename Pointer>
__host__ __device__
  typename enable_if<
    needs_copy_construct_via_allocator<
      Allocator,
      typename pointer_element<Pointer>::type
    >::value,
    Pointer
  >::type
    copy_construct_range_n(thrust::execution_policy<FromSystem> &from_system,
                           Allocator &a,
                           InputIterator first,
                           Size n,
                           Pointer result)
{
  return uninitialized_copy_with_allocator_n(a, from_system, allocator_system<Allocator>::get(a), first, n, result);
}


} // end allocator_traits_detail


// Public entry point (declared in copy_construct_range.h): forwards to the
// SFINAE-dispatched implementations above.
template<typename System, typename Allocator, typename InputIterator, typename Pointer>
__host__ __device__
  Pointer copy_construct_range(thrust::execution_policy<System> &from_system,
                               Allocator &a,
                               InputIterator first,
                               InputIterator last,
                               Pointer result)
{
  return allocator_traits_detail::copy_construct_range(from_system, a, first, last, result);
}


// Counted public entry point; forwarding as above.
template<typename System, typename Allocator, typename InputIterator, typename Size, typename Pointer>
__host__ __device__
  Pointer copy_construct_range_n(thrust::execution_policy<System> &from_system,
                                 Allocator &a,
                                 InputIterator first,
                                 Size n,
                                 Pointer result)
{
  return allocator_traits_detail::copy_construct_range_n(from_system, a, first, n, result);
}


} // end detail
THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/default_construct_range.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Copyright 2008-2013 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

THRUST_NAMESPACE_BEGIN
namespace detail
{


// Default-constructs n elements of uninitialized storage starting at p.
// The implementation (see default_construct_range.inl) dispatches through
// the allocator's construct() member when required.
template<typename Allocator, typename Pointer, typename Size>
__host__ __device__
inline void default_construct_range(Allocator &a, Pointer p, Size n);


} // end detail
THRUST_NAMESPACE_END

#include <thrust/detail/allocator/default_construct_range.inl>
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/destroy_range.inl ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Copyright 2008-2021 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

#include <thrust/detail/allocator/destroy_range.h>
#include <thrust/detail/allocator/allocator_traits.h>
#include <thrust/detail/type_traits/pointer_traits.h>
#include <thrust/for_each.h>
#include <thrust/detail/memory_wrapper.h>

THRUST_NAMESPACE_BEGIN
namespace detail
{
namespace allocator_traits_detail
{


// destroy_range has three cases:
// if Allocator has an effectful member function destroy:
//   1. destroy via the allocator
// else
//   2. if T has a non-trivial destructor, destroy the range without using the allocator
//   3. if T has a trivial destructor, do a no-op

template<typename Allocator, typename T>
  struct has_effectful_member_destroy
    : has_member_destroy<Allocator,T>
{};

// std::allocator::destroy's only effect is to invoke its argument's destructor
template<typename U, typename T>
  struct has_effectful_member_destroy<std::allocator<U>, T>
    : thrust::detail::false_type
{};

// case 1: Allocator has an effectful 1-argument member function "destroy"
template<typename Allocator, typename Pointer>
  struct enable_if_destroy_range_case1
    : thrust::detail::enable_if<
        has_effectful_member_destroy<
          Allocator,
          typename pointer_element<Pointer>::type
        >::value
      >
{};

// case 2: Allocator has no member function "destroy", but T has a non-trivial destructor
template<typename Allocator, typename Pointer>
  struct enable_if_destroy_range_case2
    : thrust::detail::enable_if<
        !has_effectful_member_destroy<
          Allocator,
          typename pointer_element<Pointer>::type
        >::value &&
        !has_trivial_destructor<
          typename pointer_element<Pointer>::type
        >::value
      >
{};

// case 3: Allocator has no member function "destroy", and T has a trivial destructor
template<typename Allocator, typename Pointer>
  struct enable_if_destroy_range_case3
    : thrust::detail::enable_if<
        !has_effectful_member_destroy<
          Allocator,
          typename pointer_element<Pointer>::type
        >::value &&
        has_trivial_destructor<
          typename pointer_element<Pointer>::type
        >::value
      >
{};



// Functor applied per element in case 1: destroys via
// allocator_traits<Allocator>::destroy.
template<typename Allocator>
  struct destroy_via_allocator
{
  // reference, not a copy: destroy() may depend on allocator state
  Allocator &a;

  __host__ __device__
  destroy_via_allocator(Allocator &a)
    : a(a)
  {}

  template<typename T>
  inline __host__ __device__
  void operator()(T &x)
  {
    allocator_traits<Allocator>::destroy(a, &x);
  }
};


// destroy_range case 1: destroy via allocator
template<typename Allocator, typename Pointer, typename Size>
__host__ __device__
  typename enable_if_destroy_range_case1<Allocator,Pointer>::type
    destroy_range(Allocator &a, Pointer p, Size n)
{
  thrust::for_each_n(allocator_system<Allocator>::get(a), p, n, destroy_via_allocator<Allocator>(a));
}


// Functor applied per element in case 2: invokes the destructor directly.
// we must prepare for His coming
// (the name is a Ghostbusters joke: Gozer the Destructor)
struct gozer
{
  __thrust_exec_check_disable__
  template<typename T>
  inline __host__ __device__
  void operator()(T &x)
  {
    x.~T();
  }
};

// destroy_range case 2: destroy without the allocator
template<typename Allocator, typename Pointer, typename Size>
__host__ __device__
  typename enable_if_destroy_range_case2<Allocator,Pointer>::type
    destroy_range(Allocator &a, Pointer p, Size n)
{
  thrust::for_each_n(allocator_system<Allocator>::get(a), p, n, gozer());
}


// destroy_range case 3: no-op
template<typename Allocator, typename Pointer, typename Size>
__host__ __device__
  typename enable_if_destroy_range_case3<Allocator,Pointer>::type
    destroy_range(Allocator &, Pointer, Size)
{
  // no op
}


} // end allocator_traits_detail


// Public entry point (declared in destroy_range.h): forwards to the
// SFINAE-dispatched three-case implementation above.
template<typename Allocator, typename Pointer, typename Size>
__host__ __device__
  void destroy_range(Allocator &a, Pointer p, Size n)
{
  return allocator_traits_detail::destroy_range(a,p,n);
}


} // end detail
THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/fill_construct_range.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Copyright 2008-2013 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

THRUST_NAMESPACE_BEGIN
namespace detail
{


// Copy-constructs n elements of uninitialized storage starting at p, each
// from value. The implementation (see fill_construct_range.inl) dispatches
// through the allocator's construct() member when required.
template<typename Allocator, typename Pointer, typename Size, typename T>
__host__ __device__
inline void fill_construct_range(Allocator &a, Pointer p, Size n, const T &value);


} // end detail
THRUST_NAMESPACE_END

#include <thrust/detail/allocator/fill_construct_range.inl>
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/no_throw_allocator.h ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #include <nv/target>
22
+
23
+ THRUST_NAMESPACE_BEGIN
24
+ namespace detail
25
+ {
26
+
27
+ template<typename BaseAllocator>
28
+ struct no_throw_allocator : BaseAllocator
29
+ {
30
+ private:
31
+ typedef BaseAllocator super_t;
32
+
33
+ public:
34
+ inline __host__ __device__
35
+ no_throw_allocator(const BaseAllocator &other = BaseAllocator())
36
+ : super_t(other)
37
+ {}
38
+
39
+ template<typename U>
40
+ struct rebind
41
+ {
42
+ typedef no_throw_allocator<typename super_t::template rebind<U>::other> other;
43
+ }; // end rebind
44
+
45
+ __host__ __device__
46
+ void deallocate(typename super_t::pointer p, typename super_t::size_type n)
47
+ {
48
+ NV_IF_TARGET(NV_IS_HOST, (
49
+ try
50
+ {
51
+ super_t::deallocate(p, n);
52
+ } // end try
53
+ catch(...)
54
+ {
55
+ // catch anything
56
+ } // end catch
57
+ ), (
58
+ super_t::deallocate(p, n);
59
+ ));
60
+ } // end deallocate()
61
+
62
+ inline __host__ __device__
63
+ bool operator==(no_throw_allocator const &other) { return super_t::operator==(other); }
64
+
65
+ inline __host__ __device__
66
+ bool operator!=(no_throw_allocator const &other) { return super_t::operator!=(other); }
67
+ }; // end no_throw_allocator
68
+
69
+ } // end detail
70
+ THRUST_NAMESPACE_END
71
+
72
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/allocator/tagged_allocator.inl ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/allocator/tagged_allocator.h>
21
+ #include <limits>
22
+
23
+ THRUST_NAMESPACE_BEGIN
24
+ namespace detail
25
+ {
26
+
27
+
28
+ template<typename T, typename Tag, typename Pointer>
29
+ __host__ __device__
30
+ tagged_allocator<T,Tag,Pointer>
31
+ ::tagged_allocator()
32
+ {}
33
+
34
+
35
+ template<typename T, typename Tag, typename Pointer>
36
+ __host__ __device__
37
+ tagged_allocator<T,Tag,Pointer>
38
+ ::tagged_allocator(const tagged_allocator<T,Tag,Pointer> &)
39
+ {}
40
+
41
+
42
+ template<typename T, typename Tag, typename Pointer>
43
+ template<typename U, typename OtherPointer>
44
+ __host__ __device__
45
+ tagged_allocator<T,Tag,Pointer>
46
+ ::tagged_allocator(const tagged_allocator<U,Tag,OtherPointer> &)
47
+ {}
48
+
49
+
50
+ template<typename T, typename Tag, typename Pointer>
51
+ __host__ __device__
52
+ tagged_allocator<T,Tag,Pointer>
53
+ ::~tagged_allocator()
54
+ {}
55
+
56
+
57
+ template<typename T, typename Tag, typename Pointer>
58
+ __host__ __device__
59
+ typename tagged_allocator<T,Tag,Pointer>::pointer
60
+ tagged_allocator<T,Tag,Pointer>
61
+ ::address(reference x) const
62
+ {
63
+ return &x;
64
+ }
65
+
66
+
67
+ template<typename T, typename Tag, typename Pointer>
68
+ __host__ __device__
69
+ typename tagged_allocator<T,Tag,Pointer>::const_pointer
70
+ tagged_allocator<T,Tag,Pointer>
71
+ ::address(const_reference x) const
72
+ {
73
+ return &x;
74
+ }
75
+
76
+
77
+ template<typename T, typename Tag, typename Pointer>
78
+ typename tagged_allocator<T,Tag,Pointer>::size_type
79
+ tagged_allocator<T,Tag,Pointer>
80
+ ::max_size() const
81
+ {
82
+ return (std::numeric_limits<size_type>::max)() / sizeof(T);
83
+ }
84
+
85
+
86
+ template<typename T1, typename Pointer1, typename T2, typename Pointer2, typename Tag>
87
+ __host__ __device__
88
+ bool operator==(const tagged_allocator<T1,Pointer1,Tag> &, const tagged_allocator<T2,Pointer2,Tag> &)
89
+ {
90
+ return true;
91
+ }
92
+
93
+
94
+ template<typename T1, typename Pointer1, typename T2, typename Pointer2, typename Tag>
95
+ __host__ __device__
96
+ bool operator!=(const tagged_allocator<T1,Pointer1,Tag> &, const tagged_allocator<T2,Pointer2,Tag> &)
97
+ {
98
+ return false;
99
+ }
100
+
101
+
102
+ } // end detail
103
+ THRUST_NAMESPACE_END
104
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/binary_search.inl ADDED
@@ -0,0 +1,480 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/binary_search.h>
21
+ #include <thrust/iterator/iterator_traits.h>
22
+ #include <thrust/system/detail/generic/select_system.h>
23
+ #include <thrust/system/detail/generic/binary_search.h>
24
+ #include <thrust/system/detail/adl/binary_search.h>
25
+
26
+ THRUST_NAMESPACE_BEGIN
27
+
28
+ __thrust_exec_check_disable__
29
+ template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
30
+ __host__ __device__
31
+ ForwardIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
32
+ ForwardIterator first,
33
+ ForwardIterator last,
34
+ const LessThanComparable &value)
35
+ {
36
+ using thrust::system::detail::generic::lower_bound;
37
+ return lower_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value);
38
+ }
39
+
40
+
41
+ __thrust_exec_check_disable__
42
+ template<typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
43
+ __host__ __device__
44
+ ForwardIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
45
+ ForwardIterator first,
46
+ ForwardIterator last,
47
+ const T &value,
48
+ StrictWeakOrdering comp)
49
+ {
50
+ using thrust::system::detail::generic::lower_bound;
51
+ return lower_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value, comp);
52
+ }
53
+
54
+
55
+ __thrust_exec_check_disable__
56
+ template<typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
57
+ __host__ __device__
58
+ ForwardIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
59
+ ForwardIterator first,
60
+ ForwardIterator last,
61
+ const LessThanComparable &value)
62
+ {
63
+ using thrust::system::detail::generic::upper_bound;
64
+ return upper_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value);
65
+ }
66
+
67
+
68
+ __thrust_exec_check_disable__
69
+ template<typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
70
+ __host__ __device__
71
+ ForwardIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
72
+ ForwardIterator first,
73
+ ForwardIterator last,
74
+ const T &value,
75
+ StrictWeakOrdering comp)
76
+ {
77
+ using thrust::system::detail::generic::upper_bound;
78
+ return upper_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value, comp);
79
+ }
80
+
81
+
82
+ __thrust_exec_check_disable__
83
+ template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
84
+ __host__ __device__
85
+ bool binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
86
+ ForwardIterator first,
87
+ ForwardIterator last,
88
+ const LessThanComparable& value)
89
+ {
90
+ using thrust::system::detail::generic::binary_search;
91
+ return binary_search(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value);
92
+ }
93
+
94
+
95
+ __thrust_exec_check_disable__
96
+ template <typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
97
+ __host__ __device__
98
+ bool binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
99
+ ForwardIterator first,
100
+ ForwardIterator last,
101
+ const T& value,
102
+ StrictWeakOrdering comp)
103
+ {
104
+ using thrust::system::detail::generic::binary_search;
105
+ return binary_search(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value, comp);
106
+ }
107
+
108
+
109
+ __thrust_exec_check_disable__
110
+ template <typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
111
+ __host__ __device__
112
+ thrust::pair<ForwardIterator, ForwardIterator>
113
+ equal_range(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
114
+ ForwardIterator first,
115
+ ForwardIterator last,
116
+ const T& value,
117
+ StrictWeakOrdering comp)
118
+ {
119
+ using thrust::system::detail::generic::equal_range;
120
+ return equal_range(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value, comp);
121
+ }
122
+
123
+
124
+ __thrust_exec_check_disable__
125
+ template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
126
+ __host__ __device__
127
+ thrust::pair<ForwardIterator, ForwardIterator>
128
+ equal_range(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
129
+ ForwardIterator first,
130
+ ForwardIterator last,
131
+ const LessThanComparable& value)
132
+ {
133
+ using thrust::system::detail::generic::equal_range;
134
+ return equal_range(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, value);
135
+ }
136
+
137
+
138
+ __thrust_exec_check_disable__
139
+ template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
140
+ __host__ __device__
141
+ OutputIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
142
+ ForwardIterator first,
143
+ ForwardIterator last,
144
+ InputIterator values_first,
145
+ InputIterator values_last,
146
+ OutputIterator output)
147
+ {
148
+ using thrust::system::detail::generic::lower_bound;
149
+ return lower_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, values_first, values_last, output);
150
+ }
151
+
152
+
153
+ __thrust_exec_check_disable__
154
+ template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
155
+ __host__ __device__
156
+ OutputIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
157
+ ForwardIterator first,
158
+ ForwardIterator last,
159
+ InputIterator values_first,
160
+ InputIterator values_last,
161
+ OutputIterator output,
162
+ StrictWeakOrdering comp)
163
+ {
164
+ using thrust::system::detail::generic::lower_bound;
165
+ return lower_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, values_first, values_last, output, comp);
166
+ }
167
+
168
+
169
+ __thrust_exec_check_disable__
170
+ template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
171
+ __host__ __device__
172
+ OutputIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
173
+ ForwardIterator first,
174
+ ForwardIterator last,
175
+ InputIterator values_first,
176
+ InputIterator values_last,
177
+ OutputIterator output)
178
+ {
179
+ using thrust::system::detail::generic::upper_bound;
180
+ return upper_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, values_first, values_last, output);
181
+ }
182
+
183
+
184
+ __thrust_exec_check_disable__
185
+ template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
186
+ __host__ __device__
187
+ OutputIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
188
+ ForwardIterator first,
189
+ ForwardIterator last,
190
+ InputIterator values_first,
191
+ InputIterator values_last,
192
+ OutputIterator output,
193
+ StrictWeakOrdering comp)
194
+ {
195
+ using thrust::system::detail::generic::upper_bound;
196
+ return upper_bound(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, values_first, values_last, output, comp);
197
+ }
198
+
199
+
200
+ __thrust_exec_check_disable__
201
+ template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
202
+ __host__ __device__
203
+ OutputIterator binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
204
+ ForwardIterator first,
205
+ ForwardIterator last,
206
+ InputIterator values_first,
207
+ InputIterator values_last,
208
+ OutputIterator output)
209
+ {
210
+ using thrust::system::detail::generic::binary_search;
211
+ return binary_search(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, values_first, values_last, output);
212
+ }
213
+
214
+
215
+ __thrust_exec_check_disable__
216
+ template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
217
+ __host__ __device__
218
+ OutputIterator binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
219
+ ForwardIterator first,
220
+ ForwardIterator last,
221
+ InputIterator values_first,
222
+ InputIterator values_last,
223
+ OutputIterator output,
224
+ StrictWeakOrdering comp)
225
+ {
226
+ using thrust::system::detail::generic::binary_search;
227
+ return binary_search(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, values_first, values_last, output, comp);
228
+ }
229
+
230
+
231
+ //////////////////////
232
+ // Scalar Functions //
233
+ //////////////////////
234
+
235
+ template <typename ForwardIterator, typename LessThanComparable>
236
+ ForwardIterator lower_bound(ForwardIterator first,
237
+ ForwardIterator last,
238
+ const LessThanComparable& value)
239
+ {
240
+ using thrust::system::detail::generic::select_system;
241
+
242
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
243
+
244
+ System system;
245
+
246
+ return thrust::lower_bound(select_system(system), first, last, value);
247
+ }
248
+
249
+ template <typename ForwardIterator, typename T, typename StrictWeakOrdering>
250
+ ForwardIterator lower_bound(ForwardIterator first,
251
+ ForwardIterator last,
252
+ const T& value,
253
+ StrictWeakOrdering comp)
254
+ {
255
+ using thrust::system::detail::generic::select_system;
256
+
257
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
258
+
259
+ System system;
260
+
261
+ return thrust::lower_bound(select_system(system), first, last, value, comp);
262
+ }
263
+
264
+ template <typename ForwardIterator, typename LessThanComparable>
265
+ ForwardIterator upper_bound(ForwardIterator first,
266
+ ForwardIterator last,
267
+ const LessThanComparable& value)
268
+ {
269
+ using thrust::system::detail::generic::select_system;
270
+
271
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
272
+
273
+ System system;
274
+
275
+ return thrust::upper_bound(select_system(system), first, last, value);
276
+ }
277
+
278
+ template <typename ForwardIterator, typename T, typename StrictWeakOrdering>
279
+ ForwardIterator upper_bound(ForwardIterator first,
280
+ ForwardIterator last,
281
+ const T& value,
282
+ StrictWeakOrdering comp)
283
+ {
284
+ using thrust::system::detail::generic::select_system;
285
+
286
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
287
+
288
+ System system;
289
+
290
+ return thrust::upper_bound(select_system(system), first, last, value, comp);
291
+ }
292
+
293
+ template <typename ForwardIterator, typename LessThanComparable>
294
+ bool binary_search(ForwardIterator first,
295
+ ForwardIterator last,
296
+ const LessThanComparable& value)
297
+ {
298
+ using thrust::system::detail::generic::select_system;
299
+
300
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
301
+
302
+ System system;
303
+
304
+ return thrust::binary_search(select_system(system), first, last, value);
305
+ }
306
+
307
+ template <typename ForwardIterator, typename T, typename StrictWeakOrdering>
308
+ bool binary_search(ForwardIterator first,
309
+ ForwardIterator last,
310
+ const T& value,
311
+ StrictWeakOrdering comp)
312
+ {
313
+ using thrust::system::detail::generic::select_system;
314
+
315
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
316
+
317
+ System system;
318
+
319
+ return thrust::binary_search(select_system(system), first, last, value, comp);
320
+ }
321
+
322
+ template <typename ForwardIterator, typename LessThanComparable>
323
+ thrust::pair<ForwardIterator, ForwardIterator>
324
+ equal_range(ForwardIterator first,
325
+ ForwardIterator last,
326
+ const LessThanComparable& value)
327
+ {
328
+ using thrust::system::detail::generic::select_system;
329
+
330
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
331
+
332
+ System system;
333
+
334
+ return thrust::equal_range(select_system(system), first, last, value);
335
+ }
336
+
337
+ template <typename ForwardIterator, typename T, typename StrictWeakOrdering>
338
+ thrust::pair<ForwardIterator, ForwardIterator>
339
+ equal_range(ForwardIterator first,
340
+ ForwardIterator last,
341
+ const T& value,
342
+ StrictWeakOrdering comp)
343
+ {
344
+ using thrust::system::detail::generic::select_system;
345
+
346
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
347
+
348
+ System system;
349
+
350
+ return thrust::equal_range(select_system(system), first, last, value, comp);
351
+ }
352
+
353
+ //////////////////////
354
+ // Vector Functions //
355
+ //////////////////////
356
+
357
+ template <typename ForwardIterator, typename InputIterator, typename OutputIterator>
358
+ OutputIterator lower_bound(ForwardIterator first,
359
+ ForwardIterator last,
360
+ InputIterator values_first,
361
+ InputIterator values_last,
362
+ OutputIterator output)
363
+ {
364
+ using thrust::system::detail::generic::select_system;
365
+
366
+ typedef typename thrust::iterator_system<ForwardIterator>::type System1;
367
+ typedef typename thrust::iterator_system<InputIterator>::type System2;
368
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
369
+
370
+ System1 system1;
371
+ System2 system2;
372
+ System3 system3;
373
+
374
+ return thrust::lower_bound(select_system(system1,system2,system3), first, last, values_first, values_last, output);
375
+ }
376
+
377
+ template <typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
378
+ OutputIterator lower_bound(ForwardIterator first,
379
+ ForwardIterator last,
380
+ InputIterator values_first,
381
+ InputIterator values_last,
382
+ OutputIterator output,
383
+ StrictWeakOrdering comp)
384
+ {
385
+ using thrust::system::detail::generic::select_system;
386
+
387
+ typedef typename thrust::iterator_system<ForwardIterator>::type System1;
388
+ typedef typename thrust::iterator_system<InputIterator>::type System2;
389
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
390
+
391
+ System1 system1;
392
+ System2 system2;
393
+ System3 system3;
394
+
395
+ return thrust::lower_bound(select_system(system1,system2,system3), first, last, values_first, values_last, output, comp);
396
+ }
397
+
398
+ template <typename ForwardIterator, typename InputIterator, typename OutputIterator>
399
+ OutputIterator upper_bound(ForwardIterator first,
400
+ ForwardIterator last,
401
+ InputIterator values_first,
402
+ InputIterator values_last,
403
+ OutputIterator output)
404
+ {
405
+ using thrust::system::detail::generic::select_system;
406
+
407
+ typedef typename thrust::iterator_system<ForwardIterator>::type System1;
408
+ typedef typename thrust::iterator_system<InputIterator>::type System2;
409
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
410
+
411
+ System1 system1;
412
+ System2 system2;
413
+ System3 system3;
414
+
415
+ return thrust::upper_bound(select_system(system1,system2,system3), first, last, values_first, values_last, output);
416
+ }
417
+
418
+ template <typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
419
+ OutputIterator upper_bound(ForwardIterator first,
420
+ ForwardIterator last,
421
+ InputIterator values_first,
422
+ InputIterator values_last,
423
+ OutputIterator output,
424
+ StrictWeakOrdering comp)
425
+ {
426
+ using thrust::system::detail::generic::select_system;
427
+
428
+ typedef typename thrust::iterator_system<ForwardIterator>::type System1;
429
+ typedef typename thrust::iterator_system<InputIterator>::type System2;
430
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
431
+
432
+ System1 system1;
433
+ System2 system2;
434
+ System3 system3;
435
+
436
+ return thrust::upper_bound(select_system(system1,system2,system3), first, last, values_first, values_last, output, comp);
437
+ }
438
+
439
+ template <typename ForwardIterator, typename InputIterator, typename OutputIterator>
440
+ OutputIterator binary_search(ForwardIterator first,
441
+ ForwardIterator last,
442
+ InputIterator values_first,
443
+ InputIterator values_last,
444
+ OutputIterator output)
445
+ {
446
+ using thrust::system::detail::generic::select_system;
447
+
448
+ typedef typename thrust::iterator_system<ForwardIterator>::type System1;
449
+ typedef typename thrust::iterator_system<InputIterator>::type System2;
450
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
451
+
452
+ System1 system1;
453
+ System2 system2;
454
+ System3 system3;
455
+
456
+ return thrust::binary_search(select_system(system1,system2,system3), first, last, values_first, values_last, output);
457
+ }
458
+
459
+ template <typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
460
+ OutputIterator binary_search(ForwardIterator first,
461
+ ForwardIterator last,
462
+ InputIterator values_first,
463
+ InputIterator values_last,
464
+ OutputIterator output,
465
+ StrictWeakOrdering comp)
466
+ {
467
+ using thrust::system::detail::generic::select_system;
468
+
469
+ typedef typename thrust::iterator_system<ForwardIterator>::type System1;
470
+ typedef typename thrust::iterator_system<InputIterator>::type System2;
471
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
472
+
473
+ System1 system1;
474
+ System2 system2;
475
+ System3 system3;
476
+
477
+ return thrust::binary_search(select_system(system1,system2,system3), first, last, values_first, values_last, output, comp);
478
+ }
479
+
480
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+ /*! \file config.h
17
+ * \brief Defines platform configuration.
18
+ */
19
+
20
+ #pragma once
21
+
22
+ #include <thrust/version.h>
23
+ #include <thrust/detail/config/config.h>
24
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/compiler.h ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file compiler.h
18
+ * \brief Compiler-specific configuration
19
+ */
20
+
21
+ #pragma once
22
+
23
+ // enumerate host compilers we know about
24
+ #define THRUST_HOST_COMPILER_UNKNOWN 0
25
+ #define THRUST_HOST_COMPILER_MSVC 1
26
+ #define THRUST_HOST_COMPILER_GCC 2
27
+ #define THRUST_HOST_COMPILER_CLANG 3
28
+ #define THRUST_HOST_COMPILER_INTEL 4
29
+
30
+ // enumerate device compilers we know about
31
+ #define THRUST_DEVICE_COMPILER_UNKNOWN 0
32
+ #define THRUST_DEVICE_COMPILER_MSVC 1
33
+ #define THRUST_DEVICE_COMPILER_GCC 2
34
+ #define THRUST_DEVICE_COMPILER_CLANG 3
35
+ #define THRUST_DEVICE_COMPILER_NVCC 4
36
+
37
+ // figure out which host compiler we're using
38
+ // XXX we should move the definition of THRUST_DEPRECATED out of this logic
39
+ #if defined(_MSC_VER)
40
+ #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_MSVC
41
+ #define THRUST_MSVC_VERSION _MSC_VER
42
+ #define THRUST_MSVC_VERSION_FULL _MSC_FULL_VER
43
+ #elif defined(__ICC)
44
+ #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_INTEL
45
+ #elif defined(__clang__)
46
+ #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_CLANG
47
+ #define THRUST_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
48
+ #elif defined(__GNUC__)
49
+ #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_GCC
50
+ #define THRUST_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
51
+ #if (THRUST_GCC_VERSION >= 50000)
52
+ #define THRUST_MODERN_GCC
53
+ #else
54
+ #define THRUST_LEGACY_GCC
55
+ #endif
56
+ #else
57
+ #define THRUST_HOST_COMPILER THRUST_HOST_COMPILER_UNKNOWN
58
+ #endif // THRUST_HOST_COMPILER
59
+
60
+ // figure out which device compiler we're using
61
+ #if defined(__CUDACC__) || defined(_NVHPC_CUDA)
62
+ #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_NVCC
63
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
64
+ #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_MSVC
65
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC
66
+ #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_GCC
67
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG
68
+ // CUDA-capable clang should behave similar to NVCC.
69
+ #if defined(__CUDA__)
70
+ #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_NVCC
71
+ #else
72
+ #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_CLANG
73
+ #endif
74
+ #else
75
+ #define THRUST_DEVICE_COMPILER THRUST_DEVICE_COMPILER_UNKNOWN
76
+ #endif
77
+
78
+ // is the device compiler capable of compiling omp?
79
+ #if defined(_OPENMP) || defined(_NVHPC_STDPAR_OPENMP)
80
+ #define THRUST_DEVICE_COMPILER_IS_OMP_CAPABLE THRUST_TRUE
81
+ #else
82
+ #define THRUST_DEVICE_COMPILER_IS_OMP_CAPABLE THRUST_FALSE
83
+ #endif // _OPENMP
84
+
85
+
86
+ #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && !defined(__CUDA_ARCH__)
87
+ #define THRUST_DISABLE_MSVC_WARNING_BEGIN(x) \
88
+ __pragma(warning(push)) \
89
+ __pragma(warning(disable : x)) \
90
+ /**/
91
+ #define THRUST_DISABLE_MSVC_WARNING_END(x) \
92
+ __pragma(warning(pop)) \
93
+ /**/
94
+ #else
95
+ #define THRUST_DISABLE_MSVC_WARNING_BEGIN(x)
96
+ #define THRUST_DISABLE_MSVC_WARNING_END(x)
97
+ #endif
98
+
99
+ #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG) && !defined(__CUDA_ARCH__)
100
+ #define THRUST_IGNORE_CLANG_WARNING_IMPL(x) \
101
+ THRUST_PP_STRINGIZE(clang diagnostic ignored x) \
102
+ /**/
103
+ #define THRUST_IGNORE_CLANG_WARNING(x) \
104
+ THRUST_IGNORE_CLANG_WARNING_IMPL(THRUST_PP_STRINGIZE(x)) \
105
+ /**/
106
+
107
+ #define THRUST_DISABLE_CLANG_WARNING_BEGIN(x) \
108
+ _Pragma("clang diagnostic push") \
109
+ _Pragma(THRUST_IGNORE_CLANG_WARNING(x)) \
110
+ /**/
111
+ #define THRUST_DISABLE_CLANG_WARNING_END(x) \
112
+ _Pragma("clang diagnostic pop") \
113
+ /**/
114
+ #else
115
+ #define THRUST_DISABLE_CLANG_WARNING_BEGIN(x)
116
+ #define THRUST_DISABLE_CLANG_WARNING_END(x)
117
+ #endif
118
+
119
+ #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) && !defined(__CUDA_ARCH__)
120
+ #define THRUST_IGNORE_GCC_WARNING_IMPL(x) \
121
+ THRUST_PP_STRINGIZE(GCC diagnostic ignored x) \
122
+ /**/
123
+ #define THRUST_IGNORE_GCC_WARNING(x) \
124
+ THRUST_IGNORE_GCC_WARNING_IMPL(THRUST_PP_STRINGIZE(x)) \
125
+ /**/
126
+
127
+ #define THRUST_DISABLE_GCC_WARNING_BEGIN(x) \
128
+ _Pragma("GCC diagnostic push") \
129
+ _Pragma(THRUST_IGNORE_GCC_WARNING(x)) \
130
+ /**/
131
+ #define THRUST_DISABLE_GCC_WARNING_END(x) \
132
+ _Pragma("GCC diagnostic pop") \
133
+ /**/
134
+ #else
135
+ #define THRUST_DISABLE_GCC_WARNING_BEGIN(x)
136
+ #define THRUST_DISABLE_GCC_WARNING_END(x)
137
+ #endif
138
+
139
+ #define THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN \
140
+ THRUST_DISABLE_MSVC_WARNING_BEGIN(4244 4267) \
141
+ /**/
142
+ #define THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END \
143
+ THRUST_DISABLE_MSVC_WARNING_END(4244 4267) \
144
+ /**/
145
+ #define THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING(x) \
146
+ THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN \
147
+ x; \
148
+ THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END \
149
+ /**/
150
+
151
+ #define THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_BEGIN \
152
+ THRUST_DISABLE_MSVC_WARNING_BEGIN(4800) \
153
+ /**/
154
+ #define THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_END \
155
+ THRUST_DISABLE_MSVC_WARNING_END(4800) \
156
+ /**/
157
+ #define THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING(x) \
158
+ THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_BEGIN \
159
+ x; \
160
+ THRUST_DISABLE_MSVC_FORCING_VALUE_TO_BOOL_WARNING_END \
161
+ /**/
162
+
163
+ #define THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_BEGIN \
164
+ THRUST_DISABLE_CLANG_WARNING_BEGIN(-Wself-assign) \
165
+ /**/
166
+ #define THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_END \
167
+ THRUST_DISABLE_CLANG_WARNING_END(-Wself-assign) \
168
+ /**/
169
+ #define THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING(x) \
170
+ THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_BEGIN \
171
+ x; \
172
+ THRUST_DISABLE_CLANG_SELF_ASSIGNMENT_WARNING_END \
173
+ /**/
174
+
175
+ #define THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_BEGIN \
176
+ THRUST_DISABLE_CLANG_WARNING_BEGIN(-Wreorder) \
177
+ THRUST_DISABLE_GCC_WARNING_BEGIN(-Wreorder) \
178
+ /**/
179
+ #define THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_END \
180
+ THRUST_DISABLE_CLANG_WARNING_END(-Wreorder) \
181
+ THRUST_DISABLE_GCC_WARNING_END(-Wreorder) \
182
+ /**/
183
+ #define THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING(x) \
184
+ THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_BEGIN \
185
+ x; \
186
+ THRUST_DISABLE_CLANG_AND_GCC_INITIALIZER_REORDERING_WARNING_END \
187
+ /**/
188
+
189
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/compiler_fence.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/preprocessor.h>
21
+
22
+ // TODO: Enable this or remove this file once nvGRAPH/CUSP migrates off of it.
23
+ //#if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
24
+ // #pragma message("warning: The functionality in this header is unsafe, deprecated, and will soon be removed. Use C++11 or C11 atomics instead.")
25
+ //#else
26
+ // #warning The functionality in this header is unsafe, deprecated, and will soon be removed. Use C++11 or C11 atomics instead.
27
+ //#endif
28
+
29
+ // msvc case
30
+ #if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
31
+
32
+ #ifndef _DEBUG
33
+
34
+ #include <intrin.h>
35
+ #pragma intrinsic(_ReadWriteBarrier)
36
+ #define __thrust_compiler_fence() _ReadWriteBarrier()
37
+ #else
38
+
39
+ #define __thrust_compiler_fence() do {} while (0)
40
+
41
+ #endif // _DEBUG
42
+
43
+ // gcc case
44
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC
45
+
46
+ #if THRUST_GCC_VERSION >= 40200 // atomic built-ins were introduced ~4.2
47
+ #define __thrust_compiler_fence() __sync_synchronize()
48
+ #else
49
+ // allow the code to compile without any guarantees
50
+ #define __thrust_compiler_fence() do {} while (0)
51
+ #endif // THRUST_GCC_VERSION
52
+
53
+ // unknown case
54
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG
55
+ #define __thrust_compiler_fence() __sync_synchronize()
56
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_UNKNOWN
57
+
58
+ // allow the code to compile without any guarantees
59
+ #define __thrust_compiler_fence() do {} while (0)
60
+
61
+ #endif
62
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/config.h ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file config.h
18
+ * \brief Defines platform configuration.
19
+ */
20
+
21
+ #pragma once
22
+
23
+ // NOTE: The order of these #includes matters.
24
+
25
+ #include <thrust/detail/config/simple_defines.h>
26
+ #include <thrust/detail/config/compiler.h>
27
+ #include <thrust/detail/config/cpp_dialect.h>
28
+ #include <thrust/detail/config/cpp_compatibility.h>
29
+ #include <thrust/detail/config/deprecated.h>
30
+ // host_system.h & device_system.h must be #included as early as possible
31
+ // because other config headers depend on it
32
+ #include <thrust/detail/config/host_system.h>
33
+ #include <thrust/detail/config/device_system.h>
34
+ #include <thrust/detail/config/host_device.h>
35
+ #include <thrust/detail/config/debug.h>
36
+ #include <thrust/detail/config/forceinline.h>
37
+ #include <thrust/detail/config/exec_check_disable.h>
38
+ #include <thrust/detail/config/global_workarounds.h>
39
+ #include <thrust/detail/config/namespace.h>
40
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/cpp_compatibility.h ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2018 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config/cpp_dialect.h>
20
+
21
+ #include <cstddef>
22
+
23
+ #ifndef __has_cpp_attribute
24
+ # define __has_cpp_attribute(X) 0
25
+ #endif
26
+
27
+ // Trailing return types seem to confuse Doxygen, and cause it to interpret
28
+ // parts of the function's body as new function signatures.
29
+ #if defined(THRUST_DOXYGEN)
30
+ # define THRUST_TRAILING_RETURN(...)
31
+ #else
32
+ # define THRUST_TRAILING_RETURN(...) -> __VA_ARGS__
33
+ #endif
34
+
35
+ #if THRUST_CPP_DIALECT >= 2014 && __has_cpp_attribute(nodiscard)
36
+ # define THRUST_NODISCARD [[nodiscard]]
37
+ #else
38
+ # define THRUST_NODISCARD
39
+ #endif
40
+
41
+ #if THRUST_CPP_DIALECT >= 2017 && __cpp_if_constexpr
42
+ # define THRUST_IF_CONSTEXPR if constexpr
43
+ #else
44
+ # define THRUST_IF_CONSTEXPR if
45
+ #endif
46
+
47
+ // FIXME: Combine THRUST_INLINE_CONSTANT and
48
+ // THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT into one macro when NVCC properly
49
+ // supports `constexpr` globals in host and device code.
50
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
51
+ // FIXME: Add this when NVCC supports inline variables.
52
+ //# if THRUST_CPP_DIALECT >= 2017
53
+ //# define THRUST_INLINE_CONSTANT inline constexpr
54
+ //# define THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT inline constexpr
55
+ # if THRUST_CPP_DIALECT >= 2011
56
+ # define THRUST_INLINE_CONSTANT static const __device__
57
+ # define THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT static constexpr
58
+ # else
59
+ # define THRUST_INLINE_CONSTANT static const __device__
60
+ # define THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT static const
61
+ # endif
62
+ #else
63
+ // FIXME: Add this when NVCC supports inline variables.
64
+ //# if THRUST_CPP_DIALECT >= 2017
65
+ //# define THRUST_INLINE_CONSTANT inline constexpr
66
+ //# define THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT inline constexpr
67
+ # if THRUST_CPP_DIALECT >= 2011
68
+ # define THRUST_INLINE_CONSTANT static constexpr
69
+ # define THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT static constexpr
70
+ # else
71
+ # define THRUST_INLINE_CONSTANT static const
72
+ # define THRUST_INLINE_INTEGRAL_MEMBER_CONSTANT static const
73
+ # endif
74
+ #endif
75
+
76
+ // These definitions were intended for internal use only and are now obsolete.
77
+ // If you relied on them, consider porting your code to use the functionality
78
+ // in libcu++'s <nv/target> header.
79
+ // For a temporary workaround, define THRUST_PROVIDE_LEGACY_ARCH_MACROS to make
80
+ // them available again. These should be considered deprecated and will be
81
+ // fully removed in a future version.
82
+ #ifdef THRUST_PROVIDE_LEGACY_ARCH_MACROS
83
+ #ifndef THRUST_IS_DEVICE_CODE
84
+ #if defined(_NVHPC_CUDA)
85
+ #define THRUST_IS_DEVICE_CODE __builtin_is_device_code()
86
+ #define THRUST_IS_HOST_CODE (!__builtin_is_device_code())
87
+ #define THRUST_INCLUDE_DEVICE_CODE 1
88
+ #define THRUST_INCLUDE_HOST_CODE 1
89
+ #elif defined(__CUDA_ARCH__)
90
+ #define THRUST_IS_DEVICE_CODE 1
91
+ #define THRUST_IS_HOST_CODE 0
92
+ #define THRUST_INCLUDE_DEVICE_CODE 1
93
+ #define THRUST_INCLUDE_HOST_CODE 0
94
+ #else
95
+ #define THRUST_IS_DEVICE_CODE 0
96
+ #define THRUST_IS_HOST_CODE 1
97
+ #define THRUST_INCLUDE_DEVICE_CODE 0
98
+ #define THRUST_INCLUDE_HOST_CODE 1
99
+ #endif
100
+ #endif
101
+ #endif // THRUST_PROVIDE_LEGACY_ARCH_MACROS
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/cpp_dialect.h ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2020 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file cpp_dialect.h
18
+ * \brief Detect the version of the C++ standard used by the compiler.
19
+ */
20
+
21
+ #pragma once
22
+
23
+ #include <thrust/detail/config/compiler.h>
24
+
25
+ // Deprecation warnings may be silenced by defining the following macros. These
26
+ // may be combined.
27
+ // - THRUST_IGNORE_DEPRECATED_CPP_DIALECT:
28
+ // Ignore all deprecated C++ dialects and outdated compilers.
29
+ // - THRUST_IGNORE_DEPRECATED_CPP_11:
30
+ // Ignore deprecation warnings when compiling with C++11. C++03 and outdated
31
+ // compilers will still issue warnings.
32
+ // - THRUST_IGNORE_DEPRECATED_COMPILER
33
+ // Ignore deprecation warnings when using deprecated compilers. Compiling
34
+ // with C++03 and C++11 will still issue warnings.
35
+
36
+ // Check for the CUB opt-outs as well:
37
+ #if !defined(THRUST_IGNORE_DEPRECATED_CPP_DIALECT) && \
38
+ defined(CUB_IGNORE_DEPRECATED_CPP_DIALECT)
39
+ # define THRUST_IGNORE_DEPRECATED_CPP_DIALECT
40
+ #endif
41
+ #if !defined(THRUST_IGNORE_DEPRECATED_CPP_11) && \
42
+ defined(CUB_IGNORE_DEPRECATED_CPP_11)
43
+ # define THRUST_IGNORE_DEPRECATED_CPP_11
44
+ #endif
45
+ #if !defined(THRUST_IGNORE_DEPRECATED_COMPILER) && \
46
+ defined(CUB_IGNORE_DEPRECATED_COMPILER)
47
+ # define THRUST_IGNORE_DEPRECATED_COMPILER
48
+ #endif
49
+
50
+ #ifdef THRUST_IGNORE_DEPRECATED_CPP_DIALECT
51
+ # define THRUST_IGNORE_DEPRECATED_CPP_11
52
+ # define THRUST_IGNORE_DEPRECATED_COMPILER
53
+ #endif
54
+
55
+ // Define this to override the built-in detection.
56
+ #ifndef THRUST_CPP_DIALECT
57
+
58
+ // MSVC does not define __cplusplus correctly. _MSVC_LANG is used instead.
59
+ // This macro is only defined in MSVC 2015U3+.
60
+ # ifdef _MSVC_LANG // Do not replace with THRUST_HOST_COMPILER test (see above)
61
+ // MSVC2015 reports C++14 but lacks extended constexpr support. Treat as C++11.
62
+ # if THRUST_MSVC_VERSION < 1910 && _MSVC_LANG > 201103L /* MSVC < 2017 && CPP > 2011 */
63
+ # define THRUST_CPLUSPLUS 201103L /* Fix to 2011 */
64
+ # else
65
+ # define THRUST_CPLUSPLUS _MSVC_LANG /* We'll trust this for now. */
66
+ # endif // MSVC 2015 C++14 fix
67
+ # else
68
+ # define THRUST_CPLUSPLUS __cplusplus
69
+ # endif
70
+
71
+ // Detect current dialect:
72
+ # if THRUST_CPLUSPLUS < 201103L
73
+ # define THRUST_CPP_DIALECT 2003
74
+ # elif THRUST_CPLUSPLUS < 201402L
75
+ # define THRUST_CPP_DIALECT 2011
76
+ # elif THRUST_CPLUSPLUS < 201703L
77
+ # define THRUST_CPP_DIALECT 2014
78
+ # elif THRUST_CPLUSPLUS == 201703L
79
+ # define THRUST_CPP_DIALECT 2017
80
+ # elif THRUST_CPLUSPLUS > 201703L // unknown, but is higher than 2017.
81
+ # define THRUST_CPP_DIALECT 2020
82
+ # endif
83
+
84
+ # undef THRUST_CPLUSPLUS // cleanup
85
+
86
+ #endif // !THRUST_CPP_DIALECT
87
+
88
+ // Define THRUST_COMPILER_DEPRECATION macro:
89
+ #if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
90
+ # define THRUST_COMP_DEPR_IMPL(msg) \
91
+ __pragma(message(__FILE__ ":" THRUST_COMP_DEPR_IMPL0(__LINE__) ": warning: " #msg))
92
+ # define THRUST_COMP_DEPR_IMPL0(x) THRUST_COMP_DEPR_IMPL1(x)
93
+ # define THRUST_COMP_DEPR_IMPL1(x) #x
94
+ #else // clang / gcc:
95
+ # define THRUST_COMP_DEPR_IMPL(msg) THRUST_COMP_DEPR_IMPL0(GCC warning #msg)
96
+ # define THRUST_COMP_DEPR_IMPL0(expr) _Pragma(#expr)
97
+ # define THRUST_COMP_DEPR_IMPL1 /* intentionally blank */
98
+ #endif
99
+
100
+ #define THRUST_COMPILER_DEPRECATION(REQ) \
101
+ THRUST_COMP_DEPR_IMPL(Thrust requires at least REQ. Define THRUST_IGNORE_DEPRECATED_CPP_DIALECT to suppress this message.)
102
+
103
+ #define THRUST_COMPILER_DEPRECATION_SOFT(REQ, CUR) \
104
+ THRUST_COMP_DEPR_IMPL(Thrust requires at least REQ. CUR is deprecated but still supported. CUR support will be removed in a future release. Define THRUST_IGNORE_DEPRECATED_CPP_DIALECT to suppress this message.)
105
+
106
+ #ifndef THRUST_IGNORE_DEPRECATED_COMPILER
107
+
108
+ // Compiler checks:
109
+ # if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC && THRUST_GCC_VERSION < 50000
110
+ THRUST_COMPILER_DEPRECATION(GCC 5.0);
111
+ # elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG && THRUST_CLANG_VERSION < 70000
112
+ THRUST_COMPILER_DEPRECATION(Clang 7.0);
113
+ # elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC && THRUST_MSVC_VERSION < 1910
114
+ // <2017. Hard upgrade message:
115
+ THRUST_COMPILER_DEPRECATION(MSVC 2019 (19.20/16.0/14.20));
116
+ # elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC && THRUST_MSVC_VERSION < 1920
117
+ // >=2017, <2019. Soft deprecation message:
118
+ THRUST_COMPILER_DEPRECATION_SOFT(MSVC 2019 (19.20/16.0/14.20), MSVC 2017);
119
+ # endif
120
+
121
+ #endif // THRUST_IGNORE_DEPRECATED_COMPILER
122
+
123
+ #ifndef THRUST_IGNORE_DEPRECATED_DIALECT
124
+
125
+ // Dialect checks:
126
+ # if THRUST_CPP_DIALECT < 2011
127
+ // <C++11. Hard upgrade message:
128
+ THRUST_COMPILER_DEPRECATION(C++14);
129
+ # elif THRUST_CPP_DIALECT == 2011 && !defined(THRUST_IGNORE_DEPRECATED_CPP_11)
130
+ // =C++11. Soft upgrade message:
131
+ THRUST_COMPILER_DEPRECATION_SOFT(C++14, C++11);
132
+ # endif
133
+
134
+ #endif // THRUST_IGNORE_DEPRECATED_DIALECT
135
+
136
+ #undef THRUST_COMPILER_DEPRECATION_SOFT
137
+ #undef THRUST_COMPILER_DEPRECATION
138
+ #undef THRUST_COMP_DEPR_IMPL
139
+ #undef THRUST_COMP_DEPR_IMPL0
140
+ #undef THRUST_COMP_DEPR_IMPL1
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/deprecated.h ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2018-2020 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file deprecated.h
18
+ * \brief Defines the THRUST_DEPRECATED macro
19
+ */
20
+
21
+ #pragma once
22
+
23
+ #include <thrust/detail/config/compiler.h>
24
+ #include <thrust/detail/config/cpp_dialect.h>
25
+
26
+ #if defined(CUB_IGNORE_DEPRECATED_API) && !defined(THRUST_IGNORE_DEPRECATED_API)
27
+ # define THRUST_IGNORE_DEPRECATED_API
28
+ #endif
29
+
30
+ #ifdef THRUST_IGNORE_DEPRECATED_API
31
+ # define THRUST_DEPRECATED
32
+ #elif THRUST_CPP_DIALECT >= 2014
33
+ # define THRUST_DEPRECATED [[deprecated]]
34
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
35
+ # define THRUST_DEPRECATED __declspec(deprecated)
36
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG
37
+ # define THRUST_DEPRECATED __attribute__((deprecated))
38
+ #elif THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC
39
+ # define THRUST_DEPRECATED __attribute__((deprecated))
40
+ #else
41
+ # define THRUST_DEPRECATED
42
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/device_system.h ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ // reserve 0 for undefined
20
+ #define THRUST_DEVICE_SYSTEM_CUDA 1
21
+ #define THRUST_DEVICE_SYSTEM_OMP 2
22
+ #define THRUST_DEVICE_SYSTEM_TBB 3
23
+ #define THRUST_DEVICE_SYSTEM_CPP 4
24
+
25
+ #ifndef THRUST_DEVICE_SYSTEM
26
+ #define THRUST_DEVICE_SYSTEM THRUST_DEVICE_SYSTEM_CUDA
27
+ #endif // THRUST_DEVICE_SYSTEM
28
+
29
+ #ifdef THRUST_DEVICE_BACKEND
30
+ # error THRUST_DEVICE_BACKEND is no longer supported; use THRUST_DEVICE_SYSTEM instead.
31
+ #endif // THRUST_DEVICE_BACKEND
32
+
33
+ #if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
34
+ #define __THRUST_DEVICE_SYSTEM_NAMESPACE cuda
35
+ #elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_OMP
36
+ #define __THRUST_DEVICE_SYSTEM_NAMESPACE omp
37
+ #elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_TBB
38
+ #define __THRUST_DEVICE_SYSTEM_NAMESPACE tbb
39
+ #elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CPP
40
+ #define __THRUST_DEVICE_SYSTEM_NAMESPACE cpp
41
+ #endif
42
+
43
+ #define __THRUST_DEVICE_SYSTEM_ROOT thrust/system/__THRUST_DEVICE_SYSTEM_NAMESPACE
44
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/exec_check_disable.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file exec_check_disable.h
18
+ * \brief Defines __thrust_exec_check_disable__
19
+ */
20
+
21
+ #pragma once
22
+
23
+ #include <thrust/detail/config.h>
24
+
25
+ // #pragma nv_exec_check_disable is only recognized by NVCC. Having a macro
26
+ // expand to a #pragma (rather than _Pragma) only works with NVCC's compilation
27
+ // model, not with other compilers.
28
+ #if defined(__CUDACC__) && !defined(_NVHPC_CUDA) && \
29
+ !(defined(__CUDA__) && defined(__clang__))
30
+
31
+ #if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
32
+ #define __thrust_exec_check_disable__ __pragma("nv_exec_check_disable")
33
+ #else // MSVC
34
+ #define __thrust_exec_check_disable__ _Pragma("nv_exec_check_disable")
35
+ #endif // MSVC
36
+
37
+ #else
38
+
39
+ #define __thrust_exec_check_disable__
40
+
41
+ #endif
42
+
43
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/forceinline.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file forceinline.h
18
+ * \brief Defines __thrust_forceinline__
19
+ */
20
+
21
+ #pragma once
22
+
23
+ #include <thrust/detail/config.h>
24
+
25
+ #if defined(__CUDACC__) || defined(_NVHPC_CUDA)
26
+
27
+ #define __thrust_forceinline__ __forceinline__
28
+
29
+ #else
30
+
31
+ // TODO add
32
+
33
+ #define __thrust_forceinline__
34
+
35
+ #endif
36
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/global_workarounds.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config/compiler.h>
20
+
21
+ // XXX workaround gcc 4.8+'s complaints about unused local typedefs by silencing them globally
22
+ #if defined(THRUST_GCC_VERSION) && (THRUST_GCC_VERSION >= 40800)
23
+ # if defined(__NVCC__) && (CUDART_VERSION >= 6000)
24
+ # pragma GCC diagnostic ignored "-Wunused-local-typedefs"
25
+ # endif // nvcc & cuda 6+
26
+ #endif // gcc 4.8
27
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/host_device.h ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file host_device.h
18
+ * \brief Defines __host__ and __device__
19
+ */
20
+
21
+ #pragma once
22
+
23
+ #include <thrust/detail/config.h>
24
+
25
+ // since nvcc defines __host__ and __device__ for us,
26
+ // and only nvcc knows what to do with __host__ and __device__,
27
+ // define them to be the empty string for other compilers
28
+
29
+ #if THRUST_DEVICE_COMPILER != THRUST_DEVICE_COMPILER_NVCC
30
+
31
+ // since __host__ & __device__ might have already be defined, only
32
+ // #define them if not defined already
33
+ // XXX this will break if the client does #include <host_defines.h> later
34
+
35
+ #ifndef __host__
36
+ #define __host__
37
+ #endif // __host__
38
+
39
+ #ifndef __device__
40
+ #define __device__
41
+ #endif // __device__
42
+
43
+ #endif
44
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/host_system.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ // reserve 0 for undefined
20
+ #define THRUST_HOST_SYSTEM_CPP 1
21
+ #define THRUST_HOST_SYSTEM_OMP 2
22
+ #define THRUST_HOST_SYSTEM_TBB 3
23
+
24
+ #ifndef THRUST_HOST_SYSTEM
25
+ #define THRUST_HOST_SYSTEM THRUST_HOST_SYSTEM_CPP
26
+ #endif // THRUST_HOST_SYSTEM
27
+
28
+ #ifdef THRUST_HOST_BACKEND
29
+ # error THRUST_HOST_BACKEND is no longer supported; use THRUST_HOST_SYSTEM instead.
30
+ #endif // THRUST_HOST_BACKEND
31
+
32
+ #if THRUST_HOST_SYSTEM == THRUST_HOST_SYSTEM_CPP
33
+ #define __THRUST_HOST_SYSTEM_NAMESPACE cpp
34
+ #elif THRUST_HOST_SYSTEM == THRUST_HOST_SYSTEM_OMP
35
+ #define __THRUST_HOST_SYSTEM_NAMESPACE omp
36
+ #elif THRUST_HOST_SYSTEM == THRUST_HOST_SYSTEM_TBB
37
+ #define __THRUST_HOST_SYSTEM_NAMESPACE tbb
38
+ #endif
39
+
40
+ #define __THRUST_HOST_SYSTEM_ROOT thrust/system/__THRUST_HOST_SYSTEM_NAMESPACE
41
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/memory_resource.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2018 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <cstddef>
20
+
21
+ #include <thrust/detail/config.h>
22
+ #include <thrust/detail/alignment.h>
23
+ #include <thrust/detail/config/cpp_compatibility.h>
24
+
25
+ #define THRUST_MR_DEFAULT_ALIGNMENT THRUST_ALIGNOF(THRUST_NS_QUALIFIER::detail::max_align_t)
26
+
27
+ #if THRUST_CPP_DIALECT >= 2017
28
+ # if __has_include(<memory_resource>)
29
+ # define THRUST_MR_STD_MR_HEADER <memory_resource>
30
+ # define THRUST_MR_STD_MR_NS std::pmr
31
+ # elif __has_include(<experimental/memory_resource>)
32
+ # define THRUST_MR_STD_MR_HEADER <experimental/memory_resource>
33
+ # define THRUST_MR_STD_MR_NS std::experimental::pmr
34
+ # endif
35
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/namespace.h ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2021 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ /**
20
+ * \file namespace.h
21
+ * \brief Utilities that allow `thrust::` to be placed inside an
22
+ * application-specific namespace.
23
+ */
24
+
25
+ /**
26
+ * \def THRUST_CUB_WRAPPED_NAMESPACE
27
+ * If defined, this value will be used as the name of a namespace that wraps the
28
+ * `thrust::` and `cub::` namespaces.
29
+ * This macro should not be used with any other Thrust namespace macros.
30
+ */
31
+ #ifdef THRUST_CUB_WRAPPED_NAMESPACE
32
+ #define THRUST_WRAPPED_NAMESPACE THRUST_CUB_WRAPPED_NAMESPACE
33
+ #endif
34
+
35
+ /**
36
+ * \def THRUST_WRAPPED_NAMESPACE
37
+ * If defined, this value will be used as the name of a namespace that wraps the
38
+ * `thrust::` namespace.
39
+ * If THRUST_CUB_WRAPPED_NAMESPACE is set, this will inherit that macro's value.
40
+ * This macro should not be used with any other Thrust namespace macros.
41
+ */
42
+ #ifdef THRUST_WRAPPED_NAMESPACE
43
+ #define THRUST_NS_PREFIX \
44
+ namespace THRUST_WRAPPED_NAMESPACE \
45
+ {
46
+
47
+ #define THRUST_NS_POSTFIX }
48
+
49
+ #define THRUST_NS_QUALIFIER ::THRUST_WRAPPED_NAMESPACE::thrust
50
+ #endif
51
+
52
+ /**
53
+ * \def THRUST_NS_PREFIX
54
+ * This macro is inserted prior to all `namespace thrust { ... }` blocks. It is
55
+ * derived from THRUST_WRAPPED_NAMESPACE, if set, and will be empty otherwise.
56
+ * It may be defined by users, in which case THRUST_NS_PREFIX,
57
+ * THRUST_NS_POSTFIX, and THRUST_NS_QUALIFIER must all be set consistently.
58
+ */
59
+ #ifndef THRUST_NS_PREFIX
60
+ #define THRUST_NS_PREFIX
61
+ #endif
62
+
63
+ /**
64
+ * \def THRUST_NS_POSTFIX
65
+ * This macro is inserted following the closing braces of all
66
+ * `namespace thrust { ... }` block. It is defined appropriately when
67
+ * THRUST_WRAPPED_NAMESPACE is set, and will be empty otherwise. It may be
68
+ * defined by users, in which case THRUST_NS_PREFIX, THRUST_NS_POSTFIX, and
69
+ * THRUST_NS_QUALIFIER must all be set consistently.
70
+ */
71
+ #ifndef THRUST_NS_POSTFIX
72
+ #define THRUST_NS_POSTFIX
73
+ #endif
74
+
75
+ /**
76
+ * \def THRUST_NS_QUALIFIER
77
+ * This macro is used to qualify members of thrust:: when accessing them from
78
+ * outside of their namespace. By default, this is just `::thrust`, and will be
79
+ * set appropriately when THRUST_WRAPPED_NAMESPACE is defined. This macro may be
80
+ * defined by users, in which case THRUST_NS_PREFIX, THRUST_NS_POSTFIX, and
81
+ * THRUST_NS_QUALIFIER must all be set consistently.
82
+ */
83
+ #ifndef THRUST_NS_QUALIFIER
84
+ #define THRUST_NS_QUALIFIER ::thrust
85
+ #endif
86
+
87
+ /**
88
+ * \def THRUST_NAMESPACE_BEGIN
89
+ * This macro is used to open a `thrust::` namespace block, along with any
90
+ * enclosing namespaces requested by THRUST_WRAPPED_NAMESPACE, etc.
91
+ * This macro is defined by Thrust and may not be overridden.
92
+ */
93
+ #define THRUST_NAMESPACE_BEGIN \
94
+ THRUST_NS_PREFIX \
95
+ namespace thrust \
96
+ {
97
+
98
+ /**
99
+ * \def THRUST_NAMESPACE_END
100
+ * This macro is used to close a `thrust::` namespace block, along with any
101
+ * enclosing namespaces requested by THRUST_WRAPPED_NAMESPACE, etc.
102
+ * This macro is defined by Thrust and may not be overridden.
103
+ */
104
+ #define THRUST_NAMESPACE_END \
105
+ } /* end namespace thrust */ \
106
+ THRUST_NS_POSTFIX
107
+
108
+ // The following is just here to add docs for the thrust namespace:
109
+
110
+ THRUST_NS_PREFIX
111
+
112
+ /*! \namespace thrust
113
+ * \brief \p thrust is the top-level namespace which contains all Thrust
114
+ * functions and types.
115
+ */
116
+ namespace thrust
117
+ {
118
+ }
119
+
120
+ THRUST_NS_POSTFIX
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/config/simple_defines.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /*! \file simple_defines.h
18
+ * \brief Primitive macros without dependencies.
19
+ */
20
+
21
+ #pragma once
22
+
23
+ #define THRUST_UNKNOWN 0
24
+ #define THRUST_FALSE 0
25
+ #define THRUST_TRUE 1
26
+
27
+ #define THRUST_UNUSED_VAR(expr) do { (void)(expr); } while (0)
28
+
29
+ #define THRUST_PREVENT_MACRO_SUBSTITUTION
30
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/contiguous_storage.h ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2018 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/iterator/detail/normal_iterator.h>
20
+ #include <thrust/detail/execution_policy.h>
21
+ #include <thrust/detail/allocator/allocator_traits.h>
22
+ #include <thrust/detail/config.h>
23
+
24
+ THRUST_NAMESPACE_BEGIN
25
+
26
+ namespace detail
27
+ {
28
+
29
+ struct copy_allocator_t {};
30
+
31
+ // XXX parameter T is redundant with parameter Alloc
32
+ template<typename T, typename Alloc>
33
+ class contiguous_storage
34
+ {
35
+ private:
36
+ typedef thrust::detail::allocator_traits<Alloc> alloc_traits;
37
+
38
+ public:
39
+ typedef Alloc allocator_type;
40
+ typedef T value_type;
41
+ typedef typename alloc_traits::pointer pointer;
42
+ typedef typename alloc_traits::const_pointer const_pointer;
43
+ typedef typename alloc_traits::size_type size_type;
44
+ typedef typename alloc_traits::difference_type difference_type;
45
+ typedef typename alloc_traits::reference reference;
46
+ typedef typename alloc_traits::const_reference const_reference;
47
+
48
+ typedef thrust::detail::normal_iterator<pointer> iterator;
49
+ typedef thrust::detail::normal_iterator<const_pointer> const_iterator;
50
+
51
+ __thrust_exec_check_disable__
52
+ __host__ __device__
53
+ explicit contiguous_storage(const allocator_type &alloc = allocator_type());
54
+
55
+ __thrust_exec_check_disable__
56
+ __host__ __device__
57
+ explicit contiguous_storage(size_type n, const allocator_type &alloc = allocator_type());
58
+
59
+ __thrust_exec_check_disable__
60
+ __host__ __device__
61
+ explicit contiguous_storage(copy_allocator_t, const contiguous_storage &other);
62
+
63
+ __thrust_exec_check_disable__
64
+ __host__ __device__
65
+ explicit contiguous_storage(copy_allocator_t, const contiguous_storage &other, size_type n);
66
+
67
+ __thrust_exec_check_disable__
68
+ __host__ __device__
69
+ ~contiguous_storage();
70
+
71
+ __host__ __device__
72
+ size_type size() const;
73
+
74
+ __host__ __device__
75
+ size_type max_size() const;
76
+
77
+ __host__ __device__
78
+ pointer data();
79
+
80
+ __host__ __device__
81
+ const_pointer data() const;
82
+
83
+ __host__ __device__
84
+ iterator begin();
85
+
86
+ __host__ __device__
87
+ const_iterator begin() const;
88
+
89
+ __host__ __device__
90
+ iterator end();
91
+
92
+ __host__ __device__
93
+ const_iterator end() const;
94
+
95
+ __host__ __device__
96
+ reference operator[](size_type n);
97
+
98
+ __host__ __device__
99
+ const_reference operator[](size_type n) const;
100
+
101
+ __host__ __device__
102
+ allocator_type get_allocator() const;
103
+
104
+ // note that allocate does *not* automatically call deallocate
105
+ __host__ __device__
106
+ void allocate(size_type n);
107
+
108
+ __host__ __device__
109
+ void deallocate();
110
+
111
+ __host__ __device__
112
+ void swap(contiguous_storage &x);
113
+
114
+ __host__ __device__
115
+ void default_construct_n(iterator first, size_type n);
116
+
117
+ __host__ __device__
118
+ void uninitialized_fill_n(iterator first, size_type n, const value_type &value);
119
+
120
+ template<typename InputIterator>
121
+ __host__ __device__
122
+ iterator uninitialized_copy(InputIterator first, InputIterator last, iterator result);
123
+
124
+ template<typename System, typename InputIterator>
125
+ __host__ __device__
126
+ iterator uninitialized_copy(thrust::execution_policy<System> &from_system,
127
+ InputIterator first,
128
+ InputIterator last,
129
+ iterator result);
130
+
131
+ template<typename InputIterator, typename Size>
132
+ __host__ __device__
133
+ iterator uninitialized_copy_n(InputIterator first, Size n, iterator result);
134
+
135
+ template<typename System, typename InputIterator, typename Size>
136
+ __host__ __device__
137
+ iterator uninitialized_copy_n(thrust::execution_policy<System> &from_system,
138
+ InputIterator first,
139
+ Size n,
140
+ iterator result);
141
+
142
+ __host__ __device__
143
+ void destroy(iterator first, iterator last);
144
+
145
+ __host__ __device__
146
+ void deallocate_on_allocator_mismatch(const contiguous_storage &other);
147
+
148
+ __host__ __device__
149
+ void destroy_on_allocator_mismatch(const contiguous_storage &other,
150
+ iterator first, iterator last);
151
+
152
+ __host__ __device__
153
+ void set_allocator(const allocator_type &alloc);
154
+
155
+ __host__ __device__
156
+ bool is_allocator_not_equal(const allocator_type &alloc) const;
157
+
158
+ __host__ __device__
159
+ bool is_allocator_not_equal(const contiguous_storage &other) const;
160
+
161
+ __host__ __device__
162
+ void propagate_allocator(const contiguous_storage &other);
163
+
164
+ #if THRUST_CPP_DIALECT >= 2011
165
+ __host__ __device__
166
+ void propagate_allocator(contiguous_storage &other);
167
+
168
+ // allow move assignment for a sane implementation of allocator propagation
169
+ // on move assignment
170
+ __host__ __device__
171
+ contiguous_storage &operator=(contiguous_storage &&other);
172
+ #endif
173
+
174
+ private:
175
+ // XXX we could inherit from this to take advantage of empty base class optimization
176
+ allocator_type m_allocator;
177
+
178
+ iterator m_begin;
179
+
180
+ size_type m_size;
181
+
182
+ // disallow assignment
183
+ contiguous_storage &operator=(const contiguous_storage &x);
184
+
185
+ __host__ __device__
186
+ void swap_allocators(true_type, const allocator_type &);
187
+
188
+ __host__ __device__
189
+ void swap_allocators(false_type, allocator_type &);
190
+
191
+ __host__ __device__
192
+ bool is_allocator_not_equal_dispatch(true_type, const allocator_type &) const;
193
+
194
+ __host__ __device__
195
+ bool is_allocator_not_equal_dispatch(false_type, const allocator_type &) const;
196
+
197
+ __host__ __device__
198
+ void deallocate_on_allocator_mismatch_dispatch(true_type, const contiguous_storage &other);
199
+
200
+ __host__ __device__
201
+ void deallocate_on_allocator_mismatch_dispatch(false_type, const contiguous_storage &other);
202
+
203
+ __host__ __device__
204
+ void destroy_on_allocator_mismatch_dispatch(true_type, const contiguous_storage &other,
205
+ iterator first, iterator last);
206
+
207
+ __host__ __device__
208
+ void destroy_on_allocator_mismatch_dispatch(false_type, const contiguous_storage &other,
209
+ iterator first, iterator last);
210
+
211
+ __host__ __device__
212
+ void propagate_allocator_dispatch(true_type, const contiguous_storage &other);
213
+
214
+ __host__ __device__
215
+ void propagate_allocator_dispatch(false_type, const contiguous_storage &other);
216
+
217
+ #if THRUST_CPP_DIALECT >= 2011
218
+ __host__ __device__
219
+ void propagate_allocator_dispatch(true_type, contiguous_storage &other);
220
+
221
+ __host__ __device__
222
+ void propagate_allocator_dispatch(false_type, contiguous_storage &other);
223
+ #endif
224
+ }; // end contiguous_storage
225
+
226
+ } // end detail
227
+
228
+ template<typename T, typename Alloc>
229
+ __host__ __device__
230
+ void swap(detail::contiguous_storage<T,Alloc> &lhs, detail::contiguous_storage<T,Alloc> &rhs);
231
+
232
+ THRUST_NAMESPACE_END
233
+
234
+ #include <thrust/detail/contiguous_storage.inl>
235
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/contiguous_storage.inl ADDED
@@ -0,0 +1,550 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2018 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/contiguous_storage.h>
21
+ #include <thrust/detail/swap.h>
22
+ #include <thrust/detail/allocator/allocator_traits.h>
23
+ #include <thrust/detail/allocator/copy_construct_range.h>
24
+ #include <thrust/detail/allocator/default_construct_range.h>
25
+ #include <thrust/detail/allocator/destroy_range.h>
26
+ #include <thrust/detail/allocator/fill_construct_range.h>
27
+
28
+ #include <nv/target>
29
+
30
+ #include <stdexcept> // for std::runtime_error
31
+ #include <utility> // for use of std::swap in the WAR below
32
+
33
+ THRUST_NAMESPACE_BEGIN
34
+
35
+ namespace detail
36
+ {
37
+
38
+ class allocator_mismatch_on_swap : public std::runtime_error
39
+ {
40
+ public:
41
+ allocator_mismatch_on_swap()
42
+ :std::runtime_error("swap called on containers with allocators that propagate on swap, but compare non-equal")
43
+ {
44
+ }
45
+ };
46
+
47
+ __thrust_exec_check_disable__
48
+ template<typename T, typename Alloc>
49
+ __host__ __device__
50
+ contiguous_storage<T,Alloc>
51
+ ::contiguous_storage(const Alloc &alloc)
52
+ :m_allocator(alloc),
53
+ m_begin(pointer(static_cast<T*>(0))),
54
+ m_size(0)
55
+ {
56
+ ;
57
+ } // end contiguous_storage::contiguous_storage()
58
+
59
+ __thrust_exec_check_disable__
60
+ template<typename T, typename Alloc>
61
+ __host__ __device__
62
+ contiguous_storage<T,Alloc>
63
+ ::contiguous_storage(size_type n, const Alloc &alloc)
64
+ :m_allocator(alloc),
65
+ m_begin(pointer(static_cast<T*>(0))),
66
+ m_size(0)
67
+ {
68
+ allocate(n);
69
+ } // end contiguous_storage::contiguous_storage()
70
+
71
+ template<typename T, typename Alloc>
72
+ __host__ __device__
73
+ contiguous_storage<T,Alloc>
74
+ ::contiguous_storage(copy_allocator_t,
75
+ const contiguous_storage &other)
76
+ :m_allocator(other.m_allocator),
77
+ m_begin(pointer(static_cast<T*>(0))),
78
+ m_size(0)
79
+ {
80
+ } // end contiguous_storage::contiguous_storage()
81
+
82
+ template<typename T, typename Alloc>
83
+ __host__ __device__
84
+ contiguous_storage<T,Alloc>
85
+ ::contiguous_storage(copy_allocator_t,
86
+ const contiguous_storage &other, size_type n)
87
+ :m_allocator(other.m_allocator),
88
+ m_begin(pointer(static_cast<T*>(0))),
89
+ m_size(0)
90
+ {
91
+ allocate(n);
92
+ } // end contiguous_storage::contiguous_storage()
93
+
94
+ __thrust_exec_check_disable__
95
+ template<typename T, typename Alloc>
96
+ __host__ __device__
97
+ contiguous_storage<T,Alloc>
98
+ ::~contiguous_storage()
99
+ {
100
+ deallocate();
101
+ } // end contiguous_storage::~contiguous_storage()
102
+
103
+ template<typename T, typename Alloc>
104
+ __host__ __device__
105
+ typename contiguous_storage<T,Alloc>::size_type
106
+ contiguous_storage<T,Alloc>
107
+ ::size() const
108
+ {
109
+ return m_size;
110
+ } // end contiguous_storage::size()
111
+
112
+ template<typename T, typename Alloc>
113
+ __host__ __device__
114
+ typename contiguous_storage<T,Alloc>::size_type
115
+ contiguous_storage<T,Alloc>
116
+ ::max_size() const
117
+ {
118
+ return alloc_traits::max_size(m_allocator);
119
+ } // end contiguous_storage::max_size()
120
+
121
+ template<typename T, typename Alloc>
122
+ __host__ __device__
123
+ typename contiguous_storage<T,Alloc>::iterator
124
+ contiguous_storage<T,Alloc>
125
+ ::begin()
126
+ {
127
+ return m_begin;
128
+ } // end contiguous_storage::begin()
129
+
130
+ template<typename T, typename Alloc>
131
+ __host__ __device__
132
+ typename contiguous_storage<T,Alloc>::const_iterator
133
+ contiguous_storage<T,Alloc>
134
+ ::begin() const
135
+ {
136
+ return m_begin;
137
+ } // end contiguous_storage::begin()
138
+
139
+ template<typename T, typename Alloc>
140
+ __host__ __device__
141
+ typename contiguous_storage<T,Alloc>::iterator
142
+ contiguous_storage<T,Alloc>
143
+ ::end()
144
+ {
145
+ return m_begin + size();
146
+ } // end contiguous_storage::end()
147
+
148
+ template<typename T, typename Alloc>
149
+ __host__ __device__
150
+ typename contiguous_storage<T,Alloc>::const_iterator
151
+ contiguous_storage<T,Alloc>
152
+ ::end() const
153
+ {
154
+ return m_begin + size();
155
+ } // end contiguous_storage::end()
156
+
157
+ template<typename T, typename Alloc>
158
+ __host__ __device__
159
+ typename contiguous_storage<T,Alloc>::pointer
160
+ contiguous_storage<T,Alloc>
161
+ ::data()
162
+ {
163
+ return &*m_begin;
164
+ } // end contiguous_storage::data()
165
+
166
+ template<typename T, typename Alloc>
167
+ __host__ __device__
168
+ typename contiguous_storage<T,Alloc>::const_pointer
169
+ contiguous_storage<T,Alloc>
170
+ ::data() const
171
+ {
172
+ return &*m_begin;
173
+ } // end contiguous_storage::data()
174
+
175
+ template<typename T, typename Alloc>
176
+ __host__ __device__
177
+ typename contiguous_storage<T,Alloc>::reference
178
+ contiguous_storage<T,Alloc>
179
+ ::operator[](size_type n)
180
+ {
181
+ return m_begin[n];
182
+ } // end contiguous_storage::operator[]()
183
+
184
+ template<typename T, typename Alloc>
185
+ __host__ __device__
186
+ typename contiguous_storage<T,Alloc>::const_reference
187
+ contiguous_storage<T,Alloc>
188
+ ::operator[](size_type n) const
189
+ {
190
+ return m_begin[n];
191
+ } // end contiguous_storage::operator[]()
192
+
193
+ __thrust_exec_check_disable__
194
+ template<typename T, typename Alloc>
195
+ __host__ __device__
196
+ typename contiguous_storage<T,Alloc>::allocator_type
197
+ contiguous_storage<T,Alloc>
198
+ ::get_allocator() const
199
+ {
200
+ return m_allocator;
201
+ } // end contiguous_storage::get_allocator()
202
+
203
+ template<typename T, typename Alloc>
204
+ __host__ __device__
205
+ void contiguous_storage<T,Alloc>
206
+ ::allocate(size_type n)
207
+ {
208
+ if(n > 0)
209
+ {
210
+ m_begin = iterator(alloc_traits::allocate(m_allocator,n));
211
+ m_size = n;
212
+ } // end if
213
+ else
214
+ {
215
+ m_begin = iterator(pointer(static_cast<T*>(0)));
216
+ m_size = 0;
217
+ } // end else
218
+ } // end contiguous_storage::allocate()
219
+
220
+ template<typename T, typename Alloc>
221
+ __host__ __device__
222
+ void contiguous_storage<T,Alloc>
223
+ ::deallocate()
224
+ {
225
+ if(size() > 0)
226
+ {
227
+ alloc_traits::deallocate(m_allocator,m_begin.base(), size());
228
+ m_begin = iterator(pointer(static_cast<T*>(0)));
229
+ m_size = 0;
230
+ } // end if
231
+ } // end contiguous_storage::deallocate()
232
+
233
+ template<typename T, typename Alloc>
234
+ __host__ __device__
235
+ void contiguous_storage<T,Alloc>
236
+ ::swap(contiguous_storage &x)
237
+ {
238
+ thrust::swap(m_begin, x.m_begin);
239
+ thrust::swap(m_size, x.m_size);
240
+
241
+ swap_allocators(
242
+ integral_constant<
243
+ bool,
244
+ allocator_traits<Alloc>::propagate_on_container_swap::value
245
+ >(),
246
+ x.m_allocator);
247
+
248
+ thrust::swap(m_allocator, x.m_allocator);
249
+ } // end contiguous_storage::swap()
250
+
251
+ template<typename T, typename Alloc>
252
+ __host__ __device__
253
+ void contiguous_storage<T,Alloc>
254
+ ::default_construct_n(iterator first, size_type n)
255
+ {
256
+ default_construct_range(m_allocator, first.base(), n);
257
+ } // end contiguous_storage::default_construct_n()
258
+
259
+ template<typename T, typename Alloc>
260
+ __host__ __device__
261
+ void contiguous_storage<T,Alloc>
262
+ ::uninitialized_fill_n(iterator first, size_type n, const value_type &x)
263
+ {
264
+ fill_construct_range(m_allocator, first.base(), n, x);
265
+ } // end contiguous_storage::uninitialized_fill()
266
+
267
+ template<typename T, typename Alloc>
268
+ template<typename System, typename InputIterator>
269
+ __host__ __device__
270
+ typename contiguous_storage<T,Alloc>::iterator
271
+ contiguous_storage<T,Alloc>
272
+ ::uninitialized_copy(thrust::execution_policy<System> &from_system, InputIterator first, InputIterator last, iterator result)
273
+ {
274
+ return iterator(copy_construct_range(from_system, m_allocator, first, last, result.base()));
275
+ } // end contiguous_storage::uninitialized_copy()
276
+
277
+ template<typename T, typename Alloc>
278
+ template<typename InputIterator>
279
+ __host__ __device__
280
+ typename contiguous_storage<T,Alloc>::iterator
281
+ contiguous_storage<T,Alloc>
282
+ ::uninitialized_copy(InputIterator first, InputIterator last, iterator result)
283
+ {
284
+ // XXX assumes InputIterator's associated System is default-constructible
285
+ typename thrust::iterator_system<InputIterator>::type from_system;
286
+
287
+ return iterator(copy_construct_range(from_system, m_allocator, first, last, result.base()));
288
+ } // end contiguous_storage::uninitialized_copy()
289
+
290
+ template<typename T, typename Alloc>
291
+ template<typename System, typename InputIterator, typename Size>
292
+ __host__ __device__
293
+ typename contiguous_storage<T,Alloc>::iterator
294
+ contiguous_storage<T,Alloc>
295
+ ::uninitialized_copy_n(thrust::execution_policy<System> &from_system, InputIterator first, Size n, iterator result)
296
+ {
297
+ return iterator(copy_construct_range_n(from_system, m_allocator, first, n, result.base()));
298
+ } // end contiguous_storage::uninitialized_copy_n()
299
+
300
+ template<typename T, typename Alloc>
301
+ template<typename InputIterator, typename Size>
302
+ __host__ __device__
303
+ typename contiguous_storage<T,Alloc>::iterator
304
+ contiguous_storage<T,Alloc>
305
+ ::uninitialized_copy_n(InputIterator first, Size n, iterator result)
306
+ {
307
+ // XXX assumes InputIterator's associated System is default-constructible
308
+ typename thrust::iterator_system<InputIterator>::type from_system;
309
+
310
+ return iterator(copy_construct_range_n(from_system, m_allocator, first, n, result.base()));
311
+ } // end contiguous_storage::uninitialized_copy_n()
312
+
313
+ template<typename T, typename Alloc>
314
+ __host__ __device__
315
+ void contiguous_storage<T,Alloc>
316
+ ::destroy(iterator first, iterator last)
317
+ {
318
+ destroy_range(m_allocator, first.base(), last - first);
319
+ } // end contiguous_storage::destroy()
320
+
321
+ template<typename T, typename Alloc>
322
+ __host__ __device__
323
+ void contiguous_storage<T,Alloc>
324
+ ::deallocate_on_allocator_mismatch(const contiguous_storage &other)
325
+ {
326
+ integral_constant<
327
+ bool,
328
+ allocator_traits<Alloc>::propagate_on_container_copy_assignment::value
329
+ > c;
330
+
331
+ deallocate_on_allocator_mismatch_dispatch(c, other);
332
+ } // end contiguous_storage::deallocate_on_allocator_mismatch
333
+
334
+ template<typename T, typename Alloc>
335
+ __host__ __device__
336
+ void contiguous_storage<T,Alloc>
337
+ ::destroy_on_allocator_mismatch(const contiguous_storage &other,
338
+ iterator first, iterator last)
339
+ {
340
+ integral_constant<
341
+ bool,
342
+ allocator_traits<Alloc>::propagate_on_container_copy_assignment::value
343
+ > c;
344
+
345
+ destroy_on_allocator_mismatch_dispatch(c, other, first, last);
346
+ } // end contiguous_storage::destroy_on_allocator_mismatch
347
+
348
+ __thrust_exec_check_disable__
349
+ template<typename T, typename Alloc>
350
+ __host__ __device__
351
+ void contiguous_storage<T,Alloc>
352
+ ::set_allocator(const Alloc &alloc)
353
+ {
354
+ m_allocator = alloc;
355
+ } // end contiguous_storage::set_allocator()
356
+
357
+ template<typename T, typename Alloc>
358
+ __host__ __device__
359
+ bool contiguous_storage<T,Alloc>
360
+ ::is_allocator_not_equal(const Alloc &alloc) const
361
+ {
362
+ return is_allocator_not_equal_dispatch(
363
+ integral_constant<
364
+ bool,
365
+ allocator_traits<Alloc>::is_always_equal::value
366
+ >(),
367
+ alloc);
368
+ } // end contiguous_storage::is_allocator_not_equal()
369
+
370
+ template<typename T, typename Alloc>
371
+ __host__ __device__
372
+ bool contiguous_storage<T,Alloc>
373
+ ::is_allocator_not_equal(const contiguous_storage<T,Alloc> &other) const
374
+ {
375
+ return is_allocator_not_equal(m_allocator, other.m_allocator);
376
+ } // end contiguous_storage::is_allocator_not_equal()
377
+
378
+ template<typename T, typename Alloc>
379
+ __host__ __device__
380
+ void contiguous_storage<T,Alloc>
381
+ ::propagate_allocator(const contiguous_storage &other)
382
+ {
383
+ integral_constant<
384
+ bool,
385
+ allocator_traits<Alloc>::propagate_on_container_copy_assignment::value
386
+ > c;
387
+
388
+ propagate_allocator_dispatch(c, other);
389
+ } // end contiguous_storage::propagate_allocator()
390
+
391
+ #if THRUST_CPP_DIALECT >= 2011
392
+ template<typename T, typename Alloc>
393
+ __host__ __device__
394
+ void contiguous_storage<T,Alloc>
395
+ ::propagate_allocator(contiguous_storage &other)
396
+ {
397
+ integral_constant<
398
+ bool,
399
+ allocator_traits<Alloc>::propagate_on_container_move_assignment::value
400
+ > c;
401
+
402
+ propagate_allocator_dispatch(c, other);
403
+ } // end contiguous_storage::propagate_allocator()
404
+
405
+ template<typename T, typename Alloc>
406
+ __host__ __device__
407
+ contiguous_storage<T,Alloc> &contiguous_storage<T,Alloc>
408
+ ::operator=(contiguous_storage &&other)
409
+ {
410
+ if (size() > 0)
411
+ {
412
+ deallocate();
413
+ }
414
+ propagate_allocator(other);
415
+ m_begin = std::move(other.m_begin);
416
+ m_size = std::move(other.m_size);
417
+
418
+ other.m_begin = pointer(static_cast<T*>(0));
419
+ other.m_size = 0;
420
+
421
+ return *this;
422
+ } // end contiguous_storage::propagate_allocator()
423
+ #endif
424
+
425
+ template<typename T, typename Alloc>
426
+ __host__ __device__
427
+ void contiguous_storage<T,Alloc>
428
+ ::swap_allocators(true_type, const Alloc &)
429
+ {
430
+ } // end contiguous_storage::swap_allocators()
431
+
432
+ template<typename T, typename Alloc>
433
+ __host__ __device__
434
+ void contiguous_storage<T,Alloc>
435
+ ::swap_allocators(false_type, Alloc &other)
436
+ {
437
+ NV_IF_TARGET(NV_IS_DEVICE, (
438
+ // allocators must be equal when swapping containers with allocators that propagate on swap
439
+ assert(!is_allocator_not_equal(other));
440
+ ), (
441
+ if (is_allocator_not_equal(other))
442
+ {
443
+ throw allocator_mismatch_on_swap();
444
+ }
445
+ ));
446
+
447
+ thrust::swap(m_allocator, other);
448
+ } // end contiguous_storage::swap_allocators()
449
+
450
+ template<typename T, typename Alloc>
451
+ __host__ __device__
452
+ bool contiguous_storage<T,Alloc>
453
+ ::is_allocator_not_equal_dispatch(true_type /*is_always_equal*/, const Alloc &) const
454
+ {
455
+ return false;
456
+ } // end contiguous_storage::is_allocator_not_equal_dispatch()
457
+
458
+ __thrust_exec_check_disable__
459
+ template<typename T, typename Alloc>
460
+ __host__ __device__
461
+ bool contiguous_storage<T,Alloc>
462
+ ::is_allocator_not_equal_dispatch(false_type /*!is_always_equal*/, const Alloc& other) const
463
+ {
464
+ return m_allocator != other;
465
+ } // end contiguous_storage::is_allocator_not_equal_dispatch()
466
+
467
+ __thrust_exec_check_disable__
468
+ template<typename T, typename Alloc>
469
+ __host__ __device__
470
+ void contiguous_storage<T,Alloc>
471
+ ::deallocate_on_allocator_mismatch_dispatch(true_type, const contiguous_storage &other)
472
+ {
473
+ if (m_allocator != other.m_allocator)
474
+ {
475
+ deallocate();
476
+ }
477
+ } // end contiguous_storage::deallocate_on_allocator_mismatch()
478
+
479
+ template<typename T, typename Alloc>
480
+ __host__ __device__
481
+ void contiguous_storage<T,Alloc>
482
+ ::deallocate_on_allocator_mismatch_dispatch(false_type, const contiguous_storage &)
483
+ {
484
+ } // end contiguous_storage::deallocate_on_allocator_mismatch()
485
+
486
+ __thrust_exec_check_disable__
487
+ template<typename T, typename Alloc>
488
+ __host__ __device__
489
+ void contiguous_storage<T,Alloc>
490
+ ::destroy_on_allocator_mismatch_dispatch(true_type, const contiguous_storage &other,
491
+ iterator first, iterator last)
492
+ {
493
+ if (m_allocator != other.m_allocator)
494
+ {
495
+ destroy(first, last);
496
+ }
497
+ } // end contiguous_storage::destroy_on_allocator_mismatch()
498
+
499
+ template<typename T, typename Alloc>
500
+ __host__ __device__
501
+ void contiguous_storage<T,Alloc>
502
+ ::destroy_on_allocator_mismatch_dispatch(false_type, const contiguous_storage &,
503
+ iterator, iterator)
504
+ {
505
+ } // end contiguous_storage::destroy_on_allocator_mismatch()
506
+
507
+ __thrust_exec_check_disable__
508
+ template<typename T, typename Alloc>
509
+ __host__ __device__
510
+ void contiguous_storage<T,Alloc>
511
+ ::propagate_allocator_dispatch(true_type, const contiguous_storage &other)
512
+ {
513
+ m_allocator = other.m_allocator;
514
+ } // end contiguous_storage::propagate_allocator()
515
+
516
+ template<typename T, typename Alloc>
517
+ __host__ __device__
518
+ void contiguous_storage<T,Alloc>
519
+ ::propagate_allocator_dispatch(false_type, const contiguous_storage &)
520
+ {
521
+ } // end contiguous_storage::propagate_allocator()
522
+
523
+ #if THRUST_CPP_DIALECT >= 2011
524
+ __thrust_exec_check_disable__
525
+ template<typename T, typename Alloc>
526
+ __host__ __device__
527
+ void contiguous_storage<T,Alloc>
528
+ ::propagate_allocator_dispatch(true_type, contiguous_storage &other)
529
+ {
530
+ m_allocator = std::move(other.m_allocator);
531
+ } // end contiguous_storage::propagate_allocator()
532
+
533
+ template<typename T, typename Alloc>
534
+ __host__ __device__
535
+ void contiguous_storage<T,Alloc>
536
+ ::propagate_allocator_dispatch(false_type, contiguous_storage &)
537
+ {
538
+ } // end contiguous_storage::propagate_allocator()
539
+ #endif
540
+
541
+ } // end detail
542
+
543
+ template<typename T, typename Alloc>
544
+ __host__ __device__
545
+ void swap(detail::contiguous_storage<T,Alloc> &lhs, detail::contiguous_storage<T,Alloc> &rhs)
546
+ {
547
+ lhs.swap(rhs);
548
+ } // end swap()
549
+
550
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/copy.h ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/execution_policy.h>
21
+
22
+ THRUST_NAMESPACE_BEGIN
23
+
24
+ template<typename System,
25
+ typename InputIterator,
26
+ typename OutputIterator>
27
+ __host__ __device__
28
+ OutputIterator copy(const thrust::detail::execution_policy_base<System> &system,
29
+ InputIterator first,
30
+ InputIterator last,
31
+ OutputIterator result);
32
+
33
+ template<typename System,
34
+ typename InputIterator,
35
+ typename Size,
36
+ typename OutputIterator>
37
+ __host__ __device__
38
+ OutputIterator copy_n(const thrust::detail::execution_policy_base<System> &system,
39
+ InputIterator first,
40
+ Size n,
41
+ OutputIterator result);
42
+
43
+ template<typename InputIterator,
44
+ typename OutputIterator>
45
+ OutputIterator copy(InputIterator first,
46
+ InputIterator last,
47
+ OutputIterator result);
48
+
49
+ template<typename InputIterator,
50
+ typename Size,
51
+ typename OutputIterator>
52
+ OutputIterator copy_n(InputIterator first,
53
+ Size n,
54
+ OutputIterator result);
55
+
56
+
57
+ namespace detail
58
+ {
59
+
60
+
61
+ template<typename FromSystem,
62
+ typename ToSystem,
63
+ typename InputIterator,
64
+ typename OutputIterator>
65
+ __host__ __device__
66
+ OutputIterator two_system_copy(const thrust::execution_policy<FromSystem> &from_system,
67
+ const thrust::execution_policy<ToSystem> &two_system,
68
+ InputIterator first,
69
+ InputIterator last,
70
+ OutputIterator result);
71
+
72
+
73
+ template<typename FromSystem,
74
+ typename ToSystem,
75
+ typename InputIterator,
76
+ typename Size,
77
+ typename OutputIterator>
78
+ __host__ __device__
79
+ OutputIterator two_system_copy_n(const thrust::execution_policy<FromSystem> &from_system,
80
+ const thrust::execution_policy<ToSystem> &two_system,
81
+ InputIterator first,
82
+ Size n,
83
+ OutputIterator result);
84
+
85
+
86
+ } // end detail
87
+
88
+ THRUST_NAMESPACE_END
89
+
90
+ #include <thrust/detail/copy.inl>
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/copy_if.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/execution_policy.h>
21
+
22
+ THRUST_NAMESPACE_BEGIN
23
+
24
+ template<typename DerivedPolicy,
25
+ typename InputIterator,
26
+ typename OutputIterator,
27
+ typename Predicate>
28
+ __host__ __device__
29
+ OutputIterator copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
30
+ InputIterator first,
31
+ InputIterator last,
32
+ OutputIterator result,
33
+ Predicate pred);
34
+
35
+
36
+ template<typename DerivedPolicy,
37
+ typename InputIterator1,
38
+ typename InputIterator2,
39
+ typename OutputIterator,
40
+ typename Predicate>
41
+ __host__ __device__
42
+ OutputIterator copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
43
+ InputIterator1 first,
44
+ InputIterator1 last,
45
+ InputIterator2 stencil,
46
+ OutputIterator result,
47
+ Predicate pred);
48
+
49
+
50
+ template<typename InputIterator,
51
+ typename OutputIterator,
52
+ typename Predicate>
53
+ OutputIterator copy_if(InputIterator first,
54
+ InputIterator last,
55
+ OutputIterator result,
56
+ Predicate pred);
57
+
58
+
59
+ template<typename InputIterator1,
60
+ typename InputIterator2,
61
+ typename OutputIterator,
62
+ typename Predicate>
63
+ OutputIterator copy_if(InputIterator1 first,
64
+ InputIterator1 last,
65
+ InputIterator2 stencil,
66
+ OutputIterator result,
67
+ Predicate pred);
68
+
69
+ THRUST_NAMESPACE_END
70
+
71
+ #include <thrust/detail/copy_if.inl>
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/copy_if.inl ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/copy_if.h>
21
+ #include <thrust/iterator/iterator_traits.h>
22
+ #include <thrust/system/detail/generic/copy_if.h>
23
+ #include <thrust/system/detail/generic/select_system.h>
24
+ #include <thrust/system/detail/adl/copy_if.h>
25
+
26
+ THRUST_NAMESPACE_BEGIN
27
+
28
+ __thrust_exec_check_disable__
29
+ template<typename DerivedPolicy,
30
+ typename InputIterator,
31
+ typename OutputIterator,
32
+ typename Predicate>
33
+ __host__ __device__
34
+ OutputIterator copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
35
+ InputIterator first,
36
+ InputIterator last,
37
+ OutputIterator result,
38
+ Predicate pred)
39
+ {
40
+ using thrust::system::detail::generic::copy_if;
41
+ return copy_if(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, result, pred);
42
+ } // end copy_if()
43
+
44
+
45
+ __thrust_exec_check_disable__
46
+ template<typename DerivedPolicy,
47
+ typename InputIterator1,
48
+ typename InputIterator2,
49
+ typename OutputIterator,
50
+ typename Predicate>
51
+ __host__ __device__
52
+ OutputIterator copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
53
+ InputIterator1 first,
54
+ InputIterator1 last,
55
+ InputIterator2 stencil,
56
+ OutputIterator result,
57
+ Predicate pred)
58
+ {
59
+ using thrust::system::detail::generic::copy_if;
60
+ return copy_if(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, stencil, result, pred);
61
+ } // end copy_if()
62
+
63
+
64
+ template<typename InputIterator,
65
+ typename OutputIterator,
66
+ typename Predicate>
67
+ OutputIterator copy_if(InputIterator first,
68
+ InputIterator last,
69
+ OutputIterator result,
70
+ Predicate pred)
71
+ {
72
+ using thrust::system::detail::generic::select_system;
73
+
74
+ typedef typename thrust::iterator_system<InputIterator>::type System1;
75
+ typedef typename thrust::iterator_system<OutputIterator>::type System2;
76
+
77
+ System1 system1;
78
+ System2 system2;
79
+
80
+ return thrust::copy_if(select_system(system1,system2), first, last, result, pred);
81
+ } // end copy_if()
82
+
83
+
84
+ template<typename InputIterator1,
85
+ typename InputIterator2,
86
+ typename OutputIterator,
87
+ typename Predicate>
88
+ OutputIterator copy_if(InputIterator1 first,
89
+ InputIterator1 last,
90
+ InputIterator2 stencil,
91
+ OutputIterator result,
92
+ Predicate pred)
93
+ {
94
+ using thrust::system::detail::generic::select_system;
95
+
96
+ typedef typename thrust::iterator_system<InputIterator1>::type System1;
97
+ typedef typename thrust::iterator_system<InputIterator2>::type System2;
98
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
99
+
100
+ System1 system1;
101
+ System2 system2;
102
+ System3 system3;
103
+
104
+ return thrust::copy_if(select_system(system1,system2,system3), first, last, stencil, result, pred);
105
+ } // end copy_if()
106
+
107
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/count.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/execution_policy.h>
21
+
22
+ THRUST_NAMESPACE_BEGIN
23
+
24
+ template<typename DerivedPolicy,
25
+ typename InputIterator,
26
+ typename EqualityComparable>
27
+ __host__ __device__
28
+ typename thrust::iterator_traits<InputIterator>::difference_type
29
+ count(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
30
+ InputIterator first,
31
+ InputIterator last,
32
+ const EqualityComparable& value);
33
+
34
+ template<typename DerivedPolicy,
35
+ typename InputIterator,
36
+ typename Predicate>
37
+ __host__ __device__
38
+ typename thrust::iterator_traits<InputIterator>::difference_type
39
+ count_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
40
+ InputIterator first,
41
+ InputIterator last,
42
+ Predicate pred);
43
+
44
+ template <typename InputIterator,
45
+ typename EqualityComparable>
46
+ typename thrust::iterator_traits<InputIterator>::difference_type
47
+ count(InputIterator first,
48
+ InputIterator last,
49
+ const EqualityComparable& value);
50
+
51
+ template <typename InputIterator,
52
+ typename Predicate>
53
+ typename thrust::iterator_traits<InputIterator>::difference_type
54
+ count_if(InputIterator first,
55
+ InputIterator last,
56
+ Predicate pred);
57
+
58
+ THRUST_NAMESPACE_END
59
+
60
+ #include <thrust/detail/count.inl>
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/dependencies_aware_execution_policy.h ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2018 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/cpp11_required.h>
21
+
22
+ #if THRUST_CPP_DIALECT >= 2011
23
+
24
+ #include <tuple>
25
+
26
+ #include <thrust/detail/execute_with_dependencies.h>
27
+
28
+ THRUST_NAMESPACE_BEGIN
29
+
30
+ namespace detail
31
+ {
32
+
33
+ template<template<typename> class ExecutionPolicyCRTPBase>
34
+ struct dependencies_aware_execution_policy
35
+ {
36
+ template<typename ...Dependencies>
37
+ __host__
38
+ thrust::detail::execute_with_dependencies<
39
+ ExecutionPolicyCRTPBase,
40
+ Dependencies...
41
+ >
42
+ after(Dependencies&& ...dependencies) const
43
+ {
44
+ return { capture_as_dependency(THRUST_FWD(dependencies))... };
45
+ }
46
+
47
+ template<typename ...Dependencies>
48
+ __host__
49
+ thrust::detail::execute_with_dependencies<
50
+ ExecutionPolicyCRTPBase,
51
+ Dependencies...
52
+ >
53
+ after(std::tuple<Dependencies...>& dependencies) const
54
+ {
55
+ return { capture_as_dependency(dependencies) };
56
+ }
57
+ template<typename ...Dependencies>
58
+ __host__
59
+ thrust::detail::execute_with_dependencies<
60
+ ExecutionPolicyCRTPBase,
61
+ Dependencies...
62
+ >
63
+ after(std::tuple<Dependencies...>&& dependencies) const
64
+ {
65
+ return { capture_as_dependency(std::move(dependencies)) };
66
+ }
67
+
68
+ template<typename ...Dependencies>
69
+ __host__
70
+ thrust::detail::execute_with_dependencies<
71
+ ExecutionPolicyCRTPBase,
72
+ Dependencies...
73
+ >
74
+ rebind_after(Dependencies&& ...dependencies) const
75
+ {
76
+ return { capture_as_dependency(THRUST_FWD(dependencies))... };
77
+ }
78
+
79
+ template<typename ...Dependencies>
80
+ __host__
81
+ thrust::detail::execute_with_dependencies<
82
+ ExecutionPolicyCRTPBase,
83
+ Dependencies...
84
+ >
85
+ rebind_after(std::tuple<Dependencies...>& dependencies) const
86
+ {
87
+ return { capture_as_dependency(dependencies) };
88
+ }
89
+ template<typename ...Dependencies>
90
+ __host__
91
+ thrust::detail::execute_with_dependencies<
92
+ ExecutionPolicyCRTPBase,
93
+ Dependencies...
94
+ >
95
+ rebind_after(std::tuple<Dependencies...>&& dependencies) const
96
+ {
97
+ return { capture_as_dependency(std::move(dependencies)) };
98
+ }
99
+ };
100
+
101
+ } // end detail
102
+
103
+ THRUST_NAMESPACE_END
104
+
105
+ #endif // THRUST_CPP_DIALECT >= 2011
106
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/device_malloc.inl ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/device_malloc.h>
21
+ #include <thrust/iterator/iterator_traits.h>
22
+ #include <thrust/system/detail/generic/select_system.h>
23
+ #include <thrust/detail/malloc_and_free.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+
27
+ thrust::device_ptr<void> device_malloc(const std::size_t n)
28
+ {
29
+ using thrust::system::detail::generic::select_system;
30
+
31
+ typedef thrust::iterator_system< thrust::device_ptr<void> >::type system;
32
+
33
+ // XXX lower to select_system(system) here
34
+ system s;
35
+
36
+ return thrust::device_ptr<void>(thrust::malloc(s, n).get());
37
+ } // end device_malloc()
38
+
39
+
40
+ template<typename T>
41
+ thrust::device_ptr<T> device_malloc(const std::size_t n)
42
+ {
43
+ using thrust::system::detail::generic::select_system;
44
+
45
+ typedef thrust::iterator_system< thrust::device_ptr<void> >::type system;
46
+
47
+ // XXX lower to select_system(system) here
48
+ system s;
49
+
50
+ return thrust::device_ptr<T>(thrust::malloc<T>(s,n).get());
51
+ } // end device_malloc()
52
+
53
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/device_ptr.inl ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/device_ptr.h>
20
+ #include <thrust/device_reference.h>
21
+ #include <thrust/detail/config.h>
22
+ #include <thrust/detail/type_traits.h>
23
+ #include <thrust/iterator/iterator_traits.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+
27
+ template<typename T>
28
+ __host__ __device__
29
+ device_ptr<T> device_pointer_cast(T *ptr)
30
+ {
31
+ return device_ptr<T>(ptr);
32
+ } // end device_pointer_cast()
33
+
34
+ template<typename T>
35
+ __host__ __device__
36
+ device_ptr<T> device_pointer_cast(const device_ptr<T> &ptr)
37
+ {
38
+ return ptr;
39
+ } // end device_pointer_cast()
40
+
41
+
42
+ namespace detail
43
+ {
44
+
45
+ template<typename T>
46
+ struct is_device_ptr< thrust::device_ptr<T> >
47
+ : public true_type
48
+ {
49
+ }; // end is_device_ptr
50
+
51
+ #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) && (_MSC_VER <= 1400)
52
+ // XXX WAR MSVC 2005 problem with correctly implementing
53
+ // pointer_raw_pointer for device_ptr by specializing it here
54
+ template<typename T>
55
+ struct pointer_raw_pointer< thrust::device_ptr<T> >
56
+ {
57
+ typedef typename device_ptr<T>::raw_pointer type;
58
+ }; // end pointer_raw_pointer
59
+ #endif
60
+
61
+
62
+ } // end namespace detail
63
+
64
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/distance.inl ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/advance.h>
20
+ #include <thrust/detail/config.h>
21
+ #include <thrust/system/detail/generic/distance.h>
22
+ #include <thrust/iterator/iterator_traits.h>
23
+
24
+ THRUST_NAMESPACE_BEGIN
25
+
26
+ __thrust_exec_check_disable__
27
+ template<typename InputIterator>
28
+ inline __host__ __device__
29
+ typename thrust::iterator_traits<InputIterator>::difference_type
30
+ distance(InputIterator first, InputIterator last)
31
+ {
32
+ return thrust::system::detail::generic::distance(first, last);
33
+ } // end distance()
34
+
35
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/execute_with_allocator.h ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2018 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #include <thrust/detail/execute_with_allocator_fwd.h>
22
+ #include <thrust/pair.h>
23
+ #include <thrust/detail/raw_pointer_cast.h>
24
+ #include <thrust/detail/type_traits/pointer_traits.h>
25
+ #include <thrust/detail/allocator/allocator_traits.h>
26
+ #include <thrust/detail/integer_math.h>
27
+
28
+ THRUST_NAMESPACE_BEGIN
29
+
30
+ namespace detail
31
+ {
32
+
33
+ template <
34
+ typename T
35
+ , typename Allocator
36
+ , template <typename> class BaseSystem
37
+ >
38
+ __host__
39
+ thrust::pair<T*, std::ptrdiff_t>
40
+ get_temporary_buffer(
41
+ thrust::detail::execute_with_allocator<Allocator, BaseSystem>& system
42
+ , std::ptrdiff_t n
43
+ )
44
+ {
45
+ typedef typename thrust::detail::remove_reference<Allocator>::type naked_allocator;
46
+ typedef typename thrust::detail::allocator_traits<naked_allocator> alloc_traits;
47
+ typedef typename alloc_traits::void_pointer void_pointer;
48
+ typedef typename alloc_traits::size_type size_type;
49
+ typedef typename alloc_traits::value_type value_type;
50
+
51
+ // How many elements of type value_type do we need to accommodate n elements
52
+ // of type T?
53
+ size_type num_elements = divide_ri(sizeof(T) * n, sizeof(value_type));
54
+
55
+ void_pointer ptr = alloc_traits::allocate(system.get_allocator(), num_elements);
56
+
57
+ // Return the pointer and the number of elements of type T allocated.
58
+ return thrust::make_pair(thrust::reinterpret_pointer_cast<T*>(ptr),n);
59
+ }
60
+
61
+ template <
62
+ typename Pointer
63
+ , typename Allocator
64
+ , template <typename> class BaseSystem
65
+ >
66
+ __host__
67
+ void
68
+ return_temporary_buffer(
69
+ thrust::detail::execute_with_allocator<Allocator, BaseSystem>& system
70
+ , Pointer p
71
+ , std::ptrdiff_t n
72
+ )
73
+ {
74
+ typedef typename thrust::detail::remove_reference<Allocator>::type naked_allocator;
75
+ typedef typename thrust::detail::allocator_traits<naked_allocator> alloc_traits;
76
+ typedef typename alloc_traits::pointer pointer;
77
+ typedef typename alloc_traits::size_type size_type;
78
+ typedef typename alloc_traits::value_type value_type;
79
+ typedef typename thrust::detail::pointer_traits<Pointer>::element_type T;
80
+
81
+ size_type num_elements = divide_ri(sizeof(T) * n, sizeof(value_type));
82
+
83
+ pointer to_ptr = thrust::reinterpret_pointer_cast<pointer>(p);
84
+ alloc_traits::deallocate(system.get_allocator(), to_ptr, num_elements);
85
+ }
86
+
87
+ #if THRUST_CPP_DIALECT >= 2011
88
+
89
+ template <
90
+ typename T,
91
+ template <typename> class BaseSystem,
92
+ typename Allocator,
93
+ typename ...Dependencies
94
+ >
95
+ __host__
96
+ thrust::pair<T*, std::ptrdiff_t>
97
+ get_temporary_buffer(
98
+ thrust::detail::execute_with_allocator_and_dependencies<Allocator, BaseSystem, Dependencies...>& system,
99
+ std::ptrdiff_t n
100
+ )
101
+ {
102
+ typedef typename thrust::detail::remove_reference<Allocator>::type naked_allocator;
103
+ typedef typename thrust::detail::allocator_traits<naked_allocator> alloc_traits;
104
+ typedef typename alloc_traits::void_pointer void_pointer;
105
+ typedef typename alloc_traits::size_type size_type;
106
+ typedef typename alloc_traits::value_type value_type;
107
+
108
+ // How many elements of type value_type do we need to accommodate n elements
109
+ // of type T?
110
+ size_type num_elements = divide_ri(sizeof(T) * n, sizeof(value_type));
111
+
112
+ void_pointer ptr = alloc_traits::allocate(system.get_allocator(), num_elements);
113
+
114
+ // Return the pointer and the number of elements of type T allocated.
115
+ return thrust::make_pair(thrust::reinterpret_pointer_cast<T*>(ptr),n);
116
+ }
117
+
118
+ template <
119
+ typename Pointer,
120
+ template <typename> class BaseSystem,
121
+ typename Allocator,
122
+ typename ...Dependencies
123
+ >
124
+ __host__
125
+ void
126
+ return_temporary_buffer(
127
+ thrust::detail::execute_with_allocator_and_dependencies<Allocator, BaseSystem, Dependencies...>& system,
128
+ Pointer p,
129
+ std::ptrdiff_t n
130
+ )
131
+ {
132
+ typedef typename thrust::detail::remove_reference<Allocator>::type naked_allocator;
133
+ typedef typename thrust::detail::allocator_traits<naked_allocator> alloc_traits;
134
+ typedef typename alloc_traits::pointer pointer;
135
+ typedef typename alloc_traits::size_type size_type;
136
+ typedef typename alloc_traits::value_type value_type;
137
+ typedef typename thrust::detail::pointer_traits<Pointer>::element_type T;
138
+
139
+ size_type num_elements = divide_ri(sizeof(T) * n, sizeof(value_type));
140
+
141
+ pointer to_ptr = thrust::reinterpret_pointer_cast<pointer>(p);
142
+ alloc_traits::deallocate(system.get_allocator(), to_ptr, num_elements);
143
+ }
144
+
145
+ #endif
146
+
147
+ } // namespace detail
148
+
149
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/execute_with_dependencies.h ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2018 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/cpp11_required.h>
21
+
22
+ #if THRUST_CPP_DIALECT >= 2011
23
+
24
+ #include <thrust/detail/type_deduction.h>
25
+ #include <thrust/type_traits/remove_cvref.h>
26
+
27
+ #include <tuple>
28
+ #include <type_traits>
29
+
30
+ THRUST_NAMESPACE_BEGIN
31
+
32
+ namespace detail
33
+ {
34
+
35
+ struct capture_as_dependency_fn
36
+ {
37
+ template<typename Dependency>
38
+ auto operator()(Dependency&& dependency) const
39
+ THRUST_DECLTYPE_RETURNS(capture_as_dependency(THRUST_FWD(dependency)))
40
+ };
41
+
42
+ // Default implementation: universal forwarding.
43
+ template<typename Dependency>
44
+ auto capture_as_dependency(Dependency&& dependency)
45
+ THRUST_DECLTYPE_RETURNS(THRUST_FWD(dependency))
46
+
47
+ template<typename... Dependencies>
48
+ auto capture_as_dependency(std::tuple<Dependencies...>& dependencies)
49
+ THRUST_DECLTYPE_RETURNS(
50
+ tuple_for_each(THRUST_FWD(dependencies), capture_as_dependency_fn{})
51
+ )
52
+
53
+ template<template<typename> class BaseSystem, typename... Dependencies>
54
+ struct execute_with_dependencies
55
+ : BaseSystem<execute_with_dependencies<BaseSystem, Dependencies...>>
56
+ {
57
+ private:
58
+ using super_t = BaseSystem<execute_with_dependencies<BaseSystem, Dependencies...>>;
59
+
60
+ std::tuple<remove_cvref_t<Dependencies>...> dependencies;
61
+
62
+ public:
63
+ __host__
64
+ execute_with_dependencies(super_t const &super, Dependencies && ...dependencies)
65
+ : super_t(super), dependencies(std::forward<Dependencies>(dependencies)...)
66
+ {
67
+ }
68
+
69
+ template <typename... UDependencies>
70
+ __host__
71
+ execute_with_dependencies(super_t const &super, UDependencies && ...deps)
72
+ : super_t(super), dependencies(THRUST_FWD(deps)...)
73
+ {
74
+ }
75
+
76
+ template <typename... UDependencies>
77
+ __host__
78
+ execute_with_dependencies(UDependencies && ...deps)
79
+ : dependencies(THRUST_FWD(deps)...)
80
+ {
81
+ }
82
+
83
+ template <typename... UDependencies>
84
+ __host__
85
+ execute_with_dependencies(super_t const &super, std::tuple<UDependencies...>&& deps)
86
+ : super_t(super), dependencies(std::move(deps))
87
+ {
88
+ }
89
+
90
+ template <typename... UDependencies>
91
+ __host__
92
+ execute_with_dependencies(std::tuple<UDependencies...>&& deps)
93
+ : dependencies(std::move(deps))
94
+ {
95
+ }
96
+
97
+ std::tuple<remove_cvref_t<Dependencies>...>
98
+ __host__
99
+ extract_dependencies()
100
+ {
101
+ return std::move(dependencies);
102
+ }
103
+
104
+ // Rebinding.
105
+ template<typename ...UDependencies>
106
+ __host__
107
+ execute_with_dependencies<BaseSystem, UDependencies...>
108
+ rebind_after(UDependencies&& ...udependencies) const
109
+ {
110
+ return { capture_as_dependency(THRUST_FWD(udependencies))... };
111
+ }
112
+
113
+ // Rebinding.
114
+ template<typename ...UDependencies>
115
+ __host__
116
+ execute_with_dependencies<BaseSystem, UDependencies...>
117
+ rebind_after(std::tuple<UDependencies...>& udependencies) const
118
+ {
119
+ return { capture_as_dependency(udependencies) };
120
+ }
121
+ template<typename ...UDependencies>
122
+ __host__
123
+ execute_with_dependencies<BaseSystem, UDependencies...>
124
+ rebind_after(std::tuple<UDependencies...>&& udependencies) const
125
+ {
126
+ return { capture_as_dependency(std::move(udependencies)) };
127
+ }
128
+ };
129
+
130
+ template<
131
+ typename Allocator,
132
+ template<typename> class BaseSystem,
133
+ typename... Dependencies
134
+ >
135
+ struct execute_with_allocator_and_dependencies
136
+ : BaseSystem<
137
+ execute_with_allocator_and_dependencies<
138
+ Allocator,
139
+ BaseSystem,
140
+ Dependencies...
141
+ >
142
+ >
143
+ {
144
+ private:
145
+ using super_t = BaseSystem<
146
+ execute_with_allocator_and_dependencies<
147
+ Allocator,
148
+ BaseSystem,
149
+ Dependencies...
150
+ >
151
+ >;
152
+
153
+ std::tuple<remove_cvref_t<Dependencies>...> dependencies;
154
+ Allocator alloc;
155
+
156
+ public:
157
+ template <typename... UDependencies>
158
+ __host__
159
+ execute_with_allocator_and_dependencies(super_t const &super, Allocator a, UDependencies && ...deps)
160
+ : super_t(super), dependencies(THRUST_FWD(deps)...), alloc(a)
161
+ {
162
+ }
163
+
164
+ template <typename... UDependencies>
165
+ __host__
166
+ execute_with_allocator_and_dependencies(Allocator a, UDependencies && ...deps)
167
+ : dependencies(THRUST_FWD(deps)...), alloc(a)
168
+ {
169
+ }
170
+
171
+ template <typename... UDependencies>
172
+ __host__
173
+ execute_with_allocator_and_dependencies(super_t const &super, Allocator a, std::tuple<UDependencies...>&& deps)
174
+ : super_t(super), dependencies(std::move(deps)), alloc(a)
175
+ {
176
+ }
177
+
178
+ template <typename... UDependencies>
179
+ __host__
180
+ execute_with_allocator_and_dependencies(Allocator a, std::tuple<UDependencies...>&& deps)
181
+ : dependencies(std::move(deps)), alloc(a)
182
+ {
183
+ }
184
+
185
+ std::tuple<remove_cvref_t<Dependencies>...>
186
+ __host__
187
+ extract_dependencies()
188
+ {
189
+ return std::move(dependencies);
190
+ }
191
+
192
+ __host__
193
+ typename std::add_lvalue_reference<Allocator>::type
194
+ get_allocator()
195
+ {
196
+ return alloc;
197
+ }
198
+
199
+ // Rebinding.
200
+ template<typename ...UDependencies>
201
+ __host__
202
+ execute_with_allocator_and_dependencies<Allocator, BaseSystem, UDependencies...>
203
+ rebind_after(UDependencies&& ...udependencies) const
204
+ {
205
+ return { alloc, capture_as_dependency(THRUST_FWD(udependencies))... };
206
+ }
207
+
208
+ // Rebinding.
209
+ template<typename ...UDependencies>
210
+ __host__
211
+ execute_with_allocator_and_dependencies<Allocator, BaseSystem, UDependencies...>
212
+ rebind_after(std::tuple<UDependencies...>& udependencies) const
213
+ {
214
+ return { alloc, capture_as_dependency(udependencies) };
215
+ }
216
+ template<typename ...UDependencies>
217
+ __host__
218
+ execute_with_allocator_and_dependencies<Allocator, BaseSystem, UDependencies...>
219
+ rebind_after(std::tuple<UDependencies...>&& udependencies) const
220
+ {
221
+ return { alloc, capture_as_dependency(std::move(udependencies)) };
222
+ }
223
+ };
224
+
225
+ template<template<typename> class BaseSystem, typename ...Dependencies>
226
+ __host__
227
+ std::tuple<remove_cvref_t<Dependencies>...>
228
+ extract_dependencies(thrust::detail::execute_with_dependencies<BaseSystem, Dependencies...>&& system)
229
+ {
230
+ return std::move(system).extract_dependencies();
231
+ }
232
+ template<template<typename> class BaseSystem, typename ...Dependencies>
233
+ __host__
234
+ std::tuple<remove_cvref_t<Dependencies>...>
235
+ extract_dependencies(thrust::detail::execute_with_dependencies<BaseSystem, Dependencies...>& system)
236
+ {
237
+ return std::move(system).extract_dependencies();
238
+ }
239
+
240
+ template<typename Allocator, template<typename> class BaseSystem, typename ...Dependencies>
241
+ __host__
242
+ std::tuple<remove_cvref_t<Dependencies>...>
243
+ extract_dependencies(thrust::detail::execute_with_allocator_and_dependencies<Allocator, BaseSystem, Dependencies...>&& system)
244
+ {
245
+ return std::move(system).extract_dependencies();
246
+ }
247
+ template<typename Allocator, template<typename> class BaseSystem, typename ...Dependencies>
248
+ __host__
249
+ std::tuple<remove_cvref_t<Dependencies>...>
250
+ extract_dependencies(thrust::detail::execute_with_allocator_and_dependencies<Allocator, BaseSystem, Dependencies...>& system)
251
+ {
252
+ return std::move(system).extract_dependencies();
253
+ }
254
+
255
+ template<typename System>
256
+ __host__
257
+ std::tuple<>
258
+ extract_dependencies(System &&)
259
+ {
260
+ return std::tuple<>{};
261
+ }
262
+
263
+ } // end detail
264
+
265
+ THRUST_NAMESPACE_END
266
+
267
+ #endif // THRUST_CPP_DIALECT >= 2011
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/extrema.inl ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/extrema.h>
21
+ #include <thrust/iterator/iterator_traits.h>
22
+ #include <thrust/system/detail/generic/select_system.h>
23
+ #include <thrust/system/detail/generic/extrema.h>
24
+ #include <thrust/system/detail/adl/extrema.h>
25
+
26
+ THRUST_NAMESPACE_BEGIN
27
+
28
+ __thrust_exec_check_disable__
29
+ template<typename DerivedPolicy, typename ForwardIterator>
30
+ __host__ __device__
31
+ ForwardIterator min_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last)
32
+ {
33
+ using thrust::system::detail::generic::min_element;
34
+ return min_element(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last);
35
+ } // end min_element()
36
+
37
+
38
+ __thrust_exec_check_disable__
39
+ template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
40
+ __host__ __device__
41
+ ForwardIterator min_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp)
42
+ {
43
+ using thrust::system::detail::generic::min_element;
44
+ return min_element(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, comp);
45
+ } // end min_element()
46
+
47
+
48
+ __thrust_exec_check_disable__
49
+ template<typename DerivedPolicy, typename ForwardIterator>
50
+ __host__ __device__
51
+ ForwardIterator max_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last)
52
+ {
53
+ using thrust::system::detail::generic::max_element;
54
+ return max_element(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last);
55
+ } // end max_element()
56
+
57
+
58
+ __thrust_exec_check_disable__
59
+ template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
60
+ __host__ __device__
61
+ ForwardIterator max_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp)
62
+ {
63
+ using thrust::system::detail::generic::max_element;
64
+ return max_element(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, comp);
65
+ } // end max_element()
66
+
67
+
68
+ __thrust_exec_check_disable__
69
+ template<typename DerivedPolicy, typename ForwardIterator>
70
+ __host__ __device__
71
+ thrust::pair<ForwardIterator,ForwardIterator> minmax_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last)
72
+ {
73
+ using thrust::system::detail::generic::minmax_element;
74
+ return minmax_element(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last);
75
+ } // end minmax_element()
76
+
77
+
78
+ __thrust_exec_check_disable__
79
+ template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
80
+ __host__ __device__
81
+ thrust::pair<ForwardIterator,ForwardIterator> minmax_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp)
82
+ {
83
+ using thrust::system::detail::generic::minmax_element;
84
+ return minmax_element(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, comp);
85
+ } // end minmax_element()
86
+
87
+
88
+ template <typename ForwardIterator>
89
+ ForwardIterator min_element(ForwardIterator first, ForwardIterator last)
90
+ {
91
+ using thrust::system::detail::generic::select_system;
92
+
93
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
94
+
95
+ System system;
96
+
97
+ return thrust::min_element(select_system(system), first, last);
98
+ } // end min_element()
99
+
100
+
101
+ template <typename ForwardIterator, typename BinaryPredicate>
102
+ ForwardIterator min_element(ForwardIterator first, ForwardIterator last,
103
+ BinaryPredicate comp)
104
+ {
105
+ using thrust::system::detail::generic::select_system;
106
+
107
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
108
+
109
+ System system;
110
+
111
+ return thrust::min_element(select_system(system), first, last, comp);
112
+ } // end min_element()
113
+
114
+
115
+ template <typename ForwardIterator>
116
+ ForwardIterator max_element(ForwardIterator first, ForwardIterator last)
117
+ {
118
+ using thrust::system::detail::generic::select_system;
119
+
120
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
121
+
122
+ System system;
123
+
124
+ return thrust::max_element(select_system(system), first, last);
125
+ } // end max_element()
126
+
127
+
128
+ template <typename ForwardIterator, typename BinaryPredicate>
129
+ ForwardIterator max_element(ForwardIterator first, ForwardIterator last,
130
+ BinaryPredicate comp)
131
+ {
132
+ using thrust::system::detail::generic::select_system;
133
+
134
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
135
+
136
+ System system;
137
+
138
+ return thrust::max_element(select_system(system), first, last, comp);
139
+ } // end max_element()
140
+
141
+
142
+ template <typename ForwardIterator>
143
+ thrust::pair<ForwardIterator,ForwardIterator>
144
+ minmax_element(ForwardIterator first, ForwardIterator last)
145
+ {
146
+ using thrust::system::detail::generic::select_system;
147
+
148
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
149
+
150
+ System system;
151
+
152
+ return thrust::minmax_element(select_system(system), first, last);
153
+ } // end minmax_element()
154
+
155
+
156
+ template <typename ForwardIterator, typename BinaryPredicate>
157
+ thrust::pair<ForwardIterator,ForwardIterator>
158
+ minmax_element(ForwardIterator first, ForwardIterator last, BinaryPredicate comp)
159
+ {
160
+ using thrust::system::detail::generic::select_system;
161
+
162
+ typedef typename thrust::iterator_system<ForwardIterator>::type System;
163
+
164
+ System system;
165
+
166
+ return thrust::minmax_element(select_system(system), first, last, comp);
167
+ } // end minmax_element()
168
+
169
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/arithmetic_operators.h ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/functional/actor.h>
21
+ #include <thrust/detail/functional/composite.h>
22
+ #include <thrust/detail/functional/operators/operator_adaptors.h>
23
+ #include <thrust/functional.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+ namespace detail
27
+ {
28
+ namespace functional
29
+ {
30
+
31
+ template<typename Eval>
32
+ __host__ __device__
33
+ actor<
34
+ composite<
35
+ transparent_unary_operator<thrust::negate<>>,
36
+ actor<Eval>
37
+ >
38
+ >
39
+ __host__ __device__
40
+ operator-(const actor<Eval> &_1)
41
+ {
42
+ return compose(transparent_unary_operator<thrust::negate<>>(), _1);
43
+ } // end operator-()
44
+
45
+ // there's no standard unary_plus functional, so roll an ad hoc one here
46
+ struct unary_plus
47
+ {
48
+ using is_transparent = void;
49
+
50
+ __thrust_exec_check_disable__
51
+ template <typename T1>
52
+ __host__ __device__
53
+ constexpr auto operator()(T1&& t1) const
54
+ noexcept(noexcept(+THRUST_FWD(t1)))
55
+ THRUST_TRAILING_RETURN(decltype(+THRUST_FWD(t1)))
56
+ {
57
+ return +THRUST_FWD(t1);
58
+ }
59
+ };
60
+
61
+ template<typename Eval>
62
+ __host__ __device__
63
+ actor<
64
+ composite<
65
+ transparent_unary_operator<unary_plus>,
66
+ actor<Eval>
67
+ >
68
+ >
69
+ operator+(const actor<Eval> &_1)
70
+ {
71
+ return compose(transparent_unary_operator<unary_plus>(), _1);
72
+ } // end operator+()
73
+
74
+ template<typename T1, typename T2>
75
+ __host__ __device__
76
+ actor<
77
+ composite<
78
+ transparent_binary_operator<thrust::plus<>>,
79
+ actor<T1>,
80
+ typename as_actor<T2>::type
81
+ >
82
+ >
83
+ operator+(const actor<T1> &_1, const T2 &_2)
84
+ {
85
+ return compose(transparent_binary_operator<thrust::plus<>>(),
86
+ make_actor(_1),
87
+ make_actor(_2));
88
+ } // end operator+()
89
+
90
+ template<typename T1, typename T2>
91
+ __host__ __device__
92
+ actor<
93
+ composite<
94
+ transparent_binary_operator<thrust::plus<>>,
95
+ typename as_actor<T1>::type,
96
+ actor<T2>
97
+ >
98
+ >
99
+ operator+(const T1 &_1, const actor<T2> &_2)
100
+ {
101
+ return compose(transparent_binary_operator<thrust::plus<>>(),
102
+ make_actor(_1),
103
+ make_actor(_2));
104
+ } // end operator+()
105
+
106
+ template<typename T1, typename T2>
107
+ __host__ __device__
108
+ actor<
109
+ composite<
110
+ transparent_binary_operator<thrust::plus<>>,
111
+ actor<T1>,
112
+ actor<T2>
113
+ >
114
+ >
115
+ operator+(const actor<T1> &_1, const actor<T2> &_2)
116
+ {
117
+ return compose(transparent_binary_operator<thrust::plus<>>(),
118
+ make_actor(_1),
119
+ make_actor(_2));
120
+ } // end operator+()
121
+
122
+ template<typename T1, typename T2>
123
+ __host__ __device__
124
+ actor<
125
+ composite<
126
+ transparent_binary_operator<thrust::minus<>>,
127
+ typename as_actor<T1>::type,
128
+ actor<T2>
129
+ >
130
+ >
131
+ operator-(const T1 &_1, const actor<T2> &_2)
132
+ {
133
+ return compose(transparent_binary_operator<thrust::minus<>>(),
134
+ make_actor(_1),
135
+ make_actor(_2));
136
+ } // end operator-()
137
+
138
+ template<typename T1, typename T2>
139
+ __host__ __device__
140
+ actor<
141
+ composite<
142
+ transparent_binary_operator<thrust::minus<>>,
143
+ actor<T1>,
144
+ typename as_actor<T2>::type
145
+ >
146
+ >
147
+ operator-(const actor<T1> &_1, const T2 &_2)
148
+ {
149
+ return compose(transparent_binary_operator<thrust::minus<>>(),
150
+ make_actor(_1),
151
+ make_actor(_2));
152
+ } // end operator-()
153
+
154
+ template<typename T1, typename T2>
155
+ __host__ __device__
156
+ actor<
157
+ composite<
158
+ transparent_binary_operator<thrust::minus<>>,
159
+ actor<T1>,
160
+ actor<T2>
161
+ >
162
+ >
163
+ operator-(const actor<T1> &_1, const actor<T2> &_2)
164
+ {
165
+ return compose(transparent_binary_operator<thrust::minus<>>(),
166
+ make_actor(_1),
167
+ make_actor(_2));
168
+ } // end operator-()
169
+
170
+ template<typename T1, typename T2>
171
+ __host__ __device__
172
+ actor<
173
+ composite<
174
+ transparent_binary_operator<thrust::multiplies<>>,
175
+ typename as_actor<T1>::type,
176
+ actor<T2>
177
+ >
178
+ >
179
+ operator*(const T1 &_1, const actor<T2> &_2)
180
+ {
181
+ return compose(transparent_binary_operator<thrust::multiplies<>>(),
182
+ make_actor(_1),
183
+ make_actor(_2));
184
+ } // end operator*()
185
+
186
+ template<typename T1, typename T2>
187
+ __host__ __device__
188
+ actor<
189
+ composite<
190
+ transparent_binary_operator<thrust::multiplies<>>,
191
+ actor<T1>,
192
+ typename as_actor<T2>::type
193
+ >
194
+ >
195
+ operator*(const actor<T1> &_1, const T2 &_2)
196
+ {
197
+ return compose(transparent_binary_operator<thrust::multiplies<>>(),
198
+ make_actor(_1),
199
+ make_actor(_2));
200
+ } // end operator*()
201
+
202
+ template<typename T1, typename T2>
203
+ __host__ __device__
204
+ actor<
205
+ composite<
206
+ transparent_binary_operator<thrust::multiplies<>>,
207
+ actor<T1>,
208
+ actor<T2>
209
+ >
210
+ >
211
+ operator*(const actor<T1> &_1, const actor<T2> &_2)
212
+ {
213
+ return compose(transparent_binary_operator<thrust::multiplies<>>(),
214
+ make_actor(_1),
215
+ make_actor(_2));
216
+ } // end operator*()
217
+
218
+ template<typename T1, typename T2>
219
+ __host__ __device__
220
+ actor<
221
+ composite<
222
+ transparent_binary_operator<thrust::divides<>>,
223
+ actor<T1>,
224
+ typename as_actor<T2>::type
225
+ >
226
+ >
227
+ operator/(const actor<T1> &_1, const T2 &_2)
228
+ {
229
+ return compose(transparent_binary_operator<thrust::divides<>>(),
230
+ make_actor(_1),
231
+ make_actor(_2));
232
+ } // end operator/()
233
+
234
+ template<typename T1, typename T2>
235
+ __host__ __device__
236
+ actor<
237
+ composite<
238
+ transparent_binary_operator<thrust::divides<>>,
239
+ typename as_actor<T1>::type,
240
+ actor<T2>
241
+ >
242
+ >
243
+ operator/(const T1 &_1, const actor<T2> &_2)
244
+ {
245
+ return compose(transparent_binary_operator<thrust::divides<>>(),
246
+ make_actor(_1),
247
+ make_actor(_2));
248
+ } // end operator/()
249
+
250
+ template<typename T1, typename T2>
251
+ __host__ __device__
252
+ actor<
253
+ composite<
254
+ transparent_binary_operator<thrust::divides<>>,
255
+ actor<T1>,
256
+ actor<T2>
257
+ >
258
+ >
259
+ operator/(const actor<T1> &_1, const actor<T2> &_2)
260
+ {
261
+ return compose(transparent_binary_operator<thrust::divides<>>(),
262
+ make_actor(_1),
263
+ make_actor(_2));
264
+ } // end operator/()
265
+
266
+ template<typename T1, typename T2>
267
+ __host__ __device__
268
+ actor<
269
+ composite<
270
+ transparent_binary_operator<thrust::modulus<>>,
271
+ actor<T1>,
272
+ typename as_actor<T2>::type
273
+ >
274
+ >
275
+ operator%(const actor<T1> &_1, const T2 &_2)
276
+ {
277
+ return compose(transparent_binary_operator<thrust::modulus<>>(),
278
+ make_actor(_1),
279
+ make_actor(_2));
280
+ } // end operator%()
281
+
282
+ template<typename T1, typename T2>
283
+ __host__ __device__
284
+ actor<
285
+ composite<
286
+ transparent_binary_operator<thrust::modulus<>>,
287
+ typename as_actor<T1>::type,
288
+ actor<T2>
289
+ >
290
+ >
291
+ operator%(const T1 &_1, const actor<T2> &_2)
292
+ {
293
+ return compose(transparent_binary_operator<thrust::modulus<void>>(),
294
+ make_actor(_1),
295
+ make_actor(_2));
296
+ } // end operator%()
297
+
298
+ template<typename T1, typename T2>
299
+ __host__ __device__
300
+ actor<
301
+ composite<
302
+ transparent_binary_operator<thrust::modulus<>>,
303
+ actor<T1>,
304
+ actor<T2>
305
+ >
306
+ >
307
+ operator%(const actor<T1> &_1, const actor<T2> &_2)
308
+ {
309
+ return compose(transparent_binary_operator<thrust::modulus<>>(),
310
+ make_actor(_1),
311
+ make_actor(_2));
312
+ } // end operator%()
313
+
314
+ // there's no standard prefix_increment functional, so roll an ad hoc one here
315
+ struct prefix_increment
316
+ {
317
+ using is_transparent = void;
318
+
319
+ __thrust_exec_check_disable__
320
+ template <typename T1>
321
+ __host__ __device__
322
+ constexpr auto operator()(T1&& t1) const
323
+ noexcept(noexcept(++THRUST_FWD(t1)))
324
+ THRUST_TRAILING_RETURN(decltype(++THRUST_FWD(t1)))
325
+ {
326
+ return ++THRUST_FWD(t1);
327
+ }
328
+ }; // end prefix_increment
329
+
330
+ template<typename Eval>
331
+ __host__ __device__
332
+ actor<
333
+ composite<
334
+ transparent_unary_operator<prefix_increment>,
335
+ actor<Eval>
336
+ >
337
+ >
338
+ operator++(const actor<Eval> &_1)
339
+ {
340
+ return compose(transparent_unary_operator<prefix_increment>(), _1);
341
+ } // end operator++()
342
+
343
+
344
+ // there's no standard postfix_increment functional, so roll an ad hoc one here
345
+ struct postfix_increment
346
+ {
347
+ using is_transparent = void;
348
+
349
+ __thrust_exec_check_disable__
350
+ template <typename T1>
351
+ __host__ __device__
352
+ constexpr auto operator()(T1&& t1) const
353
+ noexcept(noexcept(THRUST_FWD(t1)++))
354
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1)++))
355
+ {
356
+ return THRUST_FWD(t1)++;
357
+ }
358
+ }; // end postfix_increment
359
+
360
+ template<typename Eval>
361
+ __host__ __device__
362
+ actor<
363
+ composite<
364
+ transparent_unary_operator<postfix_increment>,
365
+ actor<Eval>
366
+ >
367
+ >
368
+ operator++(const actor<Eval> &_1, int)
369
+ {
370
+ return compose(transparent_unary_operator<postfix_increment>(), _1);
371
+ } // end operator++()
372
+
373
+
374
+ // there's no standard prefix_decrement functional, so roll an ad hoc one here
375
+ struct prefix_decrement
376
+ {
377
+ using is_transparent = void;
378
+
379
+ __thrust_exec_check_disable__
380
+ template <typename T1>
381
+ __host__ __device__
382
+ constexpr auto operator()(T1&& t1) const
383
+ noexcept(noexcept(--THRUST_FWD(t1)))
384
+ THRUST_TRAILING_RETURN(decltype(--THRUST_FWD(t1)))
385
+ {
386
+ return --THRUST_FWD(t1);
387
+ }
388
+ }; // end prefix_decrement
389
+
390
+ template<typename Eval>
391
+ __host__ __device__
392
+ actor<
393
+ composite<
394
+ transparent_unary_operator<prefix_decrement>,
395
+ actor<Eval>
396
+ >
397
+ >
398
+ operator--(const actor<Eval> &_1)
399
+ {
400
+ return compose(transparent_unary_operator<prefix_decrement>(), _1);
401
+ } // end operator--()
402
+
403
+
404
+ // there's no standard postfix_decrement functional, so roll an ad hoc one here
405
+ struct postfix_decrement
406
+ {
407
+ using is_transparent = void;
408
+
409
+ __thrust_exec_check_disable__
410
+ template <typename T1>
411
+ __host__ __device__
412
+ constexpr auto operator()(T1&& t1) const
413
+ noexcept(noexcept(THRUST_FWD(t1)--))
414
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1)--))
415
+ {
416
+ return THRUST_FWD(t1)--;
417
+ }
418
+ }; // end prefix_increment
419
+
420
+ template<typename Eval>
421
+ __host__ __device__
422
+ actor<
423
+ composite<
424
+ transparent_unary_operator<postfix_decrement>,
425
+ actor<Eval>
426
+ >
427
+ >
428
+ operator--(const actor<Eval> &_1, int)
429
+ {
430
+ return compose(transparent_unary_operator<postfix_decrement>(), _1);
431
+ } // end operator--()
432
+
433
+ } // end functional
434
+ } // end detail
435
+ THRUST_NAMESPACE_END
436
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/assignment_operator.h ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/functional/actor.h>
21
+ #include <thrust/detail/functional/composite.h>
22
+ #include <thrust/detail/functional/operators/operator_adaptors.h>
23
+ #include <thrust/functional.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+
27
+ // XXX WAR circular inclusion with this forward declaration
28
+ template<typename,typename,typename> struct binary_function;
29
+
30
+ namespace detail
31
+ {
32
+ namespace functional
33
+ {
34
+
35
+ // XXX WAR circular inclusion with this forward declaration
36
+ template<typename> struct as_actor;
37
+
38
+ // there's no standard assign functional, so roll an ad hoc one here
39
+ struct assign
40
+ {
41
+ using is_transparent = void;
42
+
43
+ __thrust_exec_check_disable__
44
+ template <typename T1, typename T2>
45
+ __host__ __device__
46
+ constexpr auto operator()(T1&& t1, T2&& t2) const
47
+ noexcept(noexcept(THRUST_FWD(t1) = THRUST_FWD(t2)))
48
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) = THRUST_FWD(t2)))
49
+ {
50
+ return THRUST_FWD(t1) = THRUST_FWD(t2);
51
+ }
52
+ };
53
+
54
+ template<typename Eval, typename T>
55
+ struct assign_result
56
+ {
57
+ typedef actor<
58
+ composite<
59
+ transparent_binary_operator<assign>,
60
+ actor<Eval>,
61
+ typename as_actor<T>::type
62
+ >
63
+ > type;
64
+ }; // end assign_result
65
+
66
+ template<typename Eval, typename T>
67
+ __host__ __device__
68
+ typename assign_result<Eval,T>::type
69
+ do_assign(const actor<Eval> &_1, const T &_2)
70
+ {
71
+ return compose(transparent_binary_operator<assign>(),
72
+ _1,
73
+ as_actor<T>::convert(_2));
74
+ } // end do_assign()
75
+
76
+ } // end functional
77
+ } // end detail
78
+ THRUST_NAMESPACE_END
79
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/bitwise_operators.h ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/functional/actor.h>
21
+ #include <thrust/detail/functional/composite.h>
22
+ #include <thrust/detail/functional/operators/operator_adaptors.h>
23
+ #include <thrust/functional.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+ namespace detail
27
+ {
28
+ namespace functional
29
+ {
30
+
31
+ template<typename T1, typename T2>
32
+ __host__ __device__
33
+ actor<
34
+ composite<
35
+ transparent_binary_operator<bit_and<>>,
36
+ actor<T1>,
37
+ typename as_actor<T2>::type
38
+ >
39
+ >
40
+ operator&(const actor<T1> &_1, const T2 &_2)
41
+ {
42
+ return compose(transparent_binary_operator<bit_and<>>(),
43
+ make_actor(_1),
44
+ make_actor(_2));
45
+ } // end operator&()
46
+
47
+ template<typename T1, typename T2>
48
+ __host__ __device__
49
+ actor<
50
+ composite<
51
+ transparent_binary_operator<bit_and<>>,
52
+ typename as_actor<T1>::type,
53
+ actor<T2>
54
+ >
55
+ >
56
+ operator&(const T1 &_1, const actor<T2> &_2)
57
+ {
58
+ return compose(transparent_binary_operator<bit_and<>>(),
59
+ make_actor(_1),
60
+ make_actor(_2));
61
+ } // end operator&()
62
+
63
+ template<typename T1, typename T2>
64
+ __host__ __device__
65
+ actor<
66
+ composite<
67
+ transparent_binary_operator<bit_and<>>,
68
+ actor<T1>,
69
+ actor<T2>
70
+ >
71
+ >
72
+ operator&(const actor<T1> &_1, const actor<T2> &_2)
73
+ {
74
+ return compose(transparent_binary_operator<bit_and<>>(),
75
+ make_actor(_1),
76
+ make_actor(_2));
77
+ } // end operator&()
78
+
79
+ template<typename T1, typename T2>
80
+ __host__ __device__
81
+ actor<
82
+ composite<
83
+ transparent_binary_operator<bit_or<>>,
84
+ actor<T1>,
85
+ typename as_actor<T2>::type
86
+ >
87
+ >
88
+ operator|(const actor<T1> &_1, const T2 &_2)
89
+ {
90
+ return compose(transparent_binary_operator<bit_or<>>(),
91
+ make_actor(_1),
92
+ make_actor(_2));
93
+ } // end operator|()
94
+
95
+ template<typename T1, typename T2>
96
+ __host__ __device__
97
+ actor<
98
+ composite<
99
+ transparent_binary_operator<bit_or<>>,
100
+ typename as_actor<T1>::type,
101
+ actor<T2>
102
+ >
103
+ >
104
+ operator|(const T1 &_1, const actor<T2> &_2)
105
+ {
106
+ return compose(transparent_binary_operator<bit_or<>>(),
107
+ make_actor(_1),
108
+ make_actor(_2));
109
+ } // end operator|()
110
+
111
+ template<typename T1, typename T2>
112
+ __host__ __device__
113
+ actor<
114
+ composite<
115
+ transparent_binary_operator<bit_or<>>,
116
+ actor<T1>,
117
+ actor<T2>
118
+ >
119
+ >
120
+ operator|(const actor<T1> &_1, const actor<T2> &_2)
121
+ {
122
+ return compose(transparent_binary_operator<bit_or<>>(),
123
+ make_actor(_1),
124
+ make_actor(_2));
125
+ } // end operator|()
126
+
127
+ template<typename T1, typename T2>
128
+ __host__ __device__
129
+ actor<
130
+ composite<
131
+ transparent_binary_operator<bit_xor<>>,
132
+ actor<T1>,
133
+ typename as_actor<T2>::type
134
+ >
135
+ >
136
+ operator^(const actor<T1> &_1, const T2 &_2)
137
+ {
138
+ return compose(transparent_binary_operator<bit_xor<>>(),
139
+ make_actor(_1),
140
+ make_actor(_2));
141
+ } // end operator^()
142
+
143
+ template<typename T1, typename T2>
144
+ __host__ __device__
145
+ actor<
146
+ composite<
147
+ transparent_binary_operator<bit_xor<>>,
148
+ typename as_actor<T1>::type,
149
+ actor<T2>
150
+ >
151
+ >
152
+ operator^(const T1 &_1, const actor<T2> &_2)
153
+ {
154
+ return compose(transparent_binary_operator<bit_xor<>>(),
155
+ make_actor(_1),
156
+ make_actor(_2));
157
+ } // end operator^()
158
+
159
+ template<typename T1, typename T2>
160
+ __host__ __device__
161
+ actor<
162
+ composite<
163
+ transparent_binary_operator<bit_xor<>>,
164
+ actor<T1>,
165
+ actor<T2>
166
+ >
167
+ >
168
+ operator^(const actor<T1> &_1, const actor<T2> &_2)
169
+ {
170
+ return compose(transparent_binary_operator<bit_xor<>>(),
171
+ make_actor(_1),
172
+ make_actor(_2));
173
+ } // end operator^()
174
+
175
+
176
+ // there's no standard bit_not functional, so roll an ad hoc one here
177
+ struct bit_not
178
+ {
179
+ using is_transparent = void;
180
+
181
+ __thrust_exec_check_disable__
182
+ template <typename T1>
183
+ __host__ __device__
184
+ constexpr auto operator()(T1&& t1) const
185
+ noexcept(noexcept(~THRUST_FWD(t1)))
186
+ THRUST_TRAILING_RETURN(decltype(~THRUST_FWD(t1)))
187
+ {
188
+ return ~THRUST_FWD(t1);
189
+ }
190
+ }; // end prefix_increment
191
+
192
+ template<typename Eval>
193
+ __host__ __device__
194
+ actor<
195
+ composite<
196
+ transparent_unary_operator<bit_not>,
197
+ actor<Eval>
198
+ >
199
+ >
200
+ __host__ __device__
201
+ operator~(const actor<Eval> &_1)
202
+ {
203
+ return compose(transparent_unary_operator<bit_not>(), _1);
204
+ } // end operator~()
205
+
206
+ // there's no standard bit_lshift functional, so roll an ad hoc one here
207
+ struct bit_lshift
208
+ {
209
+ using is_transparent = void;
210
+
211
+ __thrust_exec_check_disable__
212
+ template <typename T1, typename T2>
213
+ __host__ __device__
214
+ constexpr auto operator()(T1&& t1, T2&& t2) const
215
+ noexcept(noexcept(THRUST_FWD(t1) << THRUST_FWD(t2)))
216
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) << THRUST_FWD(t2)))
217
+ {
218
+ return THRUST_FWD(t1) << THRUST_FWD(t2);
219
+ }
220
+ };
221
+
222
+ template<typename T1, typename T2>
223
+ __host__ __device__
224
+ actor<
225
+ composite<
226
+ transparent_binary_operator<bit_lshift>,
227
+ actor<T1>,
228
+ typename as_actor<T2>::type
229
+ >
230
+ >
231
+ operator<<(const actor<T1> &_1, const T2 &_2)
232
+ {
233
+ return compose(transparent_binary_operator<bit_lshift>(),
234
+ make_actor(_1),
235
+ make_actor(_2));
236
+ } // end operator<<()
237
+
238
+ template<typename T1, typename T2>
239
+ __host__ __device__
240
+ actor<
241
+ composite<
242
+ transparent_binary_operator<bit_lshift>,
243
+ typename as_actor<T1>::type,
244
+ actor<T2>
245
+ >
246
+ >
247
+ operator<<(const T1 &_1, const actor<T2> &_2)
248
+ {
249
+ return compose(transparent_binary_operator<bit_lshift>(),
250
+ make_actor(_1),
251
+ make_actor(_2));
252
+ } // end operator<<()
253
+
254
+ template<typename T1, typename T2>
255
+ __host__ __device__
256
+ actor<
257
+ composite<
258
+ transparent_binary_operator<bit_lshift>,
259
+ actor<T1>,
260
+ actor<T2>
261
+ >
262
+ >
263
+ operator<<(const actor<T1> &_1, const actor<T2> &_2)
264
+ {
265
+ return compose(transparent_binary_operator<bit_lshift>(),
266
+ make_actor(_1),
267
+ make_actor(_2));
268
+ } // end operator<<()
269
+
270
+ // there's no standard bit_rshift functional, so roll an ad hoc one here
271
+ struct bit_rshift
272
+ {
273
+ using is_transparent = void;
274
+
275
+ __thrust_exec_check_disable__
276
+ template <typename T1, typename T2>
277
+ __host__ __device__
278
+ constexpr auto operator()(T1& t1, T2&& t2) const
279
+ noexcept(noexcept(THRUST_FWD(t1) >> THRUST_FWD(t2)))
280
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) >> THRUST_FWD(t2)))
281
+ {
282
+ return THRUST_FWD(t1) >> THRUST_FWD(t2);
283
+ }
284
+ };
285
+
286
+
287
+ template<typename T1, typename T2>
288
+ __host__ __device__
289
+ actor<
290
+ composite<
291
+ transparent_binary_operator<bit_rshift>,
292
+ actor<T1>,
293
+ typename as_actor<T2>::type
294
+ >
295
+ >
296
+ operator>>(const actor<T1> &_1, const T2 &_2)
297
+ {
298
+ return compose(transparent_binary_operator<bit_rshift>(),
299
+ make_actor(_1),
300
+ make_actor(_2));
301
+ } // end operator>>()
302
+
303
+ template<typename T1, typename T2>
304
+ __host__ __device__
305
+ actor<
306
+ composite<
307
+ transparent_binary_operator<bit_rshift>,
308
+ typename as_actor<T1>::type,
309
+ actor<T2>
310
+ >
311
+ >
312
+ operator>>(const T1 &_1, const actor<T2> &_2)
313
+ {
314
+ return compose(transparent_binary_operator<bit_rshift>(),
315
+ make_actor(_1),
316
+ make_actor(_2));
317
+ } // end operator>>()
318
+
319
+ template<typename T1, typename T2>
320
+ __host__ __device__
321
+ actor<
322
+ composite<
323
+ transparent_binary_operator<bit_rshift>,
324
+ actor<T1>,
325
+ actor<T2>
326
+ >
327
+ >
328
+ operator>>(const actor<T1> &_1, const actor<T2> &_2)
329
+ {
330
+ return compose(transparent_binary_operator<bit_rshift>(),
331
+ make_actor(_1),
332
+ make_actor(_2));
333
+ } // end operator>>()
334
+
335
+ } // end functional
336
+ } // end detail
337
+ THRUST_NAMESPACE_END
338
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/compound_assignment_operators.h ADDED
@@ -0,0 +1,512 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/functional/actor.h>
21
+ #include <thrust/detail/functional/composite.h>
22
+ #include <thrust/detail/functional/operators/operator_adaptors.h>
23
+
24
+ THRUST_NAMESPACE_BEGIN
25
+ namespace detail
26
+ {
27
+ namespace functional
28
+ {
29
+
30
+ // there's no standard plus_equal functional, so roll an ad hoc one here
31
+ struct plus_equal
32
+ {
33
+ using is_transparent = void;
34
+
35
+ __thrust_exec_check_disable__
36
+ template <typename T1, typename T2>
37
+ __host__ __device__
38
+ constexpr auto operator()(T1&& t1, T2&& t2) const
39
+ noexcept(noexcept(THRUST_FWD(t1) += THRUST_FWD(t2)))
40
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) += THRUST_FWD(t2)))
41
+ {
42
+ return THRUST_FWD(t1) += THRUST_FWD(t2);
43
+ }
44
+ };
45
+
46
+ template<typename T1, typename T2>
47
+ __host__ __device__
48
+ actor<
49
+ composite<
50
+ transparent_binary_operator<plus_equal>,
51
+ actor<T1>,
52
+ typename as_actor<T2>::type
53
+ >
54
+ >
55
+ operator+=(const actor<T1> &_1, const T2 &_2)
56
+ {
57
+ return compose(transparent_binary_operator<plus_equal>(),
58
+ make_actor(_1),
59
+ make_actor(_2));
60
+ } // end operator+=()
61
+
62
+ template<typename T1, typename T2>
63
+ __host__ __device__
64
+ actor<
65
+ composite<
66
+ transparent_binary_operator<plus_equal>,
67
+ actor<T1>,
68
+ actor<T2>
69
+ >
70
+ >
71
+ operator+=(const actor<T1> &_1, const actor<T2> &_2)
72
+ {
73
+ return compose(transparent_binary_operator<plus_equal>(),
74
+ make_actor(_1),
75
+ make_actor(_2));
76
+ } // end operator+=()
77
+
78
+ // there's no standard minus_equal functional, so roll an ad hoc one here
79
+ struct minus_equal
80
+ {
81
+ using is_transparent = void;
82
+
83
+ __thrust_exec_check_disable__
84
+ template <typename T1, typename T2>
85
+ __host__ __device__
86
+ constexpr auto operator()(T1&& t1, T2&& t2) const
87
+ noexcept(noexcept(THRUST_FWD(t1) -= THRUST_FWD(t2)))
88
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) -= THRUST_FWD(t2)))
89
+ {
90
+ return THRUST_FWD(t1) -= THRUST_FWD(t2);
91
+ }
92
+ };
93
+
94
+ template<typename T1, typename T2>
95
+ __host__ __device__
96
+ actor<
97
+ composite<
98
+ transparent_binary_operator<minus_equal>,
99
+ actor<T1>,
100
+ typename as_actor<T2>::type
101
+ >
102
+ >
103
+ operator-=(const actor<T1> &_1, const T2 &_2)
104
+ {
105
+ return compose(transparent_binary_operator<minus_equal>(),
106
+ make_actor(_1),
107
+ make_actor(_2));
108
+ } // end operator-=()
109
+
110
+ template<typename T1, typename T2>
111
+ __host__ __device__
112
+ actor<
113
+ composite<
114
+ transparent_binary_operator<minus_equal>,
115
+ actor<T1>,
116
+ actor<T2>
117
+ >
118
+ >
119
+ operator-=(const actor<T1> &_1, const actor<T2> &_2)
120
+ {
121
+ return compose(transparent_binary_operator<minus_equal>(),
122
+ make_actor(_1),
123
+ make_actor(_2));
124
+ } // end operator-=()
125
+
126
+ // there's no standard multiplies_equal functional, so roll an ad hoc one here
127
+ struct multiplies_equal
128
+ {
129
+ using is_transparent = void;
130
+
131
+ __thrust_exec_check_disable__
132
+ template <typename T1, typename T2>
133
+ __host__ __device__
134
+ constexpr auto operator()(T1&& t1, T2&& t2) const
135
+ noexcept(noexcept(THRUST_FWD(t1) *= THRUST_FWD(t2)))
136
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) *= THRUST_FWD(t2)))
137
+ {
138
+ return THRUST_FWD(t1) *= THRUST_FWD(t2);
139
+ }
140
+ };
141
+
142
+ template<typename T1, typename T2>
143
+ __host__ __device__
144
+ actor<
145
+ composite<
146
+ transparent_binary_operator<multiplies_equal>,
147
+ actor<T1>,
148
+ typename as_actor<T2>::type
149
+ >
150
+ >
151
+ operator*=(const actor<T1> &_1, const T2 &_2)
152
+ {
153
+ return compose(transparent_binary_operator<multiplies_equal>(),
154
+ make_actor(_1),
155
+ make_actor(_2));
156
+ } // end operator*=()
157
+
158
+ template<typename T1, typename T2>
159
+ __host__ __device__
160
+ actor<
161
+ composite<
162
+ transparent_binary_operator<multiplies_equal>,
163
+ actor<T1>,
164
+ actor<T2>
165
+ >
166
+ >
167
+ operator*=(const actor<T1> &_1, const actor<T2> &_2)
168
+ {
169
+ return compose(transparent_binary_operator<multiplies_equal>(),
170
+ make_actor(_1),
171
+ make_actor(_2));
172
+ } // end operator*=()
173
+
174
+ // there's no standard divides_equal functional, so roll an ad hoc one here
175
+ struct divides_equal
176
+ {
177
+ using is_transparent = void;
178
+
179
+ __thrust_exec_check_disable__
180
+ template <typename T1, typename T2>
181
+ __host__ __device__
182
+ constexpr auto operator()(T1&& t1, T2&& t2) const
183
+ noexcept(noexcept(THRUST_FWD(t1) /= THRUST_FWD(t2)))
184
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) /= THRUST_FWD(t2)))
185
+ {
186
+ return THRUST_FWD(t1) /= THRUST_FWD(t2);
187
+ }
188
+ };
189
+
190
+ template<typename T1, typename T2>
191
+ __host__ __device__
192
+ actor<
193
+ composite<
194
+ transparent_binary_operator<divides_equal>,
195
+ actor<T1>,
196
+ typename as_actor<T2>::type
197
+ >
198
+ >
199
+ operator/=(const actor<T1> &_1, const T2 &_2)
200
+ {
201
+ return compose(transparent_binary_operator<divides_equal>(),
202
+ make_actor(_1),
203
+ make_actor(_2));
204
+ } // end operator/=()
205
+
206
+ template<typename T1, typename T2>
207
+ __host__ __device__
208
+ actor<
209
+ composite<
210
+ transparent_binary_operator<divides_equal>,
211
+ actor<T1>,
212
+ actor<T2>
213
+ >
214
+ >
215
+ operator/=(const actor<T1> &_1, const actor<T2> &_2)
216
+ {
217
+ return compose(transparent_binary_operator<divides_equal>(),
218
+ make_actor(_1),
219
+ make_actor(_2));
220
+ } // end operator/=()
221
+
222
+ // there's no standard modulus_equal functional, so roll an ad hoc one here
223
+ struct modulus_equal
224
+ {
225
+ using is_transparent = void;
226
+
227
+ __thrust_exec_check_disable__
228
+ template <typename T1, typename T2>
229
+ __host__ __device__
230
+ constexpr auto operator()(T1&& t1, T2&& t2) const
231
+ noexcept(noexcept(THRUST_FWD(t1) %= THRUST_FWD(t2)))
232
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) %= THRUST_FWD(t2)))
233
+ {
234
+ return THRUST_FWD(t1) %= THRUST_FWD(t2);
235
+ }
236
+ };
237
+
238
+ template<typename T1, typename T2>
239
+ __host__ __device__
240
+ actor<
241
+ composite<
242
+ transparent_binary_operator<modulus_equal>,
243
+ actor<T1>,
244
+ typename as_actor<T2>::type
245
+ >
246
+ >
247
+ operator%=(const actor<T1> &_1, const T2 &_2)
248
+ {
249
+ return compose(transparent_binary_operator<modulus_equal>(),
250
+ make_actor(_1),
251
+ make_actor(_2));
252
+ } // end operator%=()
253
+
254
+ template<typename T1, typename T2>
255
+ __host__ __device__
256
+ actor<
257
+ composite<
258
+ transparent_binary_operator<modulus_equal>,
259
+ actor<T1>,
260
+ actor<T2>
261
+ >
262
+ >
263
+ operator%=(const actor<T1> &_1, const actor<T2> &_2)
264
+ {
265
+ return compose(transparent_binary_operator<modulus_equal>(),
266
+ make_actor(_1),
267
+ make_actor(_2));
268
+ } // end operator%=()
269
+
270
+ // there's no standard bit_and_equal functional, so roll an ad hoc one here
271
+ struct bit_and_equal
272
+ {
273
+ using is_transparent = void;
274
+
275
+ __thrust_exec_check_disable__
276
+ template <typename T1, typename T2>
277
+ __host__ __device__
278
+ constexpr auto operator()(T1&& t1, T2&& t2) const
279
+ noexcept(noexcept(THRUST_FWD(t1) &= THRUST_FWD(t2)))
280
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) &= THRUST_FWD(t2)))
281
+ {
282
+ return THRUST_FWD(t1) &= THRUST_FWD(t2);
283
+ }
284
+ };
285
+
286
+ template<typename T1, typename T2>
287
+ __host__ __device__
288
+ actor<
289
+ composite<
290
+ transparent_binary_operator<bit_and_equal>,
291
+ actor<T1>,
292
+ typename as_actor<T2>::type
293
+ >
294
+ >
295
+ operator&=(const actor<T1> &_1, const T2 &_2)
296
+ {
297
+ return compose(transparent_binary_operator<bit_and_equal>(),
298
+ make_actor(_1),
299
+ make_actor(_2));
300
+ } // end operator&=()
301
+
302
+ template<typename T1, typename T2>
303
+ __host__ __device__
304
+ actor<
305
+ composite<
306
+ transparent_binary_operator<bit_and_equal>,
307
+ actor<T1>,
308
+ actor<T2>
309
+ >
310
+ >
311
+ operator&=(const actor<T1> &_1, const actor<T2> &_2)
312
+ {
313
+ return compose(transparent_binary_operator<bit_and_equal>(),
314
+ make_actor(_1),
315
+ make_actor(_2));
316
+ } // end operator&=()
317
+
318
+ // there's no standard bit_or_equal functional, so roll an ad hoc one here
319
+ struct bit_or_equal
320
+ {
321
+ using is_transparent = void;
322
+
323
+ __thrust_exec_check_disable__
324
+ template <typename T1, typename T2>
325
+ __host__ __device__
326
+ constexpr auto operator()(T1&& t1, T2&& t2) const
327
+ noexcept(noexcept(THRUST_FWD(t1) |= THRUST_FWD(t2)))
328
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) |= THRUST_FWD(t2)))
329
+ {
330
+ return THRUST_FWD(t1) |= THRUST_FWD(t2);
331
+ }
332
+ };
333
+
334
+ template<typename T1, typename T2>
335
+ __host__ __device__
336
+ actor<
337
+ composite<
338
+ transparent_binary_operator<bit_or_equal>,
339
+ actor<T1>,
340
+ typename as_actor<T2>::type
341
+ >
342
+ >
343
+ operator|=(const actor<T1> &_1, const T2 &_2)
344
+ {
345
+ return compose(transparent_binary_operator<bit_or_equal>(),
346
+ make_actor(_1),
347
+ make_actor(_2));
348
+ } // end operator|=()
349
+
350
+ template<typename T1, typename T2>
351
+ __host__ __device__
352
+ actor<
353
+ composite<
354
+ transparent_binary_operator<bit_or_equal>,
355
+ actor<T1>,
356
+ actor<T2>
357
+ >
358
+ >
359
+ operator|=(const actor<T1> &_1, const actor<T2> &_2)
360
+ {
361
+ return compose(transparent_binary_operator<bit_or_equal>(),
362
+ make_actor(_1),
363
+ make_actor(_2));
364
+ } // end operator|=()
365
+
366
+ // there's no standard bit_xor_equal functional, so roll an ad hoc one here
367
+ struct bit_xor_equal
368
+ {
369
+ using is_transparent = void;
370
+
371
+ __thrust_exec_check_disable__
372
+ template <typename T1, typename T2>
373
+ __host__ __device__
374
+ constexpr auto operator()(T1&& t1, T2&& t2) const
375
+ noexcept(noexcept(THRUST_FWD(t1) ^= THRUST_FWD(t2)))
376
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) ^= THRUST_FWD(t2)))
377
+ {
378
+ return THRUST_FWD(t1) ^= THRUST_FWD(t2);
379
+ }
380
+ };
381
+
382
+ template<typename T1, typename T2>
383
+ __host__ __device__
384
+ actor<
385
+ composite<
386
+ transparent_binary_operator<bit_xor_equal>,
387
+ actor<T1>,
388
+ typename as_actor<T2>::type
389
+ >
390
+ >
391
+ operator^=(const actor<T1> &_1, const T2 &_2)
392
+ {
393
+ return compose(transparent_binary_operator<bit_xor_equal>(),
394
+ make_actor(_1),
395
+ make_actor(_2));
396
+ } // end operator|=()
397
+
398
+ template<typename T1, typename T2>
399
+ __host__ __device__
400
+ actor<
401
+ composite<
402
+ transparent_binary_operator<bit_xor_equal>,
403
+ actor<T1>,
404
+ actor<T2>
405
+ >
406
+ >
407
+ operator^=(const actor<T1> &_1, const actor<T2> &_2)
408
+ {
409
+ return compose(transparent_binary_operator<bit_xor_equal>(),
410
+ make_actor(_1),
411
+ make_actor(_2));
412
+ } // end operator|=()
413
+
414
+ // there's no standard bit_lshift_equal functional, so roll an ad hoc one here
415
+ struct bit_lshift_equal
416
+ {
417
+ using is_transparent = void;
418
+
419
+ __thrust_exec_check_disable__
420
+ template <typename T1, typename T2>
421
+ __host__ __device__
422
+ constexpr auto operator()(T1&& t1, T2&& t2) const
423
+ noexcept(noexcept(THRUST_FWD(t1) <<= THRUST_FWD(t2)))
424
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) <<= THRUST_FWD(t2)))
425
+ {
426
+ return THRUST_FWD(t1) <<= THRUST_FWD(t2);
427
+ }
428
+ };
429
+ template<typename T1, typename T2>
430
+ __host__ __device__
431
+ actor<
432
+ composite<
433
+ transparent_binary_operator<bit_lshift_equal>,
434
+ actor<T1>,
435
+ typename as_actor<T2>::type
436
+ >
437
+ >
438
+ operator<<=(const actor<T1> &_1, const T2 &_2)
439
+ {
440
+ return compose(transparent_binary_operator<bit_lshift_equal>(),
441
+ make_actor(_1),
442
+ make_actor(_2));
443
+ } // end operator<<=()
444
+
445
+ template<typename T1, typename T2>
446
+ __host__ __device__
447
+ actor<
448
+ composite<
449
+ transparent_binary_operator<bit_lshift_equal>,
450
+ actor<T1>,
451
+ actor<T2>
452
+ >
453
+ >
454
+ operator<<=(const actor<T1> &_1, const actor<T2> &_2)
455
+ {
456
+ return compose(transparent_binary_operator<bit_lshift_equal>(),
457
+ make_actor(_1),
458
+ make_actor(_2));
459
+ } // end operator<<=()
460
+
461
+ // there's no standard bit_rshift_equal functional, so roll an ad hoc one here
462
+ struct bit_rshift_equal
463
+ {
464
+ using is_transparent = void;
465
+
466
+ __thrust_exec_check_disable__
467
+ template <typename T1, typename T2>
468
+ __host__ __device__
469
+ constexpr auto operator()(T1&& t1, T2&& t2) const
470
+ noexcept(noexcept(THRUST_FWD(t1) >>= THRUST_FWD(t2)))
471
+ THRUST_TRAILING_RETURN(decltype(THRUST_FWD(t1) >>= THRUST_FWD(t2)))
472
+ {
473
+ return THRUST_FWD(t1) >>= THRUST_FWD(t2);
474
+ }
475
+ };
476
+
477
+ template<typename T1, typename T2>
478
+ __host__ __device__
479
+ actor<
480
+ composite<
481
+ transparent_binary_operator<bit_rshift_equal>,
482
+ actor<T1>,
483
+ typename as_actor<T2>::type
484
+ >
485
+ >
486
+ operator>>=(const actor<T1> &_1, const T2 &_2)
487
+ {
488
+ return compose(transparent_binary_operator<bit_rshift_equal>(),
489
+ make_actor(_1),
490
+ make_actor(_2));
491
+ } // end operator>>=()
492
+
493
+ template<typename T1, typename T2>
494
+ __host__ __device__
495
+ actor<
496
+ composite<
497
+ transparent_binary_operator<bit_rshift_equal>,
498
+ actor<T1>,
499
+ actor<T2>
500
+ >
501
+ >
502
+ operator>>=(const actor<T1> &_1, const actor<T2> &_2)
503
+ {
504
+ return compose(transparent_binary_operator<bit_rshift_equal>(),
505
+ make_actor(_1),
506
+ make_actor(_2));
507
+ } // end operator>>=()
508
+
509
+ } // end functional
510
+ } // end detail
511
+ THRUST_NAMESPACE_END
512
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/logical_operators.h ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/functional/actor.h>
21
+ #include <thrust/detail/functional/composite.h>
22
+ #include <thrust/detail/functional/operators/operator_adaptors.h>
23
+ #include <thrust/functional.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+ namespace detail
27
+ {
28
+ namespace functional
29
+ {
30
+
31
+ template<typename T1, typename T2>
32
+ __host__ __device__
33
+ actor<
34
+ composite<
35
+ transparent_binary_operator<thrust::logical_and<>>,
36
+ actor<T1>,
37
+ typename as_actor<T2>::type
38
+ >
39
+ >
40
+ operator&&(const actor<T1> &_1, const T2 &_2)
41
+ {
42
+ return compose(transparent_binary_operator<thrust::logical_and<>>(),
43
+ make_actor(_1),
44
+ make_actor(_2));
45
+ } // end operator&&()
46
+
47
+ template<typename T1, typename T2>
48
+ __host__ __device__
49
+ actor<
50
+ composite<
51
+ transparent_binary_operator<thrust::logical_and<>>,
52
+ typename as_actor<T1>::type,
53
+ actor<T2>
54
+ >
55
+ >
56
+ operator&&(const T1 &_1, const actor<T2> &_2)
57
+ {
58
+ return compose(transparent_binary_operator<thrust::logical_and<>>(),
59
+ make_actor(_1),
60
+ make_actor(_2));
61
+ } // end operator&&()
62
+
63
+ template<typename T1, typename T2>
64
+ __host__ __device__
65
+ actor<
66
+ composite<
67
+ transparent_binary_operator<thrust::logical_and<>>,
68
+ actor<T1>,
69
+ actor<T2>
70
+ >
71
+ >
72
+ operator&&(const actor<T1> &_1, const actor<T2> &_2)
73
+ {
74
+ return compose(transparent_binary_operator<thrust::logical_and<>>(),
75
+ make_actor(_1),
76
+ make_actor(_2));
77
+ } // end operator&&()
78
+
79
+ template<typename T1, typename T2>
80
+ __host__ __device__
81
+ actor<
82
+ composite<
83
+ transparent_binary_operator<thrust::logical_or<>>,
84
+ actor<T1>,
85
+ typename as_actor<T2>::type
86
+ >
87
+ >
88
+ operator||(const actor<T1> &_1, const T2 &_2)
89
+ {
90
+ return compose(transparent_binary_operator<thrust::logical_or<>>(),
91
+ make_actor(_1),
92
+ make_actor(_2));
93
+ } // end operator&&()
94
+
95
+ template<typename T1, typename T2>
96
+ __host__ __device__
97
+ actor<
98
+ composite<
99
+ transparent_binary_operator<thrust::logical_or<>>,
100
+ typename as_actor<T1>::type,
101
+ actor<T2>
102
+ >
103
+ >
104
+ operator||(const T1 &_1, const actor<T2> &_2)
105
+ {
106
+ return compose(transparent_binary_operator<thrust::logical_or<>>(),
107
+ make_actor(_1),
108
+ make_actor(_2));
109
+ } // end operator&&()
110
+
111
+ template<typename T1, typename T2>
112
+ __host__ __device__
113
+ actor<
114
+ composite<
115
+ transparent_binary_operator<thrust::logical_or<>>,
116
+ actor<T1>,
117
+ actor<T2>
118
+ >
119
+ >
120
+ operator||(const actor<T1> &_1, const actor<T2> &_2)
121
+ {
122
+ return compose(transparent_binary_operator<thrust::logical_or<>>(),
123
+ make_actor(_1),
124
+ make_actor(_2));
125
+ } // end operator&&()
126
+
127
+ template<typename Eval>
128
+ __host__ __device__
129
+ actor<
130
+ composite<
131
+ transparent_unary_operator<thrust::logical_not<>>,
132
+ actor<Eval>
133
+ >
134
+ >
135
+ operator!(const actor<Eval> &_1)
136
+ {
137
+ return compose(transparent_unary_operator<thrust::logical_not<>>(), _1);
138
+ } // end operator!()
139
+
140
+ } // end functional
141
+ } // end detail
142
+ THRUST_NAMESPACE_END
143
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/functional/operators/relational_operators.h ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/functional/actor.h>
21
+ #include <thrust/detail/functional/composite.h>
22
+ #include <thrust/detail/functional/operators/operator_adaptors.h>
23
+ #include <thrust/functional.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+ namespace detail
27
+ {
28
+ namespace functional
29
+ {
30
+
31
+ template<typename T1, typename T2>
32
+ __host__ __device__
33
+ actor<
34
+ composite<
35
+ transparent_binary_operator<thrust::equal_to<>>,
36
+ actor<T1>,
37
+ typename as_actor<T2>::type
38
+ >
39
+ >
40
+ operator==(const actor<T1> &_1, const T2 &_2)
41
+ {
42
+ return compose(transparent_binary_operator<thrust::equal_to<>>(),
43
+ make_actor(_1),
44
+ make_actor(_2));
45
+ } // end operator==()
46
+
47
+ template<typename T1, typename T2>
48
+ __host__ __device__
49
+ actor<
50
+ composite<
51
+ transparent_binary_operator<thrust::equal_to<>>,
52
+ typename as_actor<T1>::type,
53
+ actor<T2>
54
+ >
55
+ >
56
+ operator==(const T1 &_1, const actor<T2> &_2)
57
+ {
58
+ return compose(transparent_binary_operator<thrust::equal_to<>>(),
59
+ make_actor(_1),
60
+ make_actor(_2));
61
+ } // end operator==()
62
+
63
+ template<typename T1, typename T2>
64
+ __host__ __device__
65
+ actor<
66
+ composite<
67
+ transparent_binary_operator<thrust::equal_to<>>,
68
+ actor<T1>,
69
+ actor<T2>
70
+ >
71
+ >
72
+ operator==(const actor<T1> &_1, const actor<T2> &_2)
73
+ {
74
+ return compose(transparent_binary_operator<thrust::equal_to<>>(),
75
+ make_actor(_1),
76
+ make_actor(_2));
77
+ } // end operator==()
78
+
79
+ template<typename T1, typename T2>
80
+ __host__ __device__
81
+ actor<
82
+ composite<
83
+ transparent_binary_operator<thrust::not_equal_to<>>,
84
+ actor<T1>,
85
+ typename as_actor<T2>::type
86
+ >
87
+ >
88
+ operator!=(const actor<T1> &_1, const T2 &_2)
89
+ {
90
+ return compose(transparent_binary_operator<thrust::not_equal_to<>>(),
91
+ make_actor(_1),
92
+ make_actor(_2));
93
+ } // end operator!=()
94
+
95
+ template<typename T1, typename T2>
96
+ __host__ __device__
97
+ actor<
98
+ composite<
99
+ transparent_binary_operator<thrust::not_equal_to<>>,
100
+ typename as_actor<T1>::type,
101
+ actor<T2>
102
+ >
103
+ >
104
+ operator!=(const T1 &_1, const actor<T2> &_2)
105
+ {
106
+ return compose(transparent_binary_operator<thrust::not_equal_to<>>(),
107
+ make_actor(_1),
108
+ make_actor(_2));
109
+ } // end operator!=()
110
+
111
+ template<typename T1, typename T2>
112
+ __host__ __device__
113
+ actor<
114
+ composite<
115
+ transparent_binary_operator<thrust::not_equal_to<>>,
116
+ actor<T1>,
117
+ actor<T2>
118
+ >
119
+ >
120
+ operator!=(const actor<T1> &_1, const actor<T2> &_2)
121
+ {
122
+ return compose(transparent_binary_operator<thrust::not_equal_to<>>(),
123
+ make_actor(_1),
124
+ make_actor(_2));
125
+ } // end operator!=()
126
+
127
+ template<typename T1, typename T2>
128
+ __host__ __device__
129
+ actor<
130
+ composite<
131
+ transparent_binary_operator<thrust::greater<>>,
132
+ actor<T1>,
133
+ typename as_actor<T2>::type
134
+ >
135
+ >
136
+ operator>(const actor<T1> &_1, const T2 &_2)
137
+ {
138
+ return compose(transparent_binary_operator<thrust::greater<>>(),
139
+ make_actor(_1),
140
+ make_actor(_2));
141
+ } // end operator>()
142
+
143
+ template<typename T1, typename T2>
144
+ __host__ __device__
145
+ actor<
146
+ composite<
147
+ transparent_binary_operator<thrust::greater<>>,
148
+ typename as_actor<T1>::type,
149
+ actor<T2>
150
+ >
151
+ >
152
+ operator>(const T1 &_1, const actor<T2> &_2)
153
+ {
154
+ return compose(transparent_binary_operator<thrust::greater<>>(),
155
+ make_actor(_1),
156
+ make_actor(_2));
157
+ } // end operator>()
158
+
159
+ template<typename T1, typename T2>
160
+ __host__ __device__
161
+ actor<
162
+ composite<
163
+ transparent_binary_operator<thrust::greater<>>,
164
+ actor<T1>,
165
+ actor<T2>
166
+ >
167
+ >
168
+ operator>(const actor<T1> &_1, const actor<T2> &_2)
169
+ {
170
+ return compose(transparent_binary_operator<thrust::greater<>>(),
171
+ make_actor(_1),
172
+ make_actor(_2));
173
+ } // end operator>()
174
+
175
+ template<typename T1, typename T2>
176
+ __host__ __device__
177
+ actor<
178
+ composite<
179
+ transparent_binary_operator<thrust::less<>>,
180
+ actor<T1>,
181
+ typename as_actor<T2>::type
182
+ >
183
+ >
184
+ operator<(const actor<T1> &_1, const T2 &_2)
185
+ {
186
+ return compose(transparent_binary_operator<thrust::less<>>(),
187
+ make_actor(_1),
188
+ make_actor(_2));
189
+ } // end operator<()
190
+
191
+ template<typename T1, typename T2>
192
+ __host__ __device__
193
+ actor<
194
+ composite<
195
+ transparent_binary_operator<thrust::less<>>,
196
+ typename as_actor<T1>::type,
197
+ actor<T2>
198
+ >
199
+ >
200
+ operator<(const T1 &_1, const actor<T2> &_2)
201
+ {
202
+ return compose(transparent_binary_operator<thrust::less<>>(),
203
+ make_actor(_1),
204
+ make_actor(_2));
205
+ } // end operator<()
206
+
207
+ template<typename T1, typename T2>
208
+ __host__ __device__
209
+ actor<
210
+ composite<
211
+ transparent_binary_operator<thrust::less<>>,
212
+ actor<T1>,
213
+ actor<T2>
214
+ >
215
+ >
216
+ operator<(const actor<T1> &_1, const actor<T2> &_2)
217
+ {
218
+ return compose(transparent_binary_operator<thrust::less<>>(),
219
+ make_actor(_1),
220
+ make_actor(_2));
221
+ } // end operator<()
222
+
223
+ template<typename T1, typename T2>
224
+ __host__ __device__
225
+ actor<
226
+ composite<
227
+ transparent_binary_operator<thrust::greater_equal<>>,
228
+ actor<T1>,
229
+ typename as_actor<T2>::type
230
+ >
231
+ >
232
+ operator>=(const actor<T1> &_1, const T2 &_2)
233
+ {
234
+ return compose(transparent_binary_operator<thrust::greater_equal<>>(),
235
+ make_actor(_1),
236
+ make_actor(_2));
237
+ } // end operator>=()
238
+
239
+ template<typename T1, typename T2>
240
+ __host__ __device__
241
+ actor<
242
+ composite<
243
+ transparent_binary_operator<thrust::greater_equal<>>,
244
+ typename as_actor<T1>::type,
245
+ actor<T2>
246
+ >
247
+ >
248
+ operator>=(const T1 &_1, const actor<T2> &_2)
249
+ {
250
+ return compose(transparent_binary_operator<thrust::greater_equal<>>(),
251
+ make_actor(_1),
252
+ make_actor(_2));
253
+ } // end operator>=()
254
+
255
+ template<typename T1, typename T2>
256
+ __host__ __device__
257
+ actor<
258
+ composite<
259
+ transparent_binary_operator<thrust::greater_equal<>>,
260
+ actor<T1>,
261
+ actor<T2>
262
+ >
263
+ >
264
+ operator>=(const actor<T1> &_1, const actor<T2> &_2)
265
+ {
266
+ return compose(transparent_binary_operator<thrust::greater_equal<>>(),
267
+ make_actor(_1),
268
+ make_actor(_2));
269
+ } // end operator>=()
270
+
271
+ template<typename T1, typename T2>
272
+ __host__ __device__
273
+ actor<
274
+ composite<
275
+ transparent_binary_operator<thrust::less_equal<>>,
276
+ actor<T1>,
277
+ typename as_actor<T2>::type
278
+ >
279
+ >
280
+ operator<=(const actor<T1> &_1, const T2 &_2)
281
+ {
282
+ return compose(transparent_binary_operator<thrust::less_equal<>>(),
283
+ make_actor(_1),
284
+ make_actor(_2));
285
+ } // end operator<=()
286
+
287
+ template<typename T1, typename T2>
288
+ __host__ __device__
289
+ actor<
290
+ composite<
291
+ transparent_binary_operator<thrust::less_equal<>>,
292
+ typename as_actor<T1>::type,
293
+ actor<T2>
294
+ >
295
+ >
296
+ operator<=(const T1 &_1, const actor<T2> &_2)
297
+ {
298
+ return compose(transparent_binary_operator<thrust::less_equal<>>(),
299
+ make_actor(_1),
300
+ make_actor(_2));
301
+ } // end operator<=()
302
+
303
+ template<typename T1, typename T2>
304
+ __host__ __device__
305
+ actor<
306
+ composite<
307
+ transparent_binary_operator<thrust::less_equal<>>,
308
+ actor<T1>,
309
+ actor<T2>
310
+ >
311
+ >
312
+ operator<=(const actor<T1> &_1, const actor<T2> &_2)
313
+ {
314
+ return compose(transparent_binary_operator<thrust::less_equal<>>(),
315
+ make_actor(_1),
316
+ make_actor(_2));
317
+ } // end operator<=()
318
+
319
+ } // end functional
320
+ } // end detail
321
+ THRUST_NAMESPACE_END
322
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/gather.inl ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+
21
+ #include <thrust/gather.h>
22
+ #include <thrust/iterator/iterator_traits.h>
23
+ #include <thrust/system/detail/generic/select_system.h>
24
+ #include <thrust/system/detail/generic/gather.h>
25
+ #include <thrust/system/detail/adl/gather.h>
26
+
27
+ THRUST_NAMESPACE_BEGIN
28
+
29
// Dispatch layer for thrust::gather with an explicit execution policy.
// The using-declaration pulls the generic fallback into scope; the
// UNQUALIFIED call below then lets argument-dependent lookup pick a
// backend-specific overload when one exists (do not qualify the call —
// that would defeat the dispatch).
__thrust_exec_check_disable__
template<typename DerivedPolicy,
         typename InputIterator,
         typename RandomAccessIterator,
         typename OutputIterator>
__host__ __device__
  OutputIterator gather(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                        InputIterator                                               map_first,
                        InputIterator                                               map_last,
                        RandomAccessIterator                                        input_first,
                        OutputIterator                                              result)
{
  using thrust::system::detail::generic::gather;
  // strip_const + derived_cast recover the concrete (mutable) policy type
  // so the backend overload can be selected on it.
  return gather(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), map_first, map_last, input_first, result);
} // end gather()


// Dispatch layer for thrust::gather_if (stencil-controlled gather) with an
// explicit execution policy; same ADL dispatch pattern as gather() above.
__thrust_exec_check_disable__
template<typename DerivedPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename RandomAccessIterator,
         typename OutputIterator>
__host__ __device__
  OutputIterator gather_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                           InputIterator1                                              map_first,
                           InputIterator1                                              map_last,
                           InputIterator2                                              stencil,
                           RandomAccessIterator                                        input_first,
                           OutputIterator                                              result)
{
  using thrust::system::detail::generic::gather_if;
  return gather_if(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), map_first, map_last, stencil, input_first, result);
} // end gather_if()


// Dispatch layer for the gather_if overload that takes a user-supplied
// predicate applied to the stencil; same ADL dispatch pattern as above.
__thrust_exec_check_disable__
template<typename DerivedPolicy,
         typename InputIterator1,
         typename InputIterator2,
         typename RandomAccessIterator,
         typename OutputIterator,
         typename Predicate>
__host__ __device__
  OutputIterator gather_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
                           InputIterator1                                              map_first,
                           InputIterator1                                              map_last,
                           InputIterator2                                              stencil,
                           RandomAccessIterator                                        input_first,
                           OutputIterator                                              result,
                           Predicate                                                   pred)
{
  using thrust::system::detail::generic::gather_if;
  return gather_if(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), map_first, map_last, stencil, input_first, result, pred);
} // end gather_if()
84
+
85
+
86
+ template<typename InputIterator,
87
+ typename RandomAccessIterator,
88
+ typename OutputIterator>
89
+ OutputIterator gather(InputIterator map_first,
90
+ InputIterator map_last,
91
+ RandomAccessIterator input_first,
92
+ OutputIterator result)
93
+ {
94
+ using thrust::system::detail::generic::select_system;
95
+
96
+ typedef typename thrust::iterator_system<InputIterator>::type System1;
97
+ typedef typename thrust::iterator_system<RandomAccessIterator>::type System2;
98
+ typedef typename thrust::iterator_system<OutputIterator>::type System3;
99
+
100
+ System1 system1;
101
+ System2 system2;
102
+ System3 system3;
103
+
104
+ return thrust::gather(select_system(system1,system2,system3), map_first, map_last, input_first, result);
105
+ } // end gather()
106
+
107
+
108
+ template<typename InputIterator1,
109
+ typename InputIterator2,
110
+ typename RandomAccessIterator,
111
+ typename OutputIterator>
112
+ OutputIterator gather_if(InputIterator1 map_first,
113
+ InputIterator1 map_last,
114
+ InputIterator2 stencil,
115
+ RandomAccessIterator input_first,
116
+ OutputIterator result)
117
+ {
118
+ using thrust::system::detail::generic::select_system;
119
+
120
+ typedef typename thrust::iterator_system<InputIterator1>::type System1;
121
+ typedef typename thrust::iterator_system<InputIterator2>::type System2;
122
+ typedef typename thrust::iterator_system<RandomAccessIterator>::type System3;
123
+ typedef typename thrust::iterator_system<OutputIterator>::type System4;
124
+
125
+ System1 system1;
126
+ System2 system2;
127
+ System3 system3;
128
+ System4 system4;
129
+
130
+ return thrust::gather_if(select_system(system1,system2,system3,system4), map_first, map_last, stencil, input_first, result);
131
+ } // end gather_if()
132
+
133
+
134
+ template<typename InputIterator1,
135
+ typename InputIterator2,
136
+ typename RandomAccessIterator,
137
+ typename OutputIterator,
138
+ typename Predicate>
139
+ OutputIterator gather_if(InputIterator1 map_first,
140
+ InputIterator1 map_last,
141
+ InputIterator2 stencil,
142
+ RandomAccessIterator input_first,
143
+ OutputIterator result,
144
+ Predicate pred)
145
+ {
146
+ using thrust::system::detail::generic::select_system;
147
+
148
+ typedef typename thrust::iterator_system<InputIterator1>::type System1;
149
+ typedef typename thrust::iterator_system<InputIterator2>::type System2;
150
+ typedef typename thrust::iterator_system<RandomAccessIterator>::type System3;
151
+ typedef typename thrust::iterator_system<OutputIterator>::type System4;
152
+
153
+ System1 system1;
154
+ System2 system2;
155
+ System3 system3;
156
+ System4 system4;
157
+
158
+ return thrust::gather_if(select_system(system1,system2,system3,system4), map_first, map_last, stencil, input_first, result, pred);
159
+ } // end gather_if()
160
+
161
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/integer_math.h ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/detail/type_deduction.h>
21
+
22
+ #include <nv/target>
23
+
24
+ #include <limits>
25
+
26
+ THRUST_NAMESPACE_BEGIN
27
+ namespace detail
28
+ {
29
+
30
// Count of leading zero bits in x.
// Device path uses the hardware count-leading-zeros intrinsic; the host
// fallback scans bits from the most significant end. For x == 0 the host
// path returns the full bit width of Integer (no bit is ever found).
// NOTE(review): on device, x is passed straight to ::__clz, which takes an
// int — confirm the intended result for Integer types wider than int.
template <typename Integer>
__host__ __device__ __thrust_forceinline__
Integer clz(Integer x)
{
  Integer result;

  NV_IF_TARGET(NV_IS_DEVICE, (
    result = ::__clz(x);
  ), (
    // Host fallback: the first set bit found at position i (from the top)
    // means there are (num_bits - 1 - i) leading zeros.
    int num_bits = 8 * sizeof(Integer);
    int num_bits_minus_one = num_bits - 1;
    result = num_bits;
    for (int i = num_bits_minus_one; i >= 0; --i)
    {
      if ((Integer(1) << i) & x)
      {
        result = num_bits_minus_one - i;
        break;
      }
    }
  ));

  return result;
}
54
+
55
+ template <typename Integer>
56
+ __host__ __device__ __thrust_forceinline__
57
+ bool is_power_of_2(Integer x)
58
+ {
59
+ return 0 == (x & (x - 1));
60
+ }
61
+
62
+ template <typename Integer>
63
+ __host__ __device__ __thrust_forceinline__
64
+ bool is_odd(Integer x)
65
+ {
66
+ return 1 & x;
67
+ }
68
+
69
+ template <typename Integer>
70
+ __host__ __device__ __thrust_forceinline__
71
+ Integer log2(Integer x)
72
+ {
73
+ Integer num_bits = 8 * sizeof(Integer);
74
+ Integer num_bits_minus_one = num_bits - 1;
75
+
76
+ return num_bits_minus_one - clz(x);
77
+ }
78
+
79
+
80
+ template <typename Integer>
81
+ __host__ __device__ __thrust_forceinline__
82
+ Integer log2_ri(Integer x)
83
+ {
84
+ Integer result = log2(x);
85
+
86
+ // This is where we round up to the nearest log.
87
+ if (!is_power_of_2(x))
88
+ ++result;
89
+
90
+ return result;
91
+ }
92
+
93
// x/y rounding towards +infinity for integers.
// Used to determine # of blocks/warps etc.
// Each helper below has two definitions: a decltype-deducing one for
// C++11 and later, and a pre-C++11 fallback that simply returns Integer0
// (hence the FIXME notes about std::common_type).
template <typename Integer0, typename Integer1>
__host__ __device__ __thrust_forceinline__
#if THRUST_CPP_DIALECT >= 2011
// FIXME: Should use common_type.
auto divide_ri(Integer0 const x, Integer1 const y)
THRUST_DECLTYPE_RETURNS((x + (y - 1)) / y)
#else
// FIXME: Should use common_type.
Integer0 divide_ri(Integer0 const x, Integer1 const y)
{
  // Adding y-1 before truncating division rounds the quotient up.
  return (x + (y - 1)) / y;
}
#endif

// x/y rounding towards zero for integers (plain truncating division).
// Used to determine # of blocks/warps etc.
template <typename Integer0, typename Integer1>
__host__ __device__ __thrust_forceinline__
#if THRUST_CPP_DIALECT >= 2011
auto divide_rz(Integer0 const x, Integer1 const y)
THRUST_DECLTYPE_RETURNS(x / y)
#else
// FIXME: Should use common_type.
Integer0 divide_rz(Integer0 const x, Integer1 const y)
{
  return x / y;
}
#endif

// Round x towards infinity to the next multiple of y.
template <typename Integer0, typename Integer1>
__host__ __device__ __thrust_forceinline__
#if THRUST_CPP_DIALECT >= 2011
auto round_i(Integer0 const x, Integer1 const y)
THRUST_DECLTYPE_RETURNS(y * divide_ri(x, y))
#else
Integer0 round_i(Integer0 const x, Integer1 const y)
{
  return y * divide_ri(x, y);
}
#endif

// Round x towards 0 to the next multiple of y.
template <typename Integer0, typename Integer1>
__host__ __device__ __thrust_forceinline__
#if THRUST_CPP_DIALECT >= 2011
auto round_z(Integer0 const x, Integer1 const y)
THRUST_DECLTYPE_RETURNS(y * divide_rz(x, y))
#else
Integer0 round_z(Integer0 const x, Integer1 const y)
{
  return y * divide_rz(x, y);
}
#endif
149
+
150
+ } // end detail
151
+
152
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/integer_traits.h ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <limits>
21
+ #include <limits.h>
22
+
23
+ THRUST_NAMESPACE_BEGIN
24
+
25
+ namespace detail
26
+ {
27
+
28
// Primary template: an arbitrary T is not considered an integral type
// unless one of the explicit specializations below says otherwise.
template<typename T>
  class integer_traits
{
  public:
    static constexpr bool is_integral = false;
};

// Helper base for the specializations: marks the type as integral and
// exposes its minimum/maximum values as compile-time constants.
template<typename T, T min_val, T max_val>
  class integer_traits_base
{
  public:
    static constexpr bool is_integral = true;
    static constexpr T const_min = min_val;
    static constexpr T const_max = max_val;
};
43
+
44
+
45
// Specializations for every built-in integral type. Each one combines
// std::numeric_limits<T> with integer_traits_base, using the <limits.h>
// macros (CHAR_MIN, INT_MAX, ...) as compile-time min/max bounds.
template<>
  class integer_traits<bool>
    : public std::numeric_limits<bool>,
      public integer_traits_base<bool, false, true>
{};


template<>
  class integer_traits<char>
    : public std::numeric_limits<char>,
      public integer_traits_base<char, CHAR_MIN, CHAR_MAX>
{};


template<>
  class integer_traits<signed char>
    : public std::numeric_limits<signed char>,
      public integer_traits_base<signed char, SCHAR_MIN, SCHAR_MAX>
{};


template<>
  class integer_traits<unsigned char>
    : public std::numeric_limits<unsigned char>,
      public integer_traits_base<unsigned char, 0, UCHAR_MAX>
{};


template<>
  class integer_traits<short>
    : public std::numeric_limits<short>,
      public integer_traits_base<short, SHRT_MIN, SHRT_MAX>
{};


template<>
  class integer_traits<unsigned short>
    : public std::numeric_limits<unsigned short>,
      public integer_traits_base<unsigned short, 0, USHRT_MAX>
{};


template<>
  class integer_traits<int>
    : public std::numeric_limits<int>,
      public integer_traits_base<int, INT_MIN, INT_MAX>
{};


template<>
  class integer_traits<unsigned int>
    : public std::numeric_limits<unsigned int>,
      public integer_traits_base<unsigned int, 0, UINT_MAX>
{};


template<>
  class integer_traits<long>
    : public std::numeric_limits<long>,
      public integer_traits_base<long, LONG_MIN, LONG_MAX>
{};


template<>
  class integer_traits<unsigned long>
    : public std::numeric_limits<unsigned long>,
      public integer_traits_base<unsigned long, 0, ULONG_MAX>
{};


template<>
  class integer_traits<long long>
    : public std::numeric_limits<long long>,
      public integer_traits_base<long long, LLONG_MIN, LLONG_MAX>
{};


template<>
  class integer_traits<unsigned long long>
    : public std::numeric_limits<unsigned long long>,
      public integer_traits_base<unsigned long long, 0, ULLONG_MAX>
{};
127
+
128
+ } // end detail
129
+
130
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/logical.inl ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2008-2013 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ #include <thrust/detail/config.h>
20
+ #include <thrust/iterator/iterator_traits.h>
21
+ #include <thrust/system/detail/generic/select_system.h>
22
+ #include <thrust/system/detail/generic/logical.h>
23
+ #include <thrust/system/detail/adl/logical.h>
24
+
25
+ THRUST_NAMESPACE_BEGIN
26
+
27
// Dispatch layer for thrust::all_of with an explicit execution policy.
// The using-declaration brings in the generic fallback; the UNQUALIFIED
// call lets argument-dependent lookup choose a backend-specific overload
// when one exists.
__thrust_exec_check_disable__
template<typename DerivedPolicy, typename InputIterator, typename Predicate>
__host__ __device__
bool all_of(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator first, InputIterator last, Predicate pred)
{
  using thrust::system::detail::generic::all_of;
  return all_of(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, pred);
} // end all_of()


// Dispatch layer for thrust::any_of; same ADL pattern as all_of() above.
__thrust_exec_check_disable__
template<typename DerivedPolicy, typename InputIterator, typename Predicate>
__host__ __device__
bool any_of(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator first, InputIterator last, Predicate pred)
{
  using thrust::system::detail::generic::any_of;
  return any_of(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, pred);
} // end any_of()


// Dispatch layer for thrust::none_of; same ADL pattern as all_of() above.
__thrust_exec_check_disable__
template<typename DerivedPolicy, typename InputIterator, typename Predicate>
__host__ __device__
bool none_of(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator first, InputIterator last, Predicate pred)
{
  using thrust::system::detail::generic::none_of;
  return none_of(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), first, last, pred);
} // end none_of()
55
+
56
+
57
+ template<typename InputIterator, typename Predicate>
58
+ bool all_of(InputIterator first, InputIterator last, Predicate pred)
59
+ {
60
+ using thrust::system::detail::generic::select_system;
61
+
62
+ typedef typename thrust::iterator_system<InputIterator>::type System;
63
+
64
+ System system;
65
+
66
+ return thrust::all_of(select_system(system), first, last, pred);
67
+ }
68
+
69
+
70
+ template<typename InputIterator, typename Predicate>
71
+ bool any_of(InputIterator first, InputIterator last, Predicate pred)
72
+ {
73
+ using thrust::system::detail::generic::select_system;
74
+
75
+ typedef typename thrust::iterator_system<InputIterator>::type System;
76
+
77
+ System system;
78
+
79
+ return thrust::any_of(select_system(system), first, last, pred);
80
+ }
81
+
82
+
83
+ template<typename InputIterator, typename Predicate>
84
+ bool none_of(InputIterator first, InputIterator last, Predicate pred)
85
+ {
86
+ using thrust::system::detail::generic::select_system;
87
+
88
+ typedef typename thrust::iterator_system<InputIterator>::type System;
89
+
90
+ System system;
91
+
92
+ return thrust::none_of(select_system(system), first, last, pred);
93
+ }
94
+
95
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/memory_algorithms.h ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) 2018 NVIDIA Corporation
2
+ // Author: Bryce Adelstein Lelbach <brycelelbach@gmail.com>
3
+ //
4
+ // Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
5
+
6
+ // TODO: These need to be turned into proper Thrust algorithms (dispatch layer,
7
+ // backends, etc).
8
+
9
+ #pragma once
10
+
11
+ #include <thrust/detail/config.h>
12
+ #include <thrust/detail/type_traits.h>
13
+ #include <thrust/iterator/iterator_traits.h>
14
+ #include <thrust/detail/allocator/allocator_traits.h>
15
+ #include <thrust/detail/memory_wrapper.h>
16
+ #include <thrust/addressof.h>
17
+
18
+ #include <nv/target>
19
+
20
+ #include <utility>
21
+ #include <new>
22
+
23
+
24
+ THRUST_NAMESPACE_BEGIN
25
+
26
+ ///////////////////////////////////////////////////////////////////////////////
27
+
28
// Invoke T's destructor at the given address. Storage is NOT deallocated.
template <typename T>
__host__ __device__
void destroy_at(T* location)
{
  location->~T();
}

// Destroy *location through the allocator's destroy hook, so allocators
// that customize destruction are honored. The allocator is first rebound
// from its declared value type to T.
template <typename Allocator, typename T>
__host__ __device__
void destroy_at(Allocator const& alloc, T* location)
{
  typedef typename detail::allocator_traits<
    typename detail::remove_cv<
      typename detail::remove_reference<Allocator>::type
    >::type
  >::template rebind_traits<T>::other traits;

  typename traits::allocator_type alloc_T(alloc);

  traits::destroy(alloc_T, location);
}

// Destroy every element in [first, last); returns the end of the range.
template <typename ForwardIt>
__host__ __device__
ForwardIt destroy(ForwardIt first, ForwardIt last)
{
  for (; first != last; ++first)
    destroy_at(addressof(*first));

  return first;
}

// Allocator-aware destroy over [first, last): each element is destroyed
// via the allocator (rebound to the iterator's value type).
template <typename Allocator, typename ForwardIt>
__host__ __device__
ForwardIt destroy(Allocator const& alloc, ForwardIt first, ForwardIt last)
{
  typedef typename iterator_traits<ForwardIt>::value_type T;
  typedef typename detail::allocator_traits<
    typename detail::remove_cv<
      typename detail::remove_reference<Allocator>::type
    >::type
  >::template rebind_traits<T>::other traits;

  typename traits::allocator_type alloc_T(alloc);

  for (; first != last; ++first)
    destroy_at(alloc_T, addressof(*first));

  return first;
}

// Destroy the n elements starting at first; returns one past the last
// destroyed element. The (void) cast defuses any overloaded comma
// operator on the iterator type.
template <typename ForwardIt, typename Size>
__host__ __device__
ForwardIt destroy_n(ForwardIt first, Size n)
{
  for (; n > 0; (void) ++first, --n)
    destroy_at(addressof(*first));

  return first;
}

// Allocator-aware counterpart of destroy_n: the n elements starting at
// first are destroyed through the rebound allocator.
template <typename Allocator, typename ForwardIt, typename Size>
__host__ __device__
ForwardIt destroy_n(Allocator const& alloc, ForwardIt first, Size n)
{
  typedef typename iterator_traits<ForwardIt>::value_type T;
  typedef typename detail::allocator_traits<
    typename detail::remove_cv<
      typename detail::remove_reference<Allocator>::type
    >::type
  >::template rebind_traits<T>::other traits;

  typename traits::allocator_type alloc_T(alloc);

  for (; n > 0; (void) ++first, --n)
    destroy_at(alloc_T, addressof(*first));

  return first;
}
107
+
108
// Construct a T from args (copied into each element) in every slot of the
// uninitialized range [first, last), via placement new.
// On the host, a throwing constructor triggers rollback: the elements
// already constructed are destroyed before the exception is rethrown.
// On the device there is no exception support, so construction runs bare.
template <typename ForwardIt, typename... Args>
__host__ __device__
void uninitialized_construct(
  ForwardIt first, ForwardIt last, Args const&... args
)
{
  using T = typename iterator_traits<ForwardIt>::value_type;

  ForwardIt current = first;

  // No exceptions in CUDA.
  NV_IF_TARGET(NV_IS_HOST, (
    try {
      for (; current != last; ++current)
      {
        ::new (static_cast<void*>(addressof(*current))) T(args...);
      }
    } catch (...) {
      // Roll back: destroy [first, current) before propagating.
      destroy(first, current);
      throw;
    }
  ), (
    for (; current != last; ++current)
    {
      ::new (static_cast<void*>(addressof(*current))) T(args...);
    }
  ));
}

// Allocator-aware variant: each element of [first, last) is constructed
// through traits::construct on the allocator rebound to the value type;
// on host, failure rolls back via the allocator-aware destroy().
// Host-only with respect to exceptions; note this function itself carries
// no __host__ __device__ annotations in this overload.
template <typename Allocator, typename ForwardIt, typename... Args>
void uninitialized_construct_with_allocator(
  Allocator const& alloc, ForwardIt first, ForwardIt last, Args const&... args
)
{
  using T = typename iterator_traits<ForwardIt>::value_type;
  using traits = typename detail::allocator_traits<
    typename std::remove_cv<
      typename std::remove_reference<Allocator>::type
    >::type
  >::template rebind_traits<T>;

  typename traits::allocator_type alloc_T(alloc);

  ForwardIt current = first;

  // No exceptions in CUDA.
  NV_IF_TARGET(NV_IS_HOST, (
    try {
      for (; current != last; ++current)
      {
        traits::construct(alloc_T, addressof(*current), args...);
      }
    } catch (...) {
      destroy(alloc_T, first, current);
      throw;
    }
  ), (
    for (; current != last; ++current)
    {
      traits::construct(alloc_T, addressof(*current), args...);
    }
  ));
}

// Counted form of uninitialized_construct: constructs n elements starting
// at first via placement new, with the same host-side rollback guarantee.
template <typename ForwardIt, typename Size, typename... Args>
void uninitialized_construct_n(
  ForwardIt first, Size n, Args const&... args
)
{
  using T = typename iterator_traits<ForwardIt>::value_type;

  ForwardIt current = first;

  // No exceptions in CUDA.
  NV_IF_TARGET(NV_IS_HOST, (
    try {
      for (; n > 0; ++current, --n)
      {
        ::new (static_cast<void*>(addressof(*current))) T(args...);
      }
    } catch (...) {
      destroy(first, current);
      throw;
    }
  ), (
    for (; n > 0; ++current, --n)
    {
      ::new (static_cast<void*>(addressof(*current))) T(args...);
    }
  ));
}

// Counted, allocator-aware form: constructs n elements through the
// rebound allocator's construct hook; host-side failures roll back with
// the allocator-aware destroy(). The (void) cast on ++current defuses any
// overloaded comma operator on the iterator type.
template <typename Allocator, typename ForwardIt, typename Size, typename... Args>
void uninitialized_construct_n_with_allocator(
  Allocator const& alloc, ForwardIt first, Size n, Args const&... args
)
{
  using T = typename iterator_traits<ForwardIt>::value_type;
  using traits = typename detail::allocator_traits<
    typename std::remove_cv<
      typename std::remove_reference<Allocator>::type
    >::type
  >::template rebind_traits<T>;

  typename traits::allocator_type alloc_T(alloc);

  ForwardIt current = first;

  // No exceptions in CUDA.
  NV_IF_TARGET(NV_IS_HOST, (
    try {
      for (; n > 0; (void) ++current, --n)
      {
        traits::construct(alloc_T, addressof(*current), args...);
      }
    } catch (...) {
      destroy(alloc_T, first, current);
      throw;
    }
  ), (
    for (; n > 0; (void) ++current, --n)
    {
      traits::construct(alloc_T, addressof(*current), args...);
    }
  ));
}
234
+
235
+ ///////////////////////////////////////////////////////////////////////////////
236
+
237
+ THRUST_NAMESPACE_END
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/memory_wrapper.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2020 NVIDIA Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #pragma once
18
+
19
+ // When a compiler uses Thrust as part of its implementation of Standard C++
20
+ // algorithms, a cycle of included files may result when Thrust code tries to
21
+ // use a standard algorithm. Having a macro that is defined only when Thrust
22
+ // is including an algorithms-related header gives the compiler a chance to
23
+ // detect and break the cycle of includes. (<memory> declares several standard
24
+ // algorithms, including all of the uninitialized_* algorithms. "_ALGORITHMS_"
25
+ // in the macro name is meant generically, not as a specific reference to
26
+ // the header <algorithms>.)
27
+
28
+ #define THRUST_INCLUDING_ALGORITHMS_HEADER
29
+ #include <memory>
30
+ #undef THRUST_INCLUDING_ALGORITHMS_HEADER