ZTWHHH commited on
Commit
963fa26
·
verified ·
1 Parent(s): a1a1c79

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__algorithm/swap_ranges.h +36 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/different_from.h +36 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/regular.h +53 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/same_as.h +40 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/semiregular.h +53 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/binary_function.h +56 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/bind_back.h +75 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/bind_front.h +78 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/binder1st.h +59 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/binder2nd.h +59 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/compose.h +56 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/default_searcher.h +148 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/function.h +1261 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/hash.h +692 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/identity.h +50 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/is_transparent.h +40 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/mem_fn.h +61 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/operations.h +622 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/perfect_forward.h +113 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/reference_wrapper.h +103 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/weak_result_type.h +297 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/array.h +30 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/get.h +99 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/hash.h +28 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/memory_resource.h +30 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/pair.h +28 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/span.h +36 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/string.h +114 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/string_view.h +54 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/tuple.h +32 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/data.h +55 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/empty.h +48 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/erase_if_container.h +43 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/front_insert_iterator.h +69 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/insert_iterator.h +71 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/next.h +48 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/ostreambuf_iterator.h +71 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/prev.h +47 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/reverse_iterator.h +186 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/compressed_pair.h +240 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/config.h +287 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/default_accessor.h +99 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/dynamic_extent.h +83 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/extents.h +579 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/full_extent_t.h +69 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/layout_left.h +270 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/layout_right.h +270 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/layout_stride.h +555 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/macros.h +639 -0
.gitattributes CHANGED
@@ -903,3 +903,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/te
903
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so filter=lfs diff=lfs merge=lfs -text
904
  videochat2/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
905
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.so filter=lfs diff=lfs merge=lfs -text
 
 
903
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so filter=lfs diff=lfs merge=lfs -text
904
  videochat2/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
905
  videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.so filter=lfs diff=lfs merge=lfs -text
906
+ videochat2/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__algorithm/swap_ranges.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___ALGORITHM_SWAP_RANGES_H
11
+ #define _LIBCUDACXX___ALGORITHM_SWAP_RANGES_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__utility/swap.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ template <class _ForwardIterator1, class _ForwardIterator2>
26
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
27
+ _ForwardIterator2 swap_ranges(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2)
28
+ {
29
+ for(; __first1 != __last1; ++__first1, (void) ++__first2)
30
+ swap(*__first1, *__first2);
31
+ return __first2;
32
+ }
33
+
34
+ _LIBCUDACXX_END_NAMESPACE_STD
35
+
36
+ #endif // _LIBCUDACXX___ALGORITHM_SWAP_RANGES_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/different_from.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___CONCEPTS_DIFFERENT_FROM_H
11
+ #define _LIBCUDACXX___CONCEPTS_DIFFERENT_FROM_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif //__cuda_std__
16
+
17
+ #include "../__concepts/__concept_macros.h"
18
+ #include "../__concepts/same_as.h"
19
+ #include "../__type_traits/remove_cvref.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ #if _LIBCUDACXX_STD_VER > 11
28
+
29
+ template<class _Tp, class _Up>
30
+ _LIBCUDACXX_CONCEPT __different_from = !same_as<remove_cvref_t<_Tp>, remove_cvref_t<_Up>>;
31
+
32
+ #endif // _LIBCUDACXX_STD_VER > 11
33
+
34
+ _LIBCUDACXX_END_NAMESPACE_STD
35
+
36
+ #endif // _LIBCUDACXX___CONCEPTS_DIFFERENT_FROM_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/regular.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___CONCEPTS_REGULAR_H
11
+ #define _LIBCUDACXX___CONCEPTS_REGULAR_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif //__cuda_std__
16
+
17
+ #include "../__concepts/__concept_macros.h"
18
+ #include "../__concepts/equality_comparable.h"
19
+ #include "../__concepts/semiregular.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ #if _LIBCUDACXX_STD_VER > 17
28
+
29
+ // [concept.object]
30
+
31
+ template<class _Tp>
32
+ concept regular = semiregular<_Tp> && equality_comparable<_Tp>;
33
+
34
+ #elif _LIBCUDACXX_STD_VER > 11
35
+
36
+ // [concept.object]
37
+
38
+ template<class _Tp>
39
+ _LIBCUDACXX_CONCEPT_FRAGMENT(
40
+ __regular_,
41
+ requires()(
42
+ requires(semiregular<_Tp>),
43
+ requires(equality_comparable<_Tp>)
44
+ ));
45
+
46
+ template<class _Tp>
47
+ _LIBCUDACXX_CONCEPT regular = _LIBCUDACXX_FRAGMENT(__regular_, _Tp);
48
+
49
+ #endif // _LIBCUDACXX_STD_VER > 11
50
+
51
+ _LIBCUDACXX_END_NAMESPACE_STD
52
+
53
+ #endif // _LIBCUDACXX___CONCEPTS_REGULAR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/same_as.h ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___CONCEPTS_SAME_AS_H
11
+ #define _LIBCUDACXX___CONCEPTS_SAME_AS_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif //__cuda_std__
16
+
17
+ #include "../__concepts/__concept_macros.h"
18
+ #include "../__type_traits/is_same.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ #if _LIBCUDACXX_STD_VER > 11
27
+
28
+ // [concept.same]
29
+
30
+ template<class _Tp, class _Up>
31
+ _LIBCUDACXX_CONCEPT __same_as_impl = _IsSame<_Tp, _Up>::value;
32
+
33
+ template<class _Tp, class _Up>
34
+ _LIBCUDACXX_CONCEPT same_as = __same_as_impl<_Tp, _Up> && __same_as_impl<_Up, _Tp>;
35
+
36
+ #endif // _LIBCUDACXX_STD_VER > 11
37
+
38
+ _LIBCUDACXX_END_NAMESPACE_STD
39
+
40
+ #endif // _LIBCUDACXX___CONCEPTS_SAME_AS_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__concepts/semiregular.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___CONCEPTS_SEMIREGULAR_H
11
+ #define _LIBCUDACXX___CONCEPTS_SEMIREGULAR_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif //__cuda_std__
16
+
17
+ #include "../__concepts/__concept_macros.h"
18
+ #include "../__concepts/constructible.h"
19
+ #include "../__concepts/copyable.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ #if _LIBCUDACXX_STD_VER > 17
28
+
29
+ // [concept.object]
30
+
31
+ template<class _Tp>
32
+ concept semiregular = copyable<_Tp> && default_initializable<_Tp>;
33
+
34
+ #elif _LIBCUDACXX_STD_VER > 11
35
+
36
+ // [concept.object]
37
+
38
+ template<class _Tp>
39
+ _LIBCUDACXX_CONCEPT_FRAGMENT(
40
+ __semiregular_,
41
+ requires()(
42
+ requires(copyable<_Tp>),
43
+ requires(default_initializable<_Tp>)
44
+ ));
45
+
46
+ template<class _Tp>
47
+ _LIBCUDACXX_CONCEPT semiregular = _LIBCUDACXX_FRAGMENT(__semiregular_, _Tp);
48
+
49
+ #endif // _LIBCUDACXX_STD_VER > 11
50
+
51
+ _LIBCUDACXX_END_NAMESPACE_STD
52
+
53
+ #endif // _LIBCUDACXX___CONCEPTS_SEMIREGULAR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/binary_function.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FUNCTIONAL_BINARY_FUNCTION_H
11
+ #define _LIBCUDACXX___FUNCTIONAL_BINARY_FUNCTION_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ #if _LIBCUDACXX_STD_VER <= 14 || defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION)
24
+
25
+ template <class _Arg1, class _Arg2, class _Result>
26
+ struct _LIBCUDACXX_TEMPLATE_VIS _LIBCUDACXX_DEPRECATED_IN_CXX11 binary_function
27
+ {
28
+ typedef _Arg1 first_argument_type;
29
+ typedef _Arg2 second_argument_type;
30
+ typedef _Result result_type;
31
+ };
32
+
33
+ #endif // _LIBCUDACXX_STD_VER <= 14 || defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION)
34
+
35
+ template <class _Arg1, class _Arg2, class _Result> struct __binary_function_keep_layout_base {
36
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
37
+ using first_argument_type _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Arg1;
38
+ using second_argument_type _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Arg2;
39
+ using result_type _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Result;
40
+ #endif
41
+ };
42
+
43
+ #if _LIBCUDACXX_STD_VER <= 14 || defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION)
44
+ _LIBCUDACXX_DIAGNOSTIC_PUSH
45
+ _LIBCUDACXX_CLANG_DIAGNOSTIC_IGNORED("-Wdeprecated-declarations")
46
+ template <class _Arg1, class _Arg2, class _Result>
47
+ using __binary_function = binary_function<_Arg1, _Arg2, _Result>;
48
+ _LIBCUDACXX_DIAGNOSTIC_POP
49
+ #else
50
+ template <class _Arg1, class _Arg2, class _Result>
51
+ using __binary_function = __binary_function_keep_layout_base<_Arg1, _Arg2, _Result>;
52
+ #endif
53
+
54
+ _LIBCUDACXX_END_NAMESPACE_STD
55
+
56
+ #endif // _LIBCUDACXX___FUNCTIONAL_BINARY_FUNCTION_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/bind_back.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_BIND_BACK_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_BIND_BACK_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__functional/invoke.h"
19
+ #include "../__functional/perfect_forward.h"
20
+ #include "../__fwd/get.h"
21
+ #include "../__tuple_dir/tuple_size.h"
22
+ #include "../__type_traits/decay.h"
23
+ #include "../__type_traits/enable_if.h"
24
+ #include "../__type_traits/is_constructible.h"
25
+ #include "../__type_traits/is_move_constructible.h"
26
+ #include "../__utility/forward.h"
27
+ #include "../__utility/integer_sequence.h"
28
+
29
+ #include "../tuple"
30
+
31
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
32
+ #pragma GCC system_header
33
+ #endif
34
+
35
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
36
+
37
+ #if _LIBCUDACXX_STD_VER > 14
38
+
39
+ template <size_t _NBound, class = make_index_sequence<_NBound>>
40
+ struct __bind_back_op;
41
+
42
+ template <size_t _NBound, size_t ..._Ip>
43
+ struct __bind_back_op<_NBound, index_sequence<_Ip...>> {
44
+ template <class _Fn, class _BoundArgs, class... _Args>
45
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY
46
+ constexpr auto operator()(_Fn&& __f, _BoundArgs&& __bound_args, _Args&&... __args) const
47
+ noexcept(noexcept(_CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward<_Args>(__args)..., _CUDA_VSTD::get<_Ip>(_CUDA_VSTD::forward<_BoundArgs>(__bound_args))...)))
48
+ -> decltype( _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward<_Args>(__args)..., _CUDA_VSTD::get<_Ip>(_CUDA_VSTD::forward<_BoundArgs>(__bound_args))...))
49
+ { return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward<_Args>(__args)..., _CUDA_VSTD::get<_Ip>(_CUDA_VSTD::forward<_BoundArgs>(__bound_args))...); }
50
+ };
51
+
52
+ template <class _Fn, class _BoundArgs>
53
+ struct __bind_back_t : __perfect_forward<__bind_back_op<tuple_size_v<_BoundArgs>>, _Fn, _BoundArgs> {
54
+ using __perfect_forward<__bind_back_op<tuple_size_v<_BoundArgs>>, _Fn, _BoundArgs>::__perfect_forward;
55
+ };
56
+
57
+ template <class _Fn, class ..._Args, class = enable_if_t<
58
+ _And<
59
+ is_constructible<decay_t<_Fn>, _Fn>,
60
+ is_move_constructible<decay_t<_Fn>>,
61
+ is_constructible<decay_t<_Args>, _Args>...,
62
+ is_move_constructible<decay_t<_Args>>...
63
+ >::value
64
+ >>
65
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY
66
+ constexpr auto __bind_back(_Fn&& __f, _Args&&... __args)
67
+ noexcept(noexcept(__bind_back_t<decay_t<_Fn>, tuple<decay_t<_Args>...>>(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::forward<_Args>(__args)...))))
68
+ -> decltype( __bind_back_t<decay_t<_Fn>, tuple<decay_t<_Args>...>>(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::forward<_Args>(__args)...)))
69
+ { return __bind_back_t<decay_t<_Fn>, tuple<decay_t<_Args>...>>(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::forward<_Args>(__args)...)); }
70
+
71
+ #endif // _LIBCUDACXX_STD_VER > 14
72
+
73
+ _LIBCUDACXX_END_NAMESPACE_STD
74
+
75
+ #endif // _LIBCUDACXX___FUNCTIONAL_BIND_BACK_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/bind_front.h ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_BIND_FRONT_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_BIND_FRONT_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__concepts/__concept_macros.h"
19
+ #include "../__functional/invoke.h"
20
+ #include "../__functional/perfect_forward.h"
21
+ #include "../__type_traits/decay.h"
22
+ #include "../__type_traits/enable_if.h"
23
+ #include "../__type_traits/is_constructible.h"
24
+ #include "../__type_traits/is_move_constructible.h"
25
+ #include "../__type_traits/is_nothrow_constructible.h"
26
+ #include "../__utility/forward.h"
27
+
28
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
29
+ #pragma GCC system_header
30
+ #endif
31
+
32
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
33
+
34
+ #if _LIBCUDACXX_STD_VER > 14
35
+
36
+ struct __bind_front_op {
37
+ template <class ..._Args>
38
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY
39
+ constexpr auto operator()(_Args&& ...__args) const
40
+ noexcept(noexcept(_CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Args>(__args)...)))
41
+ -> decltype( _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Args>(__args)...))
42
+ { return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Args>(__args)...); }
43
+ };
44
+
45
+ template <class _Fn, class ..._BoundArgs>
46
+ struct __bind_front_t : __perfect_forward<__bind_front_op, _Fn, _BoundArgs...> {
47
+ using __base = __perfect_forward<__bind_front_op, _Fn, _BoundArgs...>;
48
+ #if defined(_LIBCUDACXX_COMPILER_NVRTC)
49
+ constexpr __bind_front_t() noexcept = default;
50
+
51
+ template<class... _Args>
52
+ _LIBCUDACXX_INLINE_VISIBILITY constexpr
53
+ __bind_front_t(_Args&&... __args) noexcept(noexcept(__base(_CUDA_VSTD::declval<_Args>()...)))
54
+ : __base(_CUDA_VSTD::forward<_Args>(__args)...)
55
+ {}
56
+ #else
57
+ using __base::__base;
58
+ #endif
59
+ };
60
+
61
+ template<class _Fn, class... _Args>
62
+ _LIBCUDACXX_CONCEPT __can_bind_front = is_constructible_v<decay_t<_Fn>, _Fn> &&
63
+ is_move_constructible_v<decay_t<_Fn>> &&
64
+ (is_constructible_v<decay_t<_Args>, _Args> && ...) &&
65
+ (is_move_constructible_v<decay_t<_Args>> && ... );
66
+
67
+ _LIBCUDACXX_TEMPLATE(class _Fn, class... _Args)
68
+ (requires __can_bind_front<_Fn, _Args...>)
69
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY
70
+ constexpr auto bind_front(_Fn&& __f, _Args&&... __args) noexcept(is_nothrow_constructible_v<tuple<decay_t<_Args>...>, _Args&&...>) {
71
+ return __bind_front_t<decay_t<_Fn>, decay_t<_Args>...>(_CUDA_VSTD::forward<_Fn>(__f), _CUDA_VSTD::forward<_Args>(__args)...);
72
+ }
73
+
74
+ #endif // _LIBCUDACXX_STD_VER > 14
75
+
76
+ _LIBCUDACXX_END_NAMESPACE_STD
77
+
78
+ #endif // _LIBCUDACXX___FUNCTIONAL_BIND_FRONT_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/binder1st.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_BINDER1ST_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_BINDER1ST_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__functional/unary_function.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ #if _LIBCUDACXX_STD_VER <= 14 || defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_BINDERS)
27
+
28
+ template <class __Operation>
29
+ class _LIBCUDACXX_TEMPLATE_VIS _LIBCUDACXX_DEPRECATED_IN_CXX11 binder1st
30
+ : public __unary_function<typename __Operation::second_argument_type, typename __Operation::result_type>
31
+ {
32
+ protected:
33
+ __Operation op;
34
+ typename __Operation::first_argument_type value;
35
+ public:
36
+ _LIBCUDACXX_INLINE_VISIBILITY binder1st(const __Operation& __x,
37
+ const typename __Operation::first_argument_type __y)
38
+ : op(__x), value(__y) {}
39
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
40
+ _LIBCUDACXX_INLINE_VISIBILITY typename __Operation::result_type operator()
41
+ (typename __Operation::second_argument_type& __x) const
42
+ {return op(value, __x);}
43
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
44
+ _LIBCUDACXX_INLINE_VISIBILITY typename __Operation::result_type operator()
45
+ (const typename __Operation::second_argument_type& __x) const
46
+ {return op(value, __x);}
47
+ };
48
+
49
+ template <class __Operation, class _Tp>
50
+ _LIBCUDACXX_DEPRECATED_IN_CXX11 inline _LIBCUDACXX_INLINE_VISIBILITY
51
+ binder1st<__Operation>
52
+ bind1st(const __Operation& __op, const _Tp& __x)
53
+ {return binder1st<__Operation>(__op, __x);}
54
+
55
+ #endif // _LIBCUDACXX_STD_VER <= 14 || defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_BINDERS)
56
+
57
+ _LIBCUDACXX_END_NAMESPACE_STD
58
+
59
+ #endif // _LIBCUDACXX___FUNCTIONAL_BINDER1ST_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/binder2nd.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_BINDER2ND_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_BINDER2ND_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__functional/unary_function.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ #if _LIBCUDACXX_STD_VER <= 14 || defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_BINDERS)
27
+
28
+ template <class __Operation>
29
+ class _LIBCUDACXX_TEMPLATE_VIS _LIBCUDACXX_DEPRECATED_IN_CXX11 binder2nd
30
+ : public __unary_function<typename __Operation::first_argument_type, typename __Operation::result_type>
31
+ {
32
+ protected:
33
+ __Operation op;
34
+ typename __Operation::second_argument_type value;
35
+ public:
36
+ _LIBCUDACXX_INLINE_VISIBILITY
37
+ binder2nd(const __Operation& __x, const typename __Operation::second_argument_type __y)
38
+ : op(__x), value(__y) {}
39
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
40
+ _LIBCUDACXX_INLINE_VISIBILITY typename __Operation::result_type operator()
41
+ ( typename __Operation::first_argument_type& __x) const
42
+ {return op(__x, value);}
43
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
44
+ _LIBCUDACXX_INLINE_VISIBILITY typename __Operation::result_type operator()
45
+ (const typename __Operation::first_argument_type& __x) const
46
+ {return op(__x, value);}
47
+ };
48
+
49
+ template <class __Operation, class _Tp>
50
+ _LIBCUDACXX_DEPRECATED_IN_CXX11 inline _LIBCUDACXX_INLINE_VISIBILITY
51
+ binder2nd<__Operation>
52
+ bind2nd(const __Operation& __op, const _Tp& __x)
53
+ {return binder2nd<__Operation>(__op, __x);}
54
+
55
+ #endif // _LIBCUDACXX_STD_VER <= 14 || defined(_LIBCUDACXX_ENABLE_CXX17_REMOVED_BINDERS)
56
+
57
+ _LIBCUDACXX_END_NAMESPACE_STD
58
+
59
+ #endif // _LIBCUDACXX___FUNCTIONAL_BINDER2ND_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/compose.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_COMPOSE_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_COMPOSE_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__functional/invoke.h"
19
+ #include "../__functional/perfect_forward.h"
20
+ #include "../__type_traits/decay.h"
21
+ #include "../__utility/forward.h"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ #if _LIBCUDACXX_STD_VER > 14
30
+
31
+ struct __compose_op {
32
+ template<class _Fn1, class _Fn2, class ..._Args>
33
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY
34
+ constexpr auto operator()(_Fn1&& __f1, _Fn2&& __f2, _Args&&... __args) const
35
+ noexcept(noexcept(_CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn1>(__f1), _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn2>(__f2), _CUDA_VSTD::forward<_Args>(__args)...))))
36
+ -> decltype( _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn1>(__f1), _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn2>(__f2), _CUDA_VSTD::forward<_Args>(__args)...)))
37
+ { return _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn1>(__f1), _CUDA_VSTD::invoke(_CUDA_VSTD::forward<_Fn2>(__f2), _CUDA_VSTD::forward<_Args>(__args)...)); }
38
+ };
39
+
40
+ template <class _Fn1, class _Fn2>
41
+ struct __compose_t : __perfect_forward<__compose_op, _Fn1, _Fn2> {
42
+ using __perfect_forward<__compose_op, _Fn1, _Fn2>::__perfect_forward;
43
+ };
44
+
45
+ template <class _Fn1, class _Fn2>
46
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY
47
+ constexpr auto __compose(_Fn1&& __f1, _Fn2&& __f2)
48
+ noexcept(noexcept(__compose_t<decay_t<_Fn1>, decay_t<_Fn2>>(_CUDA_VSTD::forward<_Fn1>(__f1), _CUDA_VSTD::forward<_Fn2>(__f2))))
49
+ -> decltype( __compose_t<decay_t<_Fn1>, decay_t<_Fn2>>(_CUDA_VSTD::forward<_Fn1>(__f1), _CUDA_VSTD::forward<_Fn2>(__f2)))
50
+ { return __compose_t<decay_t<_Fn1>, decay_t<_Fn2>>(_CUDA_VSTD::forward<_Fn1>(__f1), _CUDA_VSTD::forward<_Fn2>(__f2)); }
51
+
52
+ #endif // _LIBCUDACXX_STD_VER > 14
53
+
54
+ _LIBCUDACXX_END_NAMESPACE_STD
55
+
56
+ #endif // _LIBCUDACXX___FUNCTIONAL_COMPOSE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/default_searcher.h ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_DEFAULT_SEARCHER_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_DEFAULT_SEARCHER_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ // #include "../__algorithm/search.h"
19
+ #include "../__functional/identity.h"
20
+ #include "../__functional/operations.h"
21
+ #include "../__iterator/iterator_traits.h"
22
+ #include "../__utility/pair.h"
23
+
24
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
25
+ #pragma GCC system_header
26
+ #endif
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ template <class _BinaryPredicate, class _ForwardIterator1, class _ForwardIterator2>
31
+ _LIBCUDACXX_HOST_DEVICE _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
32
+ pair<_ForwardIterator1, _ForwardIterator1>
33
+ __search(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
34
+ _ForwardIterator2 __first2, _ForwardIterator2 __last2, _BinaryPredicate __pred,
35
+ forward_iterator_tag, forward_iterator_tag)
36
+ {
37
+ if (__first2 == __last2)
38
+ return _CUDA_VSTD::make_pair(__first1, __first1); // Everything matches an empty sequence
39
+ while (true)
40
+ {
41
+ // Find first element in sequence 1 that matchs *__first2, with a mininum of loop checks
42
+ while (true)
43
+ {
44
+ if (__first1 == __last1) // return __last1 if no element matches *__first2
45
+ return _CUDA_VSTD::make_pair(__last1, __last1);
46
+ if (__pred(*__first1, *__first2))
47
+ break;
48
+ ++__first1;
49
+ }
50
+ // *__first1 matches *__first2, now match elements after here
51
+ _ForwardIterator1 __m1 = __first1;
52
+ _ForwardIterator2 __m2 = __first2;
53
+ while (true)
54
+ {
55
+ if (++__m2 == __last2) // If pattern exhausted, __first1 is the answer (works for 1 element pattern)
56
+ return _CUDA_VSTD::make_pair(__first1, __m1);
57
+ if (++__m1 == __last1) // Otherwise if source exhaused, pattern not found
58
+ return _CUDA_VSTD::make_pair(__last1, __last1);
59
+ if (!__pred(*__m1, *__m2)) // if there is a mismatch, restart with a new __first1
60
+ {
61
+ ++__first1;
62
+ break;
63
+ } // else there is a match, check next elements
64
+ }
65
+ }
66
+ }
67
+
68
+ template <class _BinaryPredicate, class _RandomAccessIterator1, class _RandomAccessIterator2>
69
+ _LIBCUDACXX_HOST_DEVICE _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
70
+ pair<_RandomAccessIterator1, _RandomAccessIterator1>
71
+ __search(_RandomAccessIterator1 __first1, _RandomAccessIterator1 __last1,
72
+ _RandomAccessIterator2 __first2, _RandomAccessIterator2 __last2, _BinaryPredicate __pred,
73
+ random_access_iterator_tag, random_access_iterator_tag)
74
+ {
75
+ typedef typename iterator_traits<_RandomAccessIterator1>::difference_type _Diff1;
76
+ typedef typename iterator_traits<_RandomAccessIterator2>::difference_type _Diff2;
77
+ // Take advantage of knowing source and pattern lengths. Stop short when source is smaller than pattern
78
+ const _Diff2 __len2 = __last2 - __first2;
79
+ if (__len2 == 0)
80
+ return _CUDA_VSTD::make_pair(__first1, __first1);
81
+ const _Diff1 __len1 = __last1 - __first1;
82
+ if (__len1 < __len2)
83
+ return _CUDA_VSTD::make_pair(__last1, __last1);
84
+ const _RandomAccessIterator1 __s = __last1 - (__len2 - 1); // Start of pattern match can't go beyond here
85
+
86
+ while (true)
87
+ {
88
+ while (true)
89
+ {
90
+ if (__first1 == __s)
91
+ return _CUDA_VSTD::make_pair(__last1, __last1);
92
+ if (__pred(*__first1, *__first2))
93
+ break;
94
+ ++__first1;
95
+ }
96
+
97
+ _RandomAccessIterator1 __m1 = __first1;
98
+ _RandomAccessIterator2 __m2 = __first2;
99
+ while (true)
100
+ {
101
+ if (++__m2 == __last2)
102
+ return _CUDA_VSTD::make_pair(__first1, __first1 + __len2);
103
+ ++__m1; // no need to check range on __m1 because __s guarantees we have enough source
104
+ if (!__pred(*__m1, *__m2))
105
+ {
106
+ ++__first1;
107
+ break;
108
+ }
109
+ }
110
+ }
111
+ }
112
+
113
+ #ifndef __cuda_std__
114
+
115
+ #if _LIBCUDACXX_STD_VER > 14
116
+
117
+ // default searcher
118
+ template<class _ForwardIterator, class _BinaryPredicate = equal_to<>>
119
+ class _LIBCUDACXX_TEMPLATE_VIS default_searcher {
120
+ public:
121
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
122
+ default_searcher(_ForwardIterator __f, _ForwardIterator __l,
123
+ _BinaryPredicate __p = _BinaryPredicate())
124
+ : __first_(__f), __last_(__l), __pred_(__p) {}
125
+
126
+ template <typename _ForwardIterator2>
127
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
128
+ pair<_ForwardIterator2, _ForwardIterator2>
129
+ operator () (_ForwardIterator2 __f, _ForwardIterator2 __l) const
130
+ {
131
+ return _CUDA_VSTD::__search(__f, __l, __first_, __last_, __pred_,
132
+ typename _CUDA_VSTD::iterator_traits<_ForwardIterator>::iterator_category(),
133
+ typename _CUDA_VSTD::iterator_traits<_ForwardIterator2>::iterator_category());
134
+ }
135
+
136
+ private:
137
+ _ForwardIterator __first_;
138
+ _ForwardIterator __last_;
139
+ _BinaryPredicate __pred_;
140
+ };
141
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(default_searcher);
142
+
143
+ #endif // _LIBCUDACXX_STD_VER > 14
144
+ #endif // __cuda_std__
145
+
146
+ _LIBCUDACXX_END_NAMESPACE_STD
147
+
148
+ #endif // _LIBCUDACXX___FUNCTIONAL_DEFAULT_SEARCHER_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/function.h ADDED
@@ -0,0 +1,1261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_FUNCTION_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_FUNCTION_H
13
+
14
+ #ifndef __cuda_std__
15
+
16
+ #ifndef __cuda_std__
17
+ #include <__config>
18
+ #include <exception>
19
+ #include <memory>
20
+ #include <new>
21
+ #include <typeinfo>
22
+ #endif // __cuda_std__
23
+
24
+ #include "__assert"
25
+ #include "../__debug"
26
+ #include "../__functional_base"
27
+ #include "../__functional/binary_function.h"
28
+ #include "../__functional/invoke.h"
29
+ #include "../__functional/unary_function.h"
30
+ #include "../__iterator/iterator_traits.h"
31
+ #include "../__type_traits/conditional.h"
32
+ #include "../__type_traits/decay.h"
33
+ #include "../__type_traits/enable_if.h"
34
+ #include "../__type_traits/is_nothrow_copy_constructible.h"
35
+ #include "../__type_traits/is_same.h"
36
+ #include "../__type_traits/is_scalar.h"
37
+ #include "../__type_traits/is_trivially_copy_constructible.h"
38
+ #include "../__type_traits/is_trivially_destructible.h"
39
+ #include "../__type_traits/is_void.h"
40
+ #include "../__type_traits/remove_cvref.h"
41
+ #include "../__utility/forward.h"
42
+ #include "../__utility/move.h"
43
+ #include "../__utility/piecewise_construct.h"
44
+ #include "../__utility/swap.h"
45
+ #include "../tuple"
46
+
47
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
48
+ #pragma GCC system_header
49
+ #endif
50
+
51
+ #ifndef _LIBCUDACXX_CXX03_LANG
52
+
53
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
54
+
55
+ // bad_function_call
56
+
57
+ class _LIBCUDACXX_EXCEPTION_ABI bad_function_call
58
+ : public exception
59
+ {
60
+ #ifdef _LIBCUDACXX_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION
61
+ public:
62
+ virtual ~bad_function_call() _NOEXCEPT;
63
+
64
+ virtual const char* what() const _NOEXCEPT;
65
+ #endif
66
+ };
67
+
68
+ _LIBCUDACXX_NORETURN inline _LIBCUDACXX_INLINE_VISIBILITY
69
+ void __throw_bad_function_call()
70
+ {
71
+ #ifndef _LIBCUDACXX_NO_EXCEPTIONS
72
+ throw bad_function_call();
73
+ #else
74
+ _CUDA_VSTD::abort();
75
+ #endif
76
+ }
77
+
78
+ template<class _Fp> class _LIBCUDACXX_TEMPLATE_VIS function; // undefined
79
+
80
+ namespace __function
81
+ {
82
+
83
+ template<class _Rp>
84
+ struct __maybe_derive_from_unary_function
85
+ {
86
+ };
87
+
88
+ template<class _Rp, class _A1>
89
+ struct __maybe_derive_from_unary_function<_Rp(_A1)>
90
+ : public __unary_function<_A1, _Rp>
91
+ {
92
+ };
93
+
94
+ template<class _Rp>
95
+ struct __maybe_derive_from_binary_function
96
+ {
97
+ };
98
+
99
+ template<class _Rp, class _A1, class _A2>
100
+ struct __maybe_derive_from_binary_function<_Rp(_A1, _A2)>
101
+ : public __binary_function<_A1, _A2, _Rp>
102
+ {
103
+ };
104
+
105
+ template <class _Fp>
106
+ _LIBCUDACXX_INLINE_VISIBILITY
107
+ bool __not_null(_Fp const&) { return true; }
108
+
109
+ template <class _Fp>
110
+ _LIBCUDACXX_INLINE_VISIBILITY
111
+ bool __not_null(_Fp* __ptr) { return __ptr; }
112
+
113
+ template <class _Ret, class _Class>
114
+ _LIBCUDACXX_INLINE_VISIBILITY
115
+ bool __not_null(_Ret _Class::*__ptr) { return __ptr; }
116
+
117
+ template <class _Fp>
118
+ _LIBCUDACXX_INLINE_VISIBILITY
119
+ bool __not_null(function<_Fp> const& __f) { return !!__f; }
120
+
121
+ #ifdef _LIBCUDACXX_HAS_EXTENSION_BLOCKS
122
+ template <class _Rp, class ..._Args>
123
+ _LIBCUDACXX_INLINE_VISIBILITY
124
+ bool __not_null(_Rp (^__p)(_Args...)) { return __p; }
125
+ #endif
126
+
127
+ } // namespace __function
128
+
129
+ #ifndef _LIBCUDACXX_CXX03_LANG
130
+
131
+ namespace __function {
132
+
133
+ // __alloc_func holds a functor and an allocator.
134
+
135
+ template <class _Fp, class _Ap, class _FB> class __alloc_func;
136
+ template <class _Fp, class _FB>
137
+ class __default_alloc_func;
138
+
139
+ template <class _Fp, class _Ap, class _Rp, class... _ArgTypes>
140
+ class __alloc_func<_Fp, _Ap, _Rp(_ArgTypes...)>
141
+ {
142
+ __compressed_pair<_Fp, _Ap> __f_;
143
+
144
+ public:
145
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Fp _Target;
146
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Ap _Alloc;
147
+
148
+ _LIBCUDACXX_INLINE_VISIBILITY
149
+ const _Target& __target() const { return __f_.first(); }
150
+
151
+ // WIN32 APIs may define __allocator, so use __get_allocator instead.
152
+ _LIBCUDACXX_INLINE_VISIBILITY
153
+ const _Alloc& __get_allocator() const { return __f_.second(); }
154
+
155
+ _LIBCUDACXX_INLINE_VISIBILITY
156
+ explicit __alloc_func(_Target&& __f)
157
+ : __f_(piecewise_construct, _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::move(__f)),
158
+ _CUDA_VSTD::forward_as_tuple())
159
+ {
160
+ }
161
+
162
+ _LIBCUDACXX_INLINE_VISIBILITY
163
+ explicit __alloc_func(const _Target& __f, const _Alloc& __a)
164
+ : __f_(piecewise_construct, _CUDA_VSTD::forward_as_tuple(__f),
165
+ _CUDA_VSTD::forward_as_tuple(__a))
166
+ {
167
+ }
168
+
169
+ _LIBCUDACXX_INLINE_VISIBILITY
170
+ explicit __alloc_func(const _Target& __f, _Alloc&& __a)
171
+ : __f_(piecewise_construct, _CUDA_VSTD::forward_as_tuple(__f),
172
+ _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::move(__a)))
173
+ {
174
+ }
175
+
176
+ _LIBCUDACXX_INLINE_VISIBILITY
177
+ explicit __alloc_func(_Target&& __f, _Alloc&& __a)
178
+ : __f_(piecewise_construct, _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::move(__f)),
179
+ _CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::move(__a)))
180
+ {
181
+ }
182
+
183
+ _LIBCUDACXX_INLINE_VISIBILITY
184
+ _Rp operator()(_ArgTypes&&... __arg)
185
+ {
186
+ typedef __invoke_void_return_wrapper<_Rp> _Invoker;
187
+ return _Invoker::__call(__f_.first(),
188
+ _CUDA_VSTD::forward<_ArgTypes>(__arg)...);
189
+ }
190
+
191
+ _LIBCUDACXX_INLINE_VISIBILITY
192
+ __alloc_func* __clone() const
193
+ {
194
+ typedef allocator_traits<_Alloc> __alloc_traits;
195
+ typedef typename __rebind_alloc_helper<__alloc_traits, __alloc_func>::type _AA;
196
+ _AA __a(__f_.second());
197
+ typedef __allocator_destructor<_AA> _Dp;
198
+ unique_ptr<__alloc_func, _Dp> __hold(__a.allocate(1), _Dp(__a, 1));
199
+ ::new ((void*)__hold.get()) __alloc_func(__f_.first(), _Alloc(__a));
200
+ return __hold.release();
201
+ }
202
+
203
+ _LIBCUDACXX_INLINE_VISIBILITY
204
+ void destroy() _NOEXCEPT { __f_.~__compressed_pair<_Target, _Alloc>(); }
205
+
206
+ static void __destroy_and_delete(__alloc_func* __f) {
207
+ typedef allocator_traits<_Alloc> __alloc_traits;
208
+ typedef typename __rebind_alloc_helper<__alloc_traits, __alloc_func>::type _FunAlloc;
209
+ _FunAlloc __a(__f->__get_allocator());
210
+ __f->destroy();
211
+ __a.deallocate(__f, 1);
212
+ }
213
+ };
214
+
215
+ template <class _Fp, class _Rp, class... _ArgTypes>
216
+ class __default_alloc_func<_Fp, _Rp(_ArgTypes...)> {
217
+ _Fp __f_;
218
+
219
+ public:
220
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Fp _Target;
221
+
222
+ _LIBCUDACXX_INLINE_VISIBILITY
223
+ const _Target& __target() const { return __f_; }
224
+
225
+ _LIBCUDACXX_INLINE_VISIBILITY
226
+ explicit __default_alloc_func(_Target&& __f) : __f_(_CUDA_VSTD::move(__f)) {}
227
+
228
+ _LIBCUDACXX_INLINE_VISIBILITY
229
+ explicit __default_alloc_func(const _Target& __f) : __f_(__f) {}
230
+
231
+ _LIBCUDACXX_INLINE_VISIBILITY
232
+ _Rp operator()(_ArgTypes&&... __arg) {
233
+ typedef __invoke_void_return_wrapper<_Rp> _Invoker;
234
+ return _Invoker::__call(__f_, _CUDA_VSTD::forward<_ArgTypes>(__arg)...);
235
+ }
236
+
237
+ _LIBCUDACXX_INLINE_VISIBILITY
238
+ __default_alloc_func* __clone() const {
239
+ __builtin_new_allocator::__holder_t __hold =
240
+ __builtin_new_allocator::__allocate_type<__default_alloc_func>(1);
241
+ __default_alloc_func* __res =
242
+ ::new ((void*)__hold.get()) __default_alloc_func(__f_);
243
+ (void)__hold.release();
244
+ return __res;
245
+ }
246
+
247
+ _LIBCUDACXX_INLINE_VISIBILITY
248
+ void destroy() _NOEXCEPT { __f_.~_Target(); }
249
+
250
+ static void __destroy_and_delete(__default_alloc_func* __f) {
251
+ __f->destroy();
252
+ __builtin_new_allocator::__deallocate_type<__default_alloc_func>(__f, 1);
253
+ }
254
+ };
255
+
256
+ // __base provides an abstract interface for copyable functors.
257
+
258
+ template<class _Fp> class _LIBCUDACXX_TEMPLATE_VIS __base;
259
+
260
+ template<class _Rp, class ..._ArgTypes>
261
+ class __base<_Rp(_ArgTypes...)>
262
+ {
263
+ __base(const __base&);
264
+ __base& operator=(const __base&);
265
+ public:
266
+ _LIBCUDACXX_INLINE_VISIBILITY __base() {}
267
+ _LIBCUDACXX_INLINE_VISIBILITY virtual ~__base() {}
268
+ virtual __base* __clone() const = 0;
269
+ virtual void __clone(__base*) const = 0;
270
+ virtual void destroy() _NOEXCEPT = 0;
271
+ virtual void destroy_deallocate() _NOEXCEPT = 0;
272
+ virtual _Rp operator()(_ArgTypes&& ...) = 0;
273
+ #ifndef _LIBCUDACXX_NO_RTTI
274
+ virtual const void* target(const type_info&) const _NOEXCEPT = 0;
275
+ virtual const type_info& target_type() const _NOEXCEPT = 0;
276
+ #endif // _LIBCUDACXX_NO_RTTI
277
+ };
278
+
279
+ // __func implements __base for a given functor type.
280
+
281
+ template<class _FD, class _Alloc, class _FB> class __func;
282
+
283
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
284
+ class __func<_Fp, _Alloc, _Rp(_ArgTypes...)>
285
+ : public __base<_Rp(_ArgTypes...)>
286
+ {
287
+ __alloc_func<_Fp, _Alloc, _Rp(_ArgTypes...)> __f_;
288
+ public:
289
+ _LIBCUDACXX_INLINE_VISIBILITY
290
+ explicit __func(_Fp&& __f)
291
+ : __f_(_CUDA_VSTD::move(__f)) {}
292
+
293
+ _LIBCUDACXX_INLINE_VISIBILITY
294
+ explicit __func(const _Fp& __f, const _Alloc& __a)
295
+ : __f_(__f, __a) {}
296
+
297
+ _LIBCUDACXX_INLINE_VISIBILITY
298
+ explicit __func(const _Fp& __f, _Alloc&& __a)
299
+ : __f_(__f, _CUDA_VSTD::move(__a)) {}
300
+
301
+ _LIBCUDACXX_INLINE_VISIBILITY
302
+ explicit __func(_Fp&& __f, _Alloc&& __a)
303
+ : __f_(_CUDA_VSTD::move(__f), _CUDA_VSTD::move(__a)) {}
304
+
305
+ virtual __base<_Rp(_ArgTypes...)>* __clone() const;
306
+ virtual void __clone(__base<_Rp(_ArgTypes...)>*) const;
307
+ virtual void destroy() _NOEXCEPT;
308
+ virtual void destroy_deallocate() _NOEXCEPT;
309
+ virtual _Rp operator()(_ArgTypes&&... __arg);
310
+ #ifndef _LIBCUDACXX_NO_RTTI
311
+ virtual const void* target(const type_info&) const _NOEXCEPT;
312
+ virtual const type_info& target_type() const _NOEXCEPT;
313
+ #endif // _LIBCUDACXX_NO_RTTI
314
+ };
315
+
316
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
317
+ __base<_Rp(_ArgTypes...)>*
318
+ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::__clone() const
319
+ {
320
+ typedef allocator_traits<_Alloc> __alloc_traits;
321
+ typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap;
322
+ _Ap __a(__f_.__get_allocator());
323
+ typedef __allocator_destructor<_Ap> _Dp;
324
+ unique_ptr<__func, _Dp> __hold(__a.allocate(1), _Dp(__a, 1));
325
+ ::new ((void*)__hold.get()) __func(__f_.__target(), _Alloc(__a));
326
+ return __hold.release();
327
+ }
328
+
329
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
330
+ void
331
+ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::__clone(__base<_Rp(_ArgTypes...)>* __p) const
332
+ {
333
+ ::new ((void*)__p) __func(__f_.__target(), __f_.__get_allocator());
334
+ }
335
+
336
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
337
+ void
338
+ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::destroy() _NOEXCEPT
339
+ {
340
+ __f_.destroy();
341
+ }
342
+
343
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
344
+ void
345
+ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::destroy_deallocate() _NOEXCEPT
346
+ {
347
+ typedef allocator_traits<_Alloc> __alloc_traits;
348
+ typedef typename __rebind_alloc_helper<__alloc_traits, __func>::type _Ap;
349
+ _Ap __a(__f_.__get_allocator());
350
+ __f_.destroy();
351
+ __a.deallocate(this, 1);
352
+ }
353
+
354
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
355
+ _Rp
356
+ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::operator()(_ArgTypes&& ... __arg)
357
+ {
358
+ return __f_(_CUDA_VSTD::forward<_ArgTypes>(__arg)...);
359
+ }
360
+
361
+ #ifndef _LIBCUDACXX_NO_RTTI
362
+
363
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
364
+ const void*
365
+ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::target(const type_info& __ti) const _NOEXCEPT
366
+ {
367
+ if (__ti == typeid(_Fp))
368
+ return _CUDA_VSTD::addressof(__f_.__target());
369
+ return nullptr;
370
+ }
371
+
372
+ template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
373
+ const type_info&
374
+ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::target_type() const _NOEXCEPT
375
+ {
376
+ return typeid(_Fp);
377
+ }
378
+
379
+ #endif // _LIBCUDACXX_NO_RTTI
380
+
381
+ // __value_func creates a value-type from a __func.
382
+
383
+ template <class _Fp> class __value_func;
384
+
385
+ template <class _Rp, class... _ArgTypes> class __value_func<_Rp(_ArgTypes...)>
386
+ {
387
+ typename aligned_storage<3 * sizeof(void*)>::type __buf_;
388
+
389
+ typedef __base<_Rp(_ArgTypes...)> __func;
390
+ __func* __f_;
391
+
392
+ _LIBCUDACXX_NO_CFI static __func* __as_base(void* __p)
393
+ {
394
+ return reinterpret_cast<__func*>(__p);
395
+ }
396
+
397
+ public:
398
+ _LIBCUDACXX_INLINE_VISIBILITY
399
+ __value_func() _NOEXCEPT : __f_(nullptr) {}
400
+
401
+ template <class _Fp, class _Alloc>
402
+ _LIBCUDACXX_INLINE_VISIBILITY __value_func(_Fp&& __f, const _Alloc& __a)
403
+ : __f_(nullptr)
404
+ {
405
+ typedef allocator_traits<_Alloc> __alloc_traits;
406
+ typedef __function::__func<_Fp, _Alloc, _Rp(_ArgTypes...)> _Fun;
407
+ typedef typename __rebind_alloc_helper<__alloc_traits, _Fun>::type _FunAlloc;
408
+
409
+ if (__function::__not_null(__f))
410
+ {
411
+ _FunAlloc __af(__a);
412
+ if (sizeof(_Fun) <= sizeof(__buf_) &&
413
+ is_nothrow_copy_constructible<_Fp>::value &&
414
+ is_nothrow_copy_constructible<_FunAlloc>::value)
415
+ {
416
+ __f_ =
417
+ ::new ((void*)&__buf_) _Fun(_CUDA_VSTD::move(__f), _Alloc(__af));
418
+ }
419
+ else
420
+ {
421
+ typedef __allocator_destructor<_FunAlloc> _Dp;
422
+ unique_ptr<__func, _Dp> __hold(__af.allocate(1), _Dp(__af, 1));
423
+ ::new ((void*)__hold.get()) _Fun(_CUDA_VSTD::move(__f), _Alloc(__a));
424
+ __f_ = __hold.release();
425
+ }
426
+ }
427
+ }
428
+
429
+ template <class _Fp,
430
+ class = __enable_if_t<!is_same<__decay_t<_Fp>, __value_func>::value>>
431
+ _LIBCUDACXX_INLINE_VISIBILITY explicit __value_func(_Fp&& __f)
432
+ : __value_func(_CUDA_VSTD::forward<_Fp>(__f), allocator<_Fp>()) {}
433
+
434
+ _LIBCUDACXX_INLINE_VISIBILITY
435
+ __value_func(const __value_func& __f)
436
+ {
437
+ if (__f.__f_ == nullptr)
438
+ __f_ = nullptr;
439
+ else if ((void*)__f.__f_ == &__f.__buf_)
440
+ {
441
+ __f_ = __as_base(&__buf_);
442
+ __f.__f_->__clone(__f_);
443
+ }
444
+ else
445
+ __f_ = __f.__f_->__clone();
446
+ }
447
+
448
+ _LIBCUDACXX_INLINE_VISIBILITY
449
+ __value_func(__value_func&& __f) _NOEXCEPT
450
+ {
451
+ if (__f.__f_ == nullptr)
452
+ __f_ = nullptr;
453
+ else if ((void*)__f.__f_ == &__f.__buf_)
454
+ {
455
+ __f_ = __as_base(&__buf_);
456
+ __f.__f_->__clone(__f_);
457
+ }
458
+ else
459
+ {
460
+ __f_ = __f.__f_;
461
+ __f.__f_ = nullptr;
462
+ }
463
+ }
464
+
465
+ _LIBCUDACXX_INLINE_VISIBILITY
466
+ ~__value_func()
467
+ {
468
+ if ((void*)__f_ == &__buf_)
469
+ __f_->destroy();
470
+ else if (__f_)
471
+ __f_->destroy_deallocate();
472
+ }
473
+
474
+ _LIBCUDACXX_INLINE_VISIBILITY
475
+ __value_func& operator=(__value_func&& __f)
476
+ {
477
+ *this = nullptr;
478
+ if (__f.__f_ == nullptr)
479
+ __f_ = nullptr;
480
+ else if ((void*)__f.__f_ == &__f.__buf_)
481
+ {
482
+ __f_ = __as_base(&__buf_);
483
+ __f.__f_->__clone(__f_);
484
+ }
485
+ else
486
+ {
487
+ __f_ = __f.__f_;
488
+ __f.__f_ = nullptr;
489
+ }
490
+ return *this;
491
+ }
492
+
493
+ _LIBCUDACXX_INLINE_VISIBILITY
494
+ __value_func& operator=(nullptr_t)
495
+ {
496
+ __func* __f = __f_;
497
+ __f_ = nullptr;
498
+ if ((void*)__f == &__buf_)
499
+ __f->destroy();
500
+ else if (__f)
501
+ __f->destroy_deallocate();
502
+ return *this;
503
+ }
504
+
505
+ _LIBCUDACXX_INLINE_VISIBILITY
506
+ _Rp operator()(_ArgTypes&&... __args) const
507
+ {
508
+ if (__f_ == nullptr)
509
+ __throw_bad_function_call();
510
+ return (*__f_)(_CUDA_VSTD::forward<_ArgTypes>(__args)...);
511
+ }
512
+
513
+ _LIBCUDACXX_INLINE_VISIBILITY
514
+ void swap(__value_func& __f) _NOEXCEPT
515
+ {
516
+ if (&__f == this)
517
+ return;
518
+ if ((void*)__f_ == &__buf_ && (void*)__f.__f_ == &__f.__buf_)
519
+ {
520
+ typename aligned_storage<sizeof(__buf_)>::type __tempbuf;
521
+ __func* __t = __as_base(&__tempbuf);
522
+ __f_->__clone(__t);
523
+ __f_->destroy();
524
+ __f_ = nullptr;
525
+ __f.__f_->__clone(__as_base(&__buf_));
526
+ __f.__f_->destroy();
527
+ __f.__f_ = nullptr;
528
+ __f_ = __as_base(&__buf_);
529
+ __t->__clone(__as_base(&__f.__buf_));
530
+ __t->destroy();
531
+ __f.__f_ = __as_base(&__f.__buf_);
532
+ }
533
+ else if ((void*)__f_ == &__buf_)
534
+ {
535
+ __f_->__clone(__as_base(&__f.__buf_));
536
+ __f_->destroy();
537
+ __f_ = __f.__f_;
538
+ __f.__f_ = __as_base(&__f.__buf_);
539
+ }
540
+ else if ((void*)__f.__f_ == &__f.__buf_)
541
+ {
542
+ __f.__f_->__clone(__as_base(&__buf_));
543
+ __f.__f_->destroy();
544
+ __f.__f_ = __f_;
545
+ __f_ = __as_base(&__buf_);
546
+ }
547
+ else
548
+ _CUDA_VSTD::swap(__f_, __f.__f_);
549
+ }
550
+
551
+ _LIBCUDACXX_INLINE_VISIBILITY
552
+ _LIBCUDACXX_EXPLICIT operator bool() const _NOEXCEPT { return __f_ != nullptr; }
553
+
554
+ #ifndef _LIBCUDACXX_NO_RTTI
555
+ _LIBCUDACXX_INLINE_VISIBILITY
556
+ const type_info& target_type() const _NOEXCEPT
557
+ {
558
+ if (__f_ == nullptr)
559
+ return typeid(void);
560
+ return __f_->target_type();
561
+ }
562
+
563
+ template <typename _Tp>
564
+ _LIBCUDACXX_INLINE_VISIBILITY const _Tp* target() const _NOEXCEPT
565
+ {
566
+ if (__f_ == nullptr)
567
+ return nullptr;
568
+ return (const _Tp*)__f_->target(typeid(_Tp));
569
+ }
570
+ #endif // _LIBCUDACXX_NO_RTTI
571
+ };
572
+
573
+ // Storage for a functor object, to be used with __policy to manage copy and
574
+ // destruction.
575
+ union __policy_storage
576
+ {
577
+ mutable char __small[sizeof(void*) * 2];
578
+ void* __large;
579
+ };
580
+
581
+ // True if _Fun can safely be held in __policy_storage.__small.
582
+ template <typename _Fun>
583
+ struct __use_small_storage
584
+ : public integral_constant<
585
+ bool, sizeof(_Fun) <= sizeof(__policy_storage) &&
586
+ _LIBCUDACXX_ALIGNOF(_Fun) <= _LIBCUDACXX_ALIGNOF(__policy_storage) &&
587
+ is_trivially_copy_constructible<_Fun>::value &&
588
+ is_trivially_destructible<_Fun>::value> {};
589
+
590
+ // Policy contains information about how to copy, destroy, and move the
591
+ // underlying functor. You can think of it as a vtable of sorts.
592
+ struct __policy
593
+ {
594
+ // Used to copy or destroy __large values. null for trivial objects.
595
+ void* (*const __clone)(const void*);
596
+ void (*const __destroy)(void*);
597
+
598
+ // True if this is the null policy (no value).
599
+ const bool __is_null;
600
+
601
+ // The target type. May be null if RTTI is disabled.
602
+ const type_info* const __type_info;
603
+
604
+ // Returns a pointer to a static policy object suitable for the functor
605
+ // type.
606
+ template <typename _Fun>
607
+ _LIBCUDACXX_INLINE_VISIBILITY static const __policy* __create()
608
+ {
609
+ return __choose_policy<_Fun>(__use_small_storage<_Fun>());
610
+ }
611
+
612
+ _LIBCUDACXX_INLINE_VISIBILITY
613
+ static const __policy* __create_empty()
614
+ {
615
+ static const _LIBCUDACXX_CONSTEXPR __policy __policy_ = {nullptr, nullptr,
616
+ true,
617
+ #ifndef _LIBCUDACXX_NO_RTTI
618
+ &typeid(void)
619
+ #else
620
+ nullptr
621
+ #endif
622
+ };
623
+ return &__policy_;
624
+ }
625
+
626
+ private:
627
+ template <typename _Fun> static void* __large_clone(const void* __s)
628
+ {
629
+ const _Fun* __f = static_cast<const _Fun*>(__s);
630
+ return __f->__clone();
631
+ }
632
+
633
+ template <typename _Fun>
634
+ static void __large_destroy(void* __s) {
635
+ _Fun::__destroy_and_delete(static_cast<_Fun*>(__s));
636
+ }
637
+
638
+ template <typename _Fun>
639
+ _LIBCUDACXX_INLINE_VISIBILITY static const __policy*
640
+ __choose_policy(/* is_small = */ false_type) {
641
+ static const _LIBCUDACXX_CONSTEXPR __policy __policy_ = {
642
+ &__large_clone<_Fun>, &__large_destroy<_Fun>, false,
643
+ #ifndef _LIBCUDACXX_NO_RTTI
644
+ &typeid(typename _Fun::_Target)
645
+ #else
646
+ nullptr
647
+ #endif
648
+ };
649
+ return &__policy_;
650
+ }
651
+
652
+ template <typename _Fun>
653
+ _LIBCUDACXX_INLINE_VISIBILITY static const __policy*
654
+ __choose_policy(/* is_small = */ true_type)
655
+ {
656
+ static const _LIBCUDACXX_CONSTEXPR __policy __policy_ = {
657
+ nullptr, nullptr, false,
658
+ #ifndef _LIBCUDACXX_NO_RTTI
659
+ &typeid(typename _Fun::_Target)
660
+ #else
661
+ nullptr
662
+ #endif
663
+ };
664
+ return &__policy_;
665
+ }
666
+ };
667
+
668
+ // Used to choose between perfect forwarding or pass-by-value. Pass-by-value is
669
+ // faster for types that can be passed in registers.
670
+ template <typename _Tp>
671
+ using __fast_forward = __conditional_t<is_scalar<_Tp>::value, _Tp, _Tp&&>;
672
+
673
+ // __policy_invoker calls an instance of __alloc_func held in __policy_storage.
674
+
675
+ template <class _Fp> struct __policy_invoker;
676
+
677
+ template <class _Rp, class... _ArgTypes>
678
+ struct __policy_invoker<_Rp(_ArgTypes...)>
679
+ {
680
+ typedef _Rp (*__Call)(const __policy_storage*,
681
+ __fast_forward<_ArgTypes>...);
682
+
683
+ __Call __call_;
684
+
685
+ // Creates an invoker that throws bad_function_call.
686
+ _LIBCUDACXX_INLINE_VISIBILITY
687
+ __policy_invoker() : __call_(&__call_empty) {}
688
+
689
+ // Creates an invoker that calls the given instance of __func.
690
+ template <typename _Fun>
691
+ _LIBCUDACXX_INLINE_VISIBILITY static __policy_invoker __create()
692
+ {
693
+ return __policy_invoker(&__call_impl<_Fun>);
694
+ }
695
+
696
+ private:
697
+ _LIBCUDACXX_INLINE_VISIBILITY
698
+ explicit __policy_invoker(__Call __c) : __call_(__c) {}
699
+
700
+ static _Rp __call_empty(const __policy_storage*,
701
+ __fast_forward<_ArgTypes>...)
702
+ {
703
+ __throw_bad_function_call();
704
+ }
705
+
706
+ template <typename _Fun>
707
+ static _Rp __call_impl(const __policy_storage* __buf,
708
+ __fast_forward<_ArgTypes>... __args)
709
+ {
710
+ _Fun* __f = reinterpret_cast<_Fun*>(__use_small_storage<_Fun>::value
711
+ ? &__buf->__small
712
+ : __buf->__large);
713
+ return (*__f)(_CUDA_VSTD::forward<_ArgTypes>(__args)...);
714
+ }
715
+ };
716
+
717
+ // __policy_func uses a __policy and __policy_invoker to create a type-erased,
718
+ // copyable functor.
719
+
720
+ template <class _Fp> class __policy_func;
721
+
722
+ template <class _Rp, class... _ArgTypes> class __policy_func<_Rp(_ArgTypes...)>
723
+ {
724
+ // Inline storage for small objects.
725
+ __policy_storage __buf_;
726
+
727
+ // Calls the value stored in __buf_. This could technically be part of
728
+ // policy, but storing it here eliminates a level of indirection inside
729
+ // operator().
730
+ typedef __function::__policy_invoker<_Rp(_ArgTypes...)> __invoker;
731
+ __invoker __invoker_;
732
+
733
+ // The policy that describes how to move / copy / destroy __buf_. Never
734
+ // null, even if the function is empty.
735
+ const __policy* __policy_;
736
+
737
+ public:
738
+ _LIBCUDACXX_INLINE_VISIBILITY
739
+ __policy_func() : __policy_(__policy::__create_empty()) {}
740
+
741
+ template <class _Fp, class _Alloc>
742
+ _LIBCUDACXX_INLINE_VISIBILITY __policy_func(_Fp&& __f, const _Alloc& __a)
743
+ : __policy_(__policy::__create_empty())
744
+ {
745
+ typedef __alloc_func<_Fp, _Alloc, _Rp(_ArgTypes...)> _Fun;
746
+ typedef allocator_traits<_Alloc> __alloc_traits;
747
+ typedef typename __rebind_alloc_helper<__alloc_traits, _Fun>::type _FunAlloc;
748
+
749
+ if (__function::__not_null(__f))
750
+ {
751
+ __invoker_ = __invoker::template __create<_Fun>();
752
+ __policy_ = __policy::__create<_Fun>();
753
+
754
+ _FunAlloc __af(__a);
755
+ if (__use_small_storage<_Fun>())
756
+ {
757
+ ::new ((void*)&__buf_.__small)
758
+ _Fun(_CUDA_VSTD::move(__f), _Alloc(__af));
759
+ }
760
+ else
761
+ {
762
+ typedef __allocator_destructor<_FunAlloc> _Dp;
763
+ unique_ptr<_Fun, _Dp> __hold(__af.allocate(1), _Dp(__af, 1));
764
+ ::new ((void*)__hold.get())
765
+ _Fun(_CUDA_VSTD::move(__f), _Alloc(__af));
766
+ __buf_.__large = __hold.release();
767
+ }
768
+ }
769
+ }
770
+
771
+ template <class _Fp, class = __enable_if_t<!is_same<__decay_t<_Fp>, __policy_func>::value>>
772
+ _LIBCUDACXX_INLINE_VISIBILITY explicit __policy_func(_Fp&& __f)
773
+ : __policy_(__policy::__create_empty()) {
774
+ typedef __default_alloc_func<_Fp, _Rp(_ArgTypes...)> _Fun;
775
+
776
+ if (__function::__not_null(__f)) {
777
+ __invoker_ = __invoker::template __create<_Fun>();
778
+ __policy_ = __policy::__create<_Fun>();
779
+ if (__use_small_storage<_Fun>()) {
780
+ ::new ((void*)&__buf_.__small) _Fun(_CUDA_VSTD::move(__f));
781
+ } else {
782
+ __builtin_new_allocator::__holder_t __hold =
783
+ __builtin_new_allocator::__allocate_type<_Fun>(1);
784
+ __buf_.__large = ::new ((void*)__hold.get()) _Fun(_CUDA_VSTD::move(__f));
785
+ (void)__hold.release();
786
+ }
787
+ }
788
+ }
789
+
790
+ _LIBCUDACXX_INLINE_VISIBILITY
791
+ __policy_func(const __policy_func& __f)
792
+ : __buf_(__f.__buf_), __invoker_(__f.__invoker_),
793
+ __policy_(__f.__policy_)
794
+ {
795
+ if (__policy_->__clone)
796
+ __buf_.__large = __policy_->__clone(__f.__buf_.__large);
797
+ }
798
+
799
+ _LIBCUDACXX_INLINE_VISIBILITY
800
+ __policy_func(__policy_func&& __f)
801
+ : __buf_(__f.__buf_), __invoker_(__f.__invoker_),
802
+ __policy_(__f.__policy_)
803
+ {
804
+ if (__policy_->__destroy)
805
+ {
806
+ __f.__policy_ = __policy::__create_empty();
807
+ __f.__invoker_ = __invoker();
808
+ }
809
+ }
810
+
811
+ _LIBCUDACXX_INLINE_VISIBILITY
812
+ ~__policy_func()
813
+ {
814
+ if (__policy_->__destroy)
815
+ __policy_->__destroy(__buf_.__large);
816
+ }
817
+
818
+ _LIBCUDACXX_INLINE_VISIBILITY
819
+ __policy_func& operator=(__policy_func&& __f)
820
+ {
821
+ *this = nullptr;
822
+ __buf_ = __f.__buf_;
823
+ __invoker_ = __f.__invoker_;
824
+ __policy_ = __f.__policy_;
825
+ __f.__policy_ = __policy::__create_empty();
826
+ __f.__invoker_ = __invoker();
827
+ return *this;
828
+ }
829
+
830
+ _LIBCUDACXX_INLINE_VISIBILITY
831
+ __policy_func& operator=(nullptr_t)
832
+ {
833
+ const __policy* __p = __policy_;
834
+ __policy_ = __policy::__create_empty();
835
+ __invoker_ = __invoker();
836
+ if (__p->__destroy)
837
+ __p->__destroy(__buf_.__large);
838
+ return *this;
839
+ }
840
+
841
+ _LIBCUDACXX_INLINE_VISIBILITY
842
+ _Rp operator()(_ArgTypes&&... __args) const
843
+ {
844
+ return __invoker_.__call_(_CUDA_VSTD::addressof(__buf_),
845
+ _CUDA_VSTD::forward<_ArgTypes>(__args)...);
846
+ }
847
+
848
+ _LIBCUDACXX_INLINE_VISIBILITY
849
+ void swap(__policy_func& __f)
850
+ {
851
+ _CUDA_VSTD::swap(__invoker_, __f.__invoker_);
852
+ _CUDA_VSTD::swap(__policy_, __f.__policy_);
853
+ _CUDA_VSTD::swap(__buf_, __f.__buf_);
854
+ }
855
+
856
+ _LIBCUDACXX_INLINE_VISIBILITY
857
+ explicit operator bool() const _NOEXCEPT
858
+ {
859
+ return !__policy_->__is_null;
860
+ }
861
+
862
+ #ifndef _LIBCUDACXX_NO_RTTI
863
+ _LIBCUDACXX_INLINE_VISIBILITY
864
+ const type_info& target_type() const _NOEXCEPT
865
+ {
866
+ return *__policy_->__type_info;
867
+ }
868
+
869
+ template <typename _Tp>
870
+ _LIBCUDACXX_INLINE_VISIBILITY const _Tp* target() const _NOEXCEPT
871
+ {
872
+ if (__policy_->__is_null || typeid(_Tp) != *__policy_->__type_info)
873
+ return nullptr;
874
+ if (__policy_->__clone) // Out of line storage.
875
+ return reinterpret_cast<const _Tp*>(__buf_.__large);
876
+ else
877
+ return reinterpret_cast<const _Tp*>(&__buf_.__small);
878
+ }
879
+ #endif // _LIBCUDACXX_NO_RTTI
880
+ };
881
+
882
+ #if defined(_LIBCUDACXX_HAS_BLOCKS_RUNTIME)
883
+
884
+ extern "C" void *_Block_copy(const void *);
885
+ extern "C" void _Block_release(const void *);
886
+
887
+ template<class _Rp1, class ..._ArgTypes1, class _Alloc, class _Rp, class ..._ArgTypes>
888
+ class __func<_Rp1(^)(_ArgTypes1...), _Alloc, _Rp(_ArgTypes...)>
889
+ : public __base<_Rp(_ArgTypes...)>
890
+ {
891
+ typedef _Rp1(^__block_type)(_ArgTypes1...);
892
+ __block_type __f_;
893
+
894
+ public:
895
+ _LIBCUDACXX_INLINE_VISIBILITY
896
+ explicit __func(__block_type const& __f)
897
+ #ifdef _LIBCUDACXX_HAS_OBJC_ARC
898
+ : __f_(__f)
899
+ #else
900
+ : __f_(reinterpret_cast<__block_type>(__f ? _Block_copy(__f) : nullptr))
901
+ #endif
902
+ { }
903
+
904
+ // [TODO] add && to save on a retain
905
+
906
+ _LIBCUDACXX_INLINE_VISIBILITY
907
+ explicit __func(__block_type __f, const _Alloc& /* unused */)
908
+ #ifdef _LIBCUDACXX_HAS_OBJC_ARC
909
+ : __f_(__f)
910
+ #else
911
+ : __f_(reinterpret_cast<__block_type>(__f ? _Block_copy(__f) : nullptr))
912
+ #endif
913
+ { }
914
+
915
+ virtual __base<_Rp(_ArgTypes...)>* __clone() const {
916
+ _LIBCUDACXX_ASSERT(false,
917
+ "Block pointers are just pointers, so they should always fit into "
918
+ "std::function's small buffer optimization. This function should "
919
+ "never be invoked.");
920
+ return nullptr;
921
+ }
922
+
923
+ virtual void __clone(__base<_Rp(_ArgTypes...)>* __p) const {
924
+ ::new ((void*)__p) __func(__f_);
925
+ }
926
+
927
+ virtual void destroy() _NOEXCEPT {
928
+ #ifndef _LIBCUDACXX_HAS_OBJC_ARC
929
+ if (__f_)
930
+ _Block_release(__f_);
931
+ #endif
932
+ __f_ = 0;
933
+ }
934
+
935
+ virtual void destroy_deallocate() _NOEXCEPT {
936
+ _LIBCUDACXX_ASSERT(false,
937
+ "Block pointers are just pointers, so they should always fit into "
938
+ "std::function's small buffer optimization. This function should "
939
+ "never be invoked.");
940
+ }
941
+
942
+ virtual _Rp operator()(_ArgTypes&& ... __arg) {
943
+ return _CUDA_VSTD::__invoke(__f_, _CUDA_VSTD::forward<_ArgTypes>(__arg)...);
944
+ }
945
+
946
+ #ifndef _LIBCUDACXX_NO_RTTI
947
+ virtual const void* target(type_info const& __ti) const _NOEXCEPT {
948
+ if (__ti == typeid(__func::__block_type))
949
+ return &__f_;
950
+ return (const void*)nullptr;
951
+ }
952
+
953
+ virtual const type_info& target_type() const _NOEXCEPT {
954
+ return typeid(__func::__block_type);
955
+ }
956
+ #endif // _LIBCUDACXX_NO_RTTI
957
+ };
958
+
959
+ #endif // _LIBCUDACXX_HAS_EXTENSION_BLOCKS
960
+
961
+ } // namespace __function
962
+
963
+ template<class _Rp, class ..._ArgTypes>
964
+ class _LIBCUDACXX_TEMPLATE_VIS function<_Rp(_ArgTypes...)>
965
+ : public __function::__maybe_derive_from_unary_function<_Rp(_ArgTypes...)>,
966
+ public __function::__maybe_derive_from_binary_function<_Rp(_ArgTypes...)>
967
+ {
968
+ #ifndef _LIBCUDACXX_ABI_OPTIMIZED_FUNCTION
969
+ typedef __function::__value_func<_Rp(_ArgTypes...)> __func;
970
+ #else
971
+ typedef __function::__policy_func<_Rp(_ArgTypes...)> __func;
972
+ #endif
973
+
974
+ __func __f_;
975
+
976
+ template <class _Fp, bool = _And<
977
+ _IsNotSame<__remove_cvref_t<_Fp>, function>,
978
+ __invokable<_Fp, _ArgTypes...>
979
+ >::value>
980
+ struct __callable;
981
+ template <class _Fp>
982
+ struct __callable<_Fp, true>
983
+ {
984
+ static const bool value = is_void<_Rp>::value ||
985
+ __is_core_convertible<typename __invoke_of<_Fp, _ArgTypes...>::type,
986
+ _Rp>::value;
987
+ };
988
+ template <class _Fp>
989
+ struct __callable<_Fp, false>
990
+ {
991
+ static const bool value = false;
992
+ };
993
+
994
+ template <class _Fp>
995
+ using _EnableIfLValueCallable = __enable_if_t<__callable<_Fp&>::value>;
996
+ public:
997
+ typedef _Rp result_type;
998
+
999
+ // construct/copy/destroy:
1000
+ _LIBCUDACXX_INLINE_VISIBILITY
1001
+ function() _NOEXCEPT { }
1002
+ _LIBCUDACXX_INLINE_VISIBILITY
1003
+ function(nullptr_t) _NOEXCEPT {}
1004
+ function(const function&);
1005
+ function(function&&) _NOEXCEPT;
1006
+ template<class _Fp, class = _EnableIfLValueCallable<_Fp>>
1007
+ function(_Fp);
1008
+
1009
+ #if _LIBCUDACXX_STD_VER <= 14
1010
+ template<class _Alloc>
1011
+ _LIBCUDACXX_INLINE_VISIBILITY
1012
+ function(allocator_arg_t, const _Alloc&) _NOEXCEPT {}
1013
+ template<class _Alloc>
1014
+ _LIBCUDACXX_INLINE_VISIBILITY
1015
+ function(allocator_arg_t, const _Alloc&, nullptr_t) _NOEXCEPT {}
1016
+ template<class _Alloc>
1017
+ function(allocator_arg_t, const _Alloc&, const function&);
1018
+ template<class _Alloc>
1019
+ function(allocator_arg_t, const _Alloc&, function&&);
1020
+ template<class _Fp, class _Alloc, class = _EnableIfLValueCallable<_Fp>>
1021
+ function(allocator_arg_t, const _Alloc& __a, _Fp __f);
1022
+ #endif
1023
+
1024
+ function& operator=(const function&);
1025
+ function& operator=(function&&) _NOEXCEPT;
1026
+ function& operator=(nullptr_t) _NOEXCEPT;
1027
+ template<class _Fp, class = _EnableIfLValueCallable<__decay_t<_Fp>>>
1028
+ function& operator=(_Fp&&);
1029
+
1030
+ ~function();
1031
+
1032
+ // function modifiers:
1033
+ void swap(function&) _NOEXCEPT;
1034
+
1035
+ #if _LIBCUDACXX_STD_VER <= 14
1036
+ template<class _Fp, class _Alloc>
1037
+ _LIBCUDACXX_INLINE_VISIBILITY
1038
+ void assign(_Fp&& __f, const _Alloc& __a)
1039
+ {function(allocator_arg, __a, _CUDA_VSTD::forward<_Fp>(__f)).swap(*this);}
1040
+ #endif
1041
+
1042
+ // function capacity:
1043
+ _LIBCUDACXX_INLINE_VISIBILITY
1044
+ _LIBCUDACXX_EXPLICIT operator bool() const _NOEXCEPT {
1045
+ return static_cast<bool>(__f_);
1046
+ }
1047
+
1048
+ // deleted overloads close possible hole in the type system
1049
+ template<class _R2, class... _ArgTypes2>
1050
+ bool operator==(const function<_R2(_ArgTypes2...)>&) const = delete;
1051
+ template<class _R2, class... _ArgTypes2>
1052
+ bool operator!=(const function<_R2(_ArgTypes2...)>&) const = delete;
1053
+ public:
1054
+ // function invocation:
1055
+ _Rp operator()(_ArgTypes...) const;
1056
+
1057
+ #ifndef _LIBCUDACXX_NO_RTTI
1058
+ // function target access:
1059
+ const type_info& target_type() const _NOEXCEPT;
1060
+ template <typename _Tp> _Tp* target() _NOEXCEPT;
1061
+ template <typename _Tp> const _Tp* target() const _NOEXCEPT;
1062
+ #endif // _LIBCUDACXX_NO_RTTI
1063
+ };
1064
+
1065
+ #if _LIBCUDACXX_STD_VER > 14
1066
+ template<class _Rp, class ..._Ap>
1067
+ function(_Rp(*)(_Ap...)) -> function<_Rp(_Ap...)>;
1068
+
1069
+ template<class _Fp>
1070
+ struct __strip_signature;
1071
+
1072
+ template<class _Rp, class _Gp, class ..._Ap>
1073
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...)> { using type = _Rp(_Ap...); };
1074
+ template<class _Rp, class _Gp, class ..._Ap>
1075
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const> { using type = _Rp(_Ap...); };
1076
+ template<class _Rp, class _Gp, class ..._Ap>
1077
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) volatile> { using type = _Rp(_Ap...); };
1078
+ template<class _Rp, class _Gp, class ..._Ap>
1079
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const volatile> { using type = _Rp(_Ap...); };
1080
+
1081
+ template<class _Rp, class _Gp, class ..._Ap>
1082
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) &> { using type = _Rp(_Ap...); };
1083
+ template<class _Rp, class _Gp, class ..._Ap>
1084
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const &> { using type = _Rp(_Ap...); };
1085
+ template<class _Rp, class _Gp, class ..._Ap>
1086
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) volatile &> { using type = _Rp(_Ap...); };
1087
+ template<class _Rp, class _Gp, class ..._Ap>
1088
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const volatile &> { using type = _Rp(_Ap...); };
1089
+
1090
+ template<class _Rp, class _Gp, class ..._Ap>
1091
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) noexcept> { using type = _Rp(_Ap...); };
1092
+ template<class _Rp, class _Gp, class ..._Ap>
1093
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const noexcept> { using type = _Rp(_Ap...); };
1094
+ template<class _Rp, class _Gp, class ..._Ap>
1095
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) volatile noexcept> { using type = _Rp(_Ap...); };
1096
+ template<class _Rp, class _Gp, class ..._Ap>
1097
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const volatile noexcept> { using type = _Rp(_Ap...); };
1098
+
1099
+ template<class _Rp, class _Gp, class ..._Ap>
1100
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) & noexcept> { using type = _Rp(_Ap...); };
1101
+ template<class _Rp, class _Gp, class ..._Ap>
1102
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const & noexcept> { using type = _Rp(_Ap...); };
1103
+ template<class _Rp, class _Gp, class ..._Ap>
1104
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) volatile & noexcept> { using type = _Rp(_Ap...); };
1105
+ template<class _Rp, class _Gp, class ..._Ap>
1106
+ struct __strip_signature<_Rp (_Gp::*) (_Ap...) const volatile & noexcept> { using type = _Rp(_Ap...); };
1107
+
1108
+ template<class _Fp, class _Stripped = typename __strip_signature<decltype(&_Fp::operator())>::type>
1109
+ function(_Fp) -> function<_Stripped>;
1110
+ #endif // _LIBCUDACXX_STD_VER > 14
1111
+
1112
+ template<class _Rp, class ..._ArgTypes>
1113
+ function<_Rp(_ArgTypes...)>::function(const function& __f) : __f_(__f.__f_) {}
1114
+
1115
+ #if _LIBCUDACXX_STD_VER <= 14
1116
+ template<class _Rp, class ..._ArgTypes>
1117
+ template <class _Alloc>
1118
+ function<_Rp(_ArgTypes...)>::function(allocator_arg_t, const _Alloc&,
1119
+ const function& __f) : __f_(__f.__f_) {}
1120
+ #endif
1121
+
1122
+ template <class _Rp, class... _ArgTypes>
1123
+ function<_Rp(_ArgTypes...)>::function(function&& __f) _NOEXCEPT
1124
+ : __f_(_CUDA_VSTD::move(__f.__f_)) {}
1125
+
1126
+ #if _LIBCUDACXX_STD_VER <= 14
1127
+ template<class _Rp, class ..._ArgTypes>
1128
+ template <class _Alloc>
1129
+ function<_Rp(_ArgTypes...)>::function(allocator_arg_t, const _Alloc&,
1130
+ function&& __f)
1131
+ : __f_(_CUDA_VSTD::move(__f.__f_)) {}
1132
+ #endif
1133
+
1134
+ template <class _Rp, class... _ArgTypes>
1135
+ template <class _Fp, class>
1136
+ function<_Rp(_ArgTypes...)>::function(_Fp __f) : __f_(_CUDA_VSTD::move(__f)) {}
1137
+
1138
+ #if _LIBCUDACXX_STD_VER <= 14
1139
+ template <class _Rp, class... _ArgTypes>
1140
+ template <class _Fp, class _Alloc, class>
1141
+ function<_Rp(_ArgTypes...)>::function(allocator_arg_t, const _Alloc& __a,
1142
+ _Fp __f)
1143
+ : __f_(_CUDA_VSTD::move(__f), __a) {}
1144
+ #endif
1145
+
1146
+ template<class _Rp, class ..._ArgTypes>
1147
+ function<_Rp(_ArgTypes...)>&
1148
+ function<_Rp(_ArgTypes...)>::operator=(const function& __f)
1149
+ {
1150
+ function(__f).swap(*this);
1151
+ return *this;
1152
+ }
1153
+
1154
+ template<class _Rp, class ..._ArgTypes>
1155
+ function<_Rp(_ArgTypes...)>&
1156
+ function<_Rp(_ArgTypes...)>::operator=(function&& __f) _NOEXCEPT
1157
+ {
1158
+ __f_ = _CUDA_VSTD::move(__f.__f_);
1159
+ return *this;
1160
+ }
1161
+
1162
+ template<class _Rp, class ..._ArgTypes>
1163
+ function<_Rp(_ArgTypes...)>&
1164
+ function<_Rp(_ArgTypes...)>::operator=(nullptr_t) _NOEXCEPT
1165
+ {
1166
+ __f_ = nullptr;
1167
+ return *this;
1168
+ }
1169
+
1170
+ template<class _Rp, class ..._ArgTypes>
1171
+ template <class _Fp, class>
1172
+ function<_Rp(_ArgTypes...)>&
1173
+ function<_Rp(_ArgTypes...)>::operator=(_Fp&& __f)
1174
+ {
1175
+ function(_CUDA_VSTD::forward<_Fp>(__f)).swap(*this);
1176
+ return *this;
1177
+ }
1178
+
1179
+ template<class _Rp, class ..._ArgTypes>
1180
+ function<_Rp(_ArgTypes...)>::~function() {}
1181
+
1182
+ template<class _Rp, class ..._ArgTypes>
1183
+ void
1184
+ function<_Rp(_ArgTypes...)>::swap(function& __f) _NOEXCEPT
1185
+ {
1186
+ __f_.swap(__f.__f_);
1187
+ }
1188
+
1189
+ template<class _Rp, class ..._ArgTypes>
1190
+ _Rp
1191
+ function<_Rp(_ArgTypes...)>::operator()(_ArgTypes... __arg) const
1192
+ {
1193
+ return __f_(_CUDA_VSTD::forward<_ArgTypes>(__arg)...);
1194
+ }
1195
+
1196
+ #ifndef _LIBCUDACXX_NO_RTTI
1197
+
1198
+ template<class _Rp, class ..._ArgTypes>
1199
+ const type_info&
1200
+ function<_Rp(_ArgTypes...)>::target_type() const _NOEXCEPT
1201
+ {
1202
+ return __f_.target_type();
1203
+ }
1204
+
1205
+ template<class _Rp, class ..._ArgTypes>
1206
+ template <typename _Tp>
1207
+ _Tp*
1208
+ function<_Rp(_ArgTypes...)>::target() _NOEXCEPT
1209
+ {
1210
+ return (_Tp*)(__f_.template target<_Tp>());
1211
+ }
1212
+
1213
+ template<class _Rp, class ..._ArgTypes>
1214
+ template <typename _Tp>
1215
+ const _Tp*
1216
+ function<_Rp(_ArgTypes...)>::target() const _NOEXCEPT
1217
+ {
1218
+ return __f_.template target<_Tp>();
1219
+ }
1220
+
1221
+ #endif // _LIBCUDACXX_NO_RTTI
1222
+
1223
+ template <class _Rp, class... _ArgTypes>
1224
+ inline _LIBCUDACXX_INLINE_VISIBILITY
1225
+ bool
1226
+ operator==(const function<_Rp(_ArgTypes...)>& __f, nullptr_t) _NOEXCEPT {return !__f;}
1227
+
1228
+ template <class _Rp, class... _ArgTypes>
1229
+ inline _LIBCUDACXX_INLINE_VISIBILITY
1230
+ bool
1231
+ operator==(nullptr_t, const function<_Rp(_ArgTypes...)>& __f) _NOEXCEPT {return !__f;}
1232
+
1233
+ template <class _Rp, class... _ArgTypes>
1234
+ inline _LIBCUDACXX_INLINE_VISIBILITY
1235
+ bool
1236
+ operator!=(const function<_Rp(_ArgTypes...)>& __f, nullptr_t) _NOEXCEPT {return (bool)__f;}
1237
+
1238
+ template <class _Rp, class... _ArgTypes>
1239
+ inline _LIBCUDACXX_INLINE_VISIBILITY
1240
+ bool
1241
+ operator!=(nullptr_t, const function<_Rp(_ArgTypes...)>& __f) _NOEXCEPT {return (bool)__f;}
1242
+
1243
+ template <class _Rp, class... _ArgTypes>
1244
+ inline _LIBCUDACXX_INLINE_VISIBILITY
1245
+ void
1246
+ swap(function<_Rp(_ArgTypes...)>& __x, function<_Rp(_ArgTypes...)>& __y) _NOEXCEPT
1247
+ {return __x.swap(__y);}
1248
+
1249
+ #else // _LIBCUDACXX_CXX03_LANG
1250
+
1251
+ #include <__functional_03>
1252
+
1253
+ #endif
1254
+
1255
+ _LIBCUDACXX_END_NAMESPACE_STD
1256
+
1257
+ #endif // _LIBCUDACXX_CXX03_LANG
1258
+
1259
+ #endif // __cuda_std__
1260
+
1261
+ #endif // _LIBCUDACXX___FUNCTIONAL_FUNCTION_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/hash.h ADDED
@@ -0,0 +1,692 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FUNCTIONAL_HASH_H
11
+ #define _LIBCUDACXX___FUNCTIONAL_HASH_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #include <cstring>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__functional/invoke.h"
19
+ #include "../__functional/unary_function.h"
20
+ #include "../__fwd/hash.h"
21
+ #include "../__type_traits/enable_if.h"
22
+ #include "../__type_traits/integral_constant.h"
23
+ #include "../__type_traits/is_enum.h"
24
+ #include "../__type_traits/is_copy_constructible.h"
25
+ #include "../__type_traits/is_default_constructible.h"
26
+ #include "../__type_traits/is_move_constructible.h"
27
+ #include "../__type_traits/underlying_type.h"
28
+ #include "../__utility/forward.h"
29
+ #include "../__utility/move.h"
30
+ #include "../__utility/pair.h"
31
+ #include "../__utility/swap.h"
32
+ #include "../cstdint"
33
+
34
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
35
+ #pragma GCC system_header
36
+ #endif
37
+
38
+ #ifndef __cuda_std__
39
+
40
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
41
+
42
+ template <class _Size>
43
+ inline _LIBCUDACXX_INLINE_VISIBILITY
44
+ _Size
45
+ __loadword(const void* __p)
46
+ {
47
+ _Size __r;
48
+ std::memcpy(&__r, __p, sizeof(__r));
49
+ return __r;
50
+ }
51
+
52
+ // We use murmur2 when size_t is 32 bits, and cityhash64 when size_t
53
+ // is 64 bits. This is because cityhash64 uses 64bit x 64bit
54
+ // multiplication, which can be very slow on 32-bit systems.
55
+ template <class _Size, size_t = sizeof(_Size)*__CHAR_BIT__>
56
+ struct __murmur2_or_cityhash;
57
+
58
+ template <class _Size>
59
+ struct __murmur2_or_cityhash<_Size, 32>
60
+ {
61
+ inline _Size operator()(const void* __key, _Size __len)
62
+ _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK;
63
+ };
64
+
65
+ // murmur2
66
+ template <class _Size>
67
+ _Size
68
+ __murmur2_or_cityhash<_Size, 32>::operator()(const void* __key, _Size __len)
69
+ {
70
+ const _Size __m = 0x5bd1e995;
71
+ const _Size __r = 24;
72
+ _Size __h = __len;
73
+ const unsigned char* __data = static_cast<const unsigned char*>(__key);
74
+ for (; __len >= 4; __data += 4, __len -= 4)
75
+ {
76
+ _Size __k = __loadword<_Size>(__data);
77
+ __k *= __m;
78
+ __k ^= __k >> __r;
79
+ __k *= __m;
80
+ __h *= __m;
81
+ __h ^= __k;
82
+ }
83
+ switch (__len)
84
+ {
85
+ case 3:
86
+ __h ^= static_cast<_Size>(__data[2] << 16);
87
+ _LIBCUDACXX_FALLTHROUGH();
88
+ case 2:
89
+ __h ^= static_cast<_Size>(__data[1] << 8);
90
+ _LIBCUDACXX_FALLTHROUGH();
91
+ case 1:
92
+ __h ^= __data[0];
93
+ __h *= __m;
94
+ }
95
+ __h ^= __h >> 13;
96
+ __h *= __m;
97
+ __h ^= __h >> 15;
98
+ return __h;
99
+ }
100
+
101
+ template <class _Size>
102
+ struct __murmur2_or_cityhash<_Size, 64>
103
+ {
104
+ inline _Size operator()(const void* __key, _Size __len) _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK;
105
+
106
+ private:
107
+ // Some primes between 2^63 and 2^64.
108
+ static const _Size __k0 = 0xc3a5c85c97cb3127ULL;
109
+ static const _Size __k1 = 0xb492b66fbe98f273ULL;
110
+ static const _Size __k2 = 0x9ae16a3b2f90404fULL;
111
+ static const _Size __k3 = 0xc949d7c7509e6557ULL;
112
+
113
+ static _Size __rotate(_Size __val, int __shift) {
114
+ return __shift == 0 ? __val : ((__val >> __shift) | (__val << (64 - __shift)));
115
+ }
116
+
117
+ static _Size __rotate_by_at_least_1(_Size __val, int __shift) {
118
+ return (__val >> __shift) | (__val << (64 - __shift));
119
+ }
120
+
121
+ static _Size __shift_mix(_Size __val) {
122
+ return __val ^ (__val >> 47);
123
+ }
124
+
125
+ static _Size __hash_len_16(_Size __u, _Size __v)
126
+ _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
127
+ {
128
+ const _Size __mul = 0x9ddfea08eb382d69ULL;
129
+ _Size __a = (__u ^ __v) * __mul;
130
+ __a ^= (__a >> 47);
131
+ _Size __b = (__v ^ __a) * __mul;
132
+ __b ^= (__b >> 47);
133
+ __b *= __mul;
134
+ return __b;
135
+ }
136
+
137
+ static _Size __hash_len_0_to_16(const char* __s, _Size __len)
138
+ _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
139
+ {
140
+ if (__len > 8) {
141
+ const _Size __a = __loadword<_Size>(__s);
142
+ const _Size __b = __loadword<_Size>(__s + __len - 8);
143
+ return __hash_len_16(__a, __rotate_by_at_least_1(__b + __len, __len)) ^ __b;
144
+ }
145
+ if (__len >= 4) {
146
+ const uint32_t __a = __loadword<uint32_t>(__s);
147
+ const uint32_t __b = __loadword<uint32_t>(__s + __len - 4);
148
+ return __hash_len_16(__len + (static_cast<_Size>(__a) << 3), __b);
149
+ }
150
+ if (__len > 0) {
151
+ const unsigned char __a = static_cast<unsigned char>(__s[0]);
152
+ const unsigned char __b = static_cast<unsigned char>(__s[__len >> 1]);
153
+ const unsigned char __c = static_cast<unsigned char>(__s[__len - 1]);
154
+ const uint32_t __y = static_cast<uint32_t>(__a) +
155
+ (static_cast<uint32_t>(__b) << 8);
156
+ const uint32_t __z = __len + (static_cast<uint32_t>(__c) << 2);
157
+ return __shift_mix(__y * __k2 ^ __z * __k3) * __k2;
158
+ }
159
+ return __k2;
160
+ }
161
+
162
+ static _Size __hash_len_17_to_32(const char *__s, _Size __len)
163
+ _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
164
+ {
165
+ const _Size __a = __loadword<_Size>(__s) * __k1;
166
+ const _Size __b = __loadword<_Size>(__s + 8);
167
+ const _Size __c = __loadword<_Size>(__s + __len - 8) * __k2;
168
+ const _Size __d = __loadword<_Size>(__s + __len - 16) * __k0;
169
+ return __hash_len_16(__rotate(__a - __b, 43) + __rotate(__c, 30) + __d,
170
+ __a + __rotate(__b ^ __k3, 20) - __c + __len);
171
+ }
172
+
173
+ // Return a 16-byte hash for 48 bytes. Quick and dirty.
174
+ // Callers do best to use "random-looking" values for a and b.
175
+ static pair<_Size, _Size> __weak_hash_len_32_with_seeds(
176
+ _Size __w, _Size __x, _Size __y, _Size __z, _Size __a, _Size __b)
177
+ _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
178
+ {
179
+ __a += __w;
180
+ __b = __rotate(__b + __a + __z, 21);
181
+ const _Size __c = __a;
182
+ __a += __x;
183
+ __a += __y;
184
+ __b += __rotate(__a, 44);
185
+ return pair<_Size, _Size>(__a + __z, __b + __c);
186
+ }
187
+
188
+ // Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
189
+ static pair<_Size, _Size> __weak_hash_len_32_with_seeds(
190
+ const char* __s, _Size __a, _Size __b)
191
+ _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
192
+ {
193
+ return __weak_hash_len_32_with_seeds(__loadword<_Size>(__s),
194
+ __loadword<_Size>(__s + 8),
195
+ __loadword<_Size>(__s + 16),
196
+ __loadword<_Size>(__s + 24),
197
+ __a,
198
+ __b);
199
+ }
200
+
201
+ // Return an 8-byte hash for 33 to 64 bytes.
202
+ static _Size __hash_len_33_to_64(const char *__s, size_t __len)
203
+ _LIBCUDACXX_DISABLE_UBSAN_UNSIGNED_INTEGER_CHECK
204
+ {
205
+ _Size __z = __loadword<_Size>(__s + 24);
206
+ _Size __a = __loadword<_Size>(__s) +
207
+ (__len + __loadword<_Size>(__s + __len - 16)) * __k0;
208
+ _Size __b = __rotate(__a + __z, 52);
209
+ _Size __c = __rotate(__a, 37);
210
+ __a += __loadword<_Size>(__s + 8);
211
+ __c += __rotate(__a, 7);
212
+ __a += __loadword<_Size>(__s + 16);
213
+ _Size __vf = __a + __z;
214
+ _Size __vs = __b + __rotate(__a, 31) + __c;
215
+ __a = __loadword<_Size>(__s + 16) + __loadword<_Size>(__s + __len - 32);
216
+ __z += __loadword<_Size>(__s + __len - 8);
217
+ __b = __rotate(__a + __z, 52);
218
+ __c = __rotate(__a, 37);
219
+ __a += __loadword<_Size>(__s + __len - 24);
220
+ __c += __rotate(__a, 7);
221
+ __a += __loadword<_Size>(__s + __len - 16);
222
+ _Size __wf = __a + __z;
223
+ _Size __ws = __b + __rotate(__a, 31) + __c;
224
+ _Size __r = __shift_mix((__vf + __ws) * __k2 + (__wf + __vs) * __k0);
225
+ return __shift_mix(__r * __k0 + __vs) * __k2;
226
+ }
227
+ };
228
+
229
+ // cityhash64
230
+ template <class _Size>
231
+ _Size
232
+ __murmur2_or_cityhash<_Size, 64>::operator()(const void* __key, _Size __len)
233
+ {
234
+ const char* __s = static_cast<const char*>(__key);
235
+ if (__len <= 32) {
236
+ if (__len <= 16) {
237
+ return __hash_len_0_to_16(__s, __len);
238
+ } else {
239
+ return __hash_len_17_to_32(__s, __len);
240
+ }
241
+ } else if (__len <= 64) {
242
+ return __hash_len_33_to_64(__s, __len);
243
+ }
244
+
245
+ // For strings over 64 bytes we hash the end first, and then as we
246
+ // loop we keep 56 bytes of state: v, w, x, y, and z.
247
+ _Size __x = __loadword<_Size>(__s + __len - 40);
248
+ _Size __y = __loadword<_Size>(__s + __len - 16) +
249
+ __loadword<_Size>(__s + __len - 56);
250
+ _Size __z = __hash_len_16(__loadword<_Size>(__s + __len - 48) + __len,
251
+ __loadword<_Size>(__s + __len - 24));
252
+ pair<_Size, _Size> __v = __weak_hash_len_32_with_seeds(__s + __len - 64, __len, __z);
253
+ pair<_Size, _Size> __w = __weak_hash_len_32_with_seeds(__s + __len - 32, __y + __k1, __x);
254
+ __x = __x * __k1 + __loadword<_Size>(__s);
255
+
256
+ // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
257
+ __len = (__len - 1) & ~static_cast<_Size>(63);
258
+ do {
259
+ __x = __rotate(__x + __y + __v.first + __loadword<_Size>(__s + 8), 37) * __k1;
260
+ __y = __rotate(__y + __v.second + __loadword<_Size>(__s + 48), 42) * __k1;
261
+ __x ^= __w.second;
262
+ __y += __v.first + __loadword<_Size>(__s + 40);
263
+ __z = __rotate(__z + __w.first, 33) * __k1;
264
+ __v = __weak_hash_len_32_with_seeds(__s, __v.second * __k1, __x + __w.first);
265
+ __w = __weak_hash_len_32_with_seeds(__s + 32, __z + __w.second,
266
+ __y + __loadword<_Size>(__s + 16));
267
+ _CUDA_VSTD::swap(__z, __x);
268
+ __s += 64;
269
+ __len -= 64;
270
+ } while (__len != 0);
271
+ return __hash_len_16(
272
+ __hash_len_16(__v.first, __w.first) + __shift_mix(__y) * __k1 + __z,
273
+ __hash_len_16(__v.second, __w.second) + __x);
274
+ }
275
+
276
+ template <class _Tp, size_t = sizeof(_Tp) / sizeof(size_t)>
277
+ struct __scalar_hash;
278
+
279
+ template <class _Tp>
280
+ struct __scalar_hash<_Tp, 0>
281
+ : public __unary_function<_Tp, size_t>
282
+ {
283
+ _LIBCUDACXX_INLINE_VISIBILITY
284
+ size_t operator()(_Tp __v) const _NOEXCEPT
285
+ {
286
+ union
287
+ {
288
+ _Tp __t;
289
+ size_t __a;
290
+ } __u;
291
+ __u.__a = 0;
292
+ __u.__t = __v;
293
+ return __u.__a;
294
+ }
295
+ };
296
+
297
+ template <class _Tp>
298
+ struct __scalar_hash<_Tp, 1>
299
+ : public __unary_function<_Tp, size_t>
300
+ {
301
+ _LIBCUDACXX_INLINE_VISIBILITY
302
+ size_t operator()(_Tp __v) const _NOEXCEPT
303
+ {
304
+ union
305
+ {
306
+ _Tp __t;
307
+ size_t __a;
308
+ } __u;
309
+ __u.__t = __v;
310
+ return __u.__a;
311
+ }
312
+ };
313
+
314
+ template <class _Tp>
315
+ struct __scalar_hash<_Tp, 2>
316
+ : public __unary_function<_Tp, size_t>
317
+ {
318
+ _LIBCUDACXX_INLINE_VISIBILITY
319
+ size_t operator()(_Tp __v) const _NOEXCEPT
320
+ {
321
+ union
322
+ {
323
+ _Tp __t;
324
+ struct
325
+ {
326
+ size_t __a;
327
+ size_t __b;
328
+ } __s;
329
+ } __u;
330
+ __u.__t = __v;
331
+ return __murmur2_or_cityhash<size_t>()(&__u, sizeof(__u));
332
+ }
333
+ };
334
+
335
+ template <class _Tp>
336
+ struct __scalar_hash<_Tp, 3>
337
+ : public __unary_function<_Tp, size_t>
338
+ {
339
+ _LIBCUDACXX_INLINE_VISIBILITY
340
+ size_t operator()(_Tp __v) const _NOEXCEPT
341
+ {
342
+ union
343
+ {
344
+ _Tp __t;
345
+ struct
346
+ {
347
+ size_t __a;
348
+ size_t __b;
349
+ size_t __c;
350
+ } __s;
351
+ } __u;
352
+ __u.__t = __v;
353
+ return __murmur2_or_cityhash<size_t>()(&__u, sizeof(__u));
354
+ }
355
+ };
356
+
357
+ template <class _Tp>
358
+ struct __scalar_hash<_Tp, 4>
359
+ : public __unary_function<_Tp, size_t>
360
+ {
361
+ _LIBCUDACXX_INLINE_VISIBILITY
362
+ size_t operator()(_Tp __v) const _NOEXCEPT
363
+ {
364
+ union
365
+ {
366
+ _Tp __t;
367
+ struct
368
+ {
369
+ size_t __a;
370
+ size_t __b;
371
+ size_t __c;
372
+ size_t __d;
373
+ } __s;
374
+ } __u;
375
+ __u.__t = __v;
376
+ return __murmur2_or_cityhash<size_t>()(&__u, sizeof(__u));
377
+ }
378
+ };
379
+
380
+ struct _PairT {
381
+ size_t first;
382
+ size_t second;
383
+ };
384
+
385
+ _LIBCUDACXX_INLINE_VISIBILITY
386
+ inline size_t __hash_combine(size_t __lhs, size_t __rhs) _NOEXCEPT {
387
+ typedef __scalar_hash<_PairT> _HashT;
388
+ const _PairT __p = {__lhs, __rhs};
389
+ return _HashT()(__p);
390
+ }
391
+
392
+ template<class _Tp>
393
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<_Tp*>
394
+ : public __unary_function<_Tp*, size_t>
395
+ {
396
+ _LIBCUDACXX_INLINE_VISIBILITY
397
+ size_t operator()(_Tp* __v) const _NOEXCEPT
398
+ {
399
+ union
400
+ {
401
+ _Tp* __t;
402
+ size_t __a;
403
+ } __u;
404
+ __u.__t = __v;
405
+ return __murmur2_or_cityhash<size_t>()(&__u, sizeof(__u));
406
+ }
407
+ };
408
+
409
+ template <>
410
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<bool>
411
+ : public __unary_function<bool, size_t>
412
+ {
413
+ _LIBCUDACXX_INLINE_VISIBILITY
414
+ size_t operator()(bool __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
415
+ };
416
+
417
+ template <>
418
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<char>
419
+ : public __unary_function<char, size_t>
420
+ {
421
+ _LIBCUDACXX_INLINE_VISIBILITY
422
+ size_t operator()(char __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
423
+ };
424
+
425
+ template <>
426
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<signed char>
427
+ : public __unary_function<signed char, size_t>
428
+ {
429
+ _LIBCUDACXX_INLINE_VISIBILITY
430
+ size_t operator()(signed char __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
431
+ };
432
+
433
+ template <>
434
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<unsigned char>
435
+ : public __unary_function<unsigned char, size_t>
436
+ {
437
+ _LIBCUDACXX_INLINE_VISIBILITY
438
+ size_t operator()(unsigned char __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
439
+ };
440
+
441
+ #ifndef _LIBCUDACXX_HAS_NO_UNICODE_CHARS
442
+ template <>
443
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<char16_t>
444
+ : public __unary_function<char16_t, size_t>
445
+ {
446
+ _LIBCUDACXX_INLINE_VISIBILITY
447
+ size_t operator()(char16_t __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
448
+ };
449
+
450
+ template <>
451
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<char32_t>
452
+ : public __unary_function<char32_t, size_t>
453
+ {
454
+ _LIBCUDACXX_INLINE_VISIBILITY
455
+ size_t operator()(char32_t __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
456
+ };
457
+ #endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS
458
+
459
+ #ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
460
+ template <>
461
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<wchar_t>
462
+ : public __unary_function<wchar_t, size_t>
463
+ {
464
+ _LIBCUDACXX_INLINE_VISIBILITY
465
+ size_t operator()(wchar_t __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
466
+ };
467
+ #endif // _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
468
+
469
+ template <>
470
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<short>
471
+ : public __unary_function<short, size_t>
472
+ {
473
+ _LIBCUDACXX_INLINE_VISIBILITY
474
+ size_t operator()(short __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
475
+ };
476
+
477
+ template <>
478
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<unsigned short>
479
+ : public __unary_function<unsigned short, size_t>
480
+ {
481
+ _LIBCUDACXX_INLINE_VISIBILITY
482
+ size_t operator()(unsigned short __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
483
+ };
484
+
485
+ template <>
486
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<int>
487
+ : public __unary_function<int, size_t>
488
+ {
489
+ _LIBCUDACXX_INLINE_VISIBILITY
490
+ size_t operator()(int __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
491
+ };
492
+
493
+ template <>
494
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<unsigned int>
495
+ : public __unary_function<unsigned int, size_t>
496
+ {
497
+ _LIBCUDACXX_INLINE_VISIBILITY
498
+ size_t operator()(unsigned int __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
499
+ };
500
+
501
+ template <>
502
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<long>
503
+ : public __unary_function<long, size_t>
504
+ {
505
+ _LIBCUDACXX_INLINE_VISIBILITY
506
+ size_t operator()(long __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
507
+ };
508
+
509
+ template <>
510
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<unsigned long>
511
+ : public __unary_function<unsigned long, size_t>
512
+ {
513
+ _LIBCUDACXX_INLINE_VISIBILITY
514
+ size_t operator()(unsigned long __v) const _NOEXCEPT {return static_cast<size_t>(__v);}
515
+ };
516
+
517
+ template <>
518
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<long long>
519
+ : public __scalar_hash<long long>
520
+ {
521
+ };
522
+
523
+ template <>
524
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<unsigned long long>
525
+ : public __scalar_hash<unsigned long long>
526
+ {
527
+ };
528
+
529
+ #ifndef _LIBCUDACXX_HAS_NO_INT128
530
+
531
+ template <>
532
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<__int128_t>
533
+ : public __scalar_hash<__int128_t>
534
+ {
535
+ };
536
+
537
+ template <>
538
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<__uint128_t>
539
+ : public __scalar_hash<__uint128_t>
540
+ {
541
+ };
542
+
543
+ #endif
544
+
545
+ template <>
546
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<float>
547
+ : public __scalar_hash<float>
548
+ {
549
+ _LIBCUDACXX_INLINE_VISIBILITY
550
+ size_t operator()(float __v) const _NOEXCEPT
551
+ {
552
+ // -0.0 and 0.0 should return same hash
553
+ if (__v == 0.0f)
554
+ return 0;
555
+ return __scalar_hash<float>::operator()(__v);
556
+ }
557
+ };
558
+
559
+ template <>
560
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<double>
561
+ : public __scalar_hash<double>
562
+ {
563
+ _LIBCUDACXX_INLINE_VISIBILITY
564
+ size_t operator()(double __v) const _NOEXCEPT
565
+ {
566
+ // -0.0 and 0.0 should return same hash
567
+ if (__v == 0.0)
568
+ return 0;
569
+ return __scalar_hash<double>::operator()(__v);
570
+ }
571
+ };
572
+
573
+ template <>
574
+ struct _LIBCUDACXX_TEMPLATE_VIS hash<long double>
575
+ : public __scalar_hash<long double>
576
+ {
577
+ _LIBCUDACXX_INLINE_VISIBILITY
578
+ size_t operator()(long double __v) const _NOEXCEPT
579
+ {
580
+ // -0.0 and 0.0 should return same hash
581
+ if (__v == 0.0L)
582
+ return 0;
583
+ #if defined(__i386__) || (defined(__x86_64__) && defined(__ILP32__))
584
+ // Zero out padding bits
585
+ union
586
+ {
587
+ long double __t;
588
+ struct
589
+ {
590
+ size_t __a;
591
+ size_t __b;
592
+ size_t __c;
593
+ size_t __d;
594
+ } __s;
595
+ } __u;
596
+ __u.__s.__a = 0;
597
+ __u.__s.__b = 0;
598
+ __u.__s.__c = 0;
599
+ __u.__s.__d = 0;
600
+ __u.__t = __v;
601
+ return __u.__s.__a ^ __u.__s.__b ^ __u.__s.__c ^ __u.__s.__d;
602
+ #elif defined(__x86_64__)
603
+ // Zero out padding bits
604
+ union
605
+ {
606
+ long double __t;
607
+ struct
608
+ {
609
+ size_t __a;
610
+ size_t __b;
611
+ } __s;
612
+ } __u;
613
+ __u.__s.__a = 0;
614
+ __u.__s.__b = 0;
615
+ __u.__t = __v;
616
+ return __u.__s.__a ^ __u.__s.__b;
617
+ #else
618
+ return __scalar_hash<long double>::operator()(__v);
619
+ #endif
620
+ }
621
+ };
622
+
623
+ template <class _Tp, bool = is_enum<_Tp>::value>
624
+ struct _LIBCUDACXX_TEMPLATE_VIS __enum_hash
625
+ : public __unary_function<_Tp, size_t>
626
+ {
627
+ _LIBCUDACXX_INLINE_VISIBILITY
628
+ size_t operator()(_Tp __v) const _NOEXCEPT
629
+ {
630
+ typedef typename underlying_type<_Tp>::type type;
631
+ return hash<type>()(static_cast<type>(__v));
632
+ }
633
+ };
634
+ template <class _Tp>
635
+ struct _LIBCUDACXX_TEMPLATE_VIS __enum_hash<_Tp, false> {
636
+ __enum_hash() = delete;
637
+ __enum_hash(__enum_hash const&) = delete;
638
+ __enum_hash& operator=(__enum_hash const&) = delete;
639
+ };
640
+
641
+ template <class _Tp>
642
+ struct _LIBCUDACXX_TEMPLATE_VIS hash : public __enum_hash<_Tp>
643
+ {
644
+ };
645
+
646
#if _LIBCUDACXX_STD_VER > 14

// hash specialization for nullptr_t (guarded for C++17 and later):
// returns a fixed arbitrary constant, since all nullptr values are equal.
template <>
struct _LIBCUDACXX_TEMPLATE_VIS hash<nullptr_t>
    : public __unary_function<nullptr_t, size_t>
{
    _LIBCUDACXX_INLINE_VISIBILITY
    size_t operator()(nullptr_t) const _NOEXCEPT {
        return 662607004ull;
    }
};
#endif
658
+
659
+ #ifndef _LIBCUDACXX_CXX03_LANG
660
+ template <class _Key, class _Hash>
661
+ using __check_hash_requirements _LIBCUDACXX_NODEBUG_TYPE = integral_constant<bool,
662
+ is_copy_constructible<_Hash>::value &&
663
+ is_move_constructible<_Hash>::value &&
664
+ __invokable_r<size_t, _Hash, _Key const&>::value
665
+ >;
666
+
667
+ template <class _Key, class _Hash = hash<_Key> >
668
+ using __has_enabled_hash _LIBCUDACXX_NODEBUG_TYPE = integral_constant<bool,
669
+ __check_hash_requirements<_Key, _Hash>::value &&
670
+ is_default_constructible<_Hash>::value
671
+ >;
672
+
673
+ #if _LIBCUDACXX_STD_VER > 14
674
+ template <class _Type, class>
675
+ using __enable_hash_helper_imp _LIBCUDACXX_NODEBUG_TYPE = _Type;
676
+
677
+ template <class _Type, class ..._Keys>
678
+ using __enable_hash_helper _LIBCUDACXX_NODEBUG_TYPE = __enable_hash_helper_imp<_Type,
679
+ __enable_if_t<__all<__has_enabled_hash<_Keys>::value...>::value>
680
+ >;
681
+ #else
682
+ template <class _Type, class ...>
683
+ using __enable_hash_helper _LIBCUDACXX_NODEBUG_TYPE = _Type;
684
+ #endif
685
+
686
+ #endif // !_LIBCUDACXX_CXX03_LANG
687
+
688
+ _LIBCUDACXX_END_NAMESPACE_STD
689
+
690
+ #endif // __cuda_std__
691
+
692
+ #endif // _LIBCUDACXX___FUNCTIONAL_HASH_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/identity.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_IDENTITY_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_IDENTITY_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__utility/forward.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ struct __identity {
27
+ template <class _Tp>
28
+ _LIBCUDACXX_NODISCARD_EXT _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR _Tp&& operator()(_Tp&& __t) const _NOEXCEPT {
29
+ return _CUDA_VSTD::forward<_Tp>(__t);
30
+ }
31
+
32
+ using is_transparent = void;
33
+ };
34
+
35
+ #if _LIBCUDACXX_STD_VER > 11
36
+
37
+ struct identity {
38
+ template<class _Tp>
39
+ _LIBCUDACXX_NODISCARD_EXT _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp&& operator()(_Tp&& __t) const noexcept
40
+ {
41
+ return _CUDA_VSTD::forward<_Tp>(__t);
42
+ }
43
+
44
+ using is_transparent = void;
45
+ };
46
+ #endif // _LIBCUDACXX_STD_VER > 11
47
+
48
+ _LIBCUDACXX_END_NAMESPACE_STD
49
+
50
+ #endif // _LIBCUDACXX___FUNCTIONAL_IDENTITY_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/is_transparent.h ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_IS_TRANSPARENT
12
+ #define _LIBCUDACXX___FUNCTIONAL_IS_TRANSPARENT
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__type_traits/integral_constant.h"
19
+ #include "../__type_traits/void_t.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ #if _LIBCUDACXX_STD_VER > 11
28
+
29
+ template <class _Tp, class, class = void>
30
+ struct __is_transparent : false_type {};
31
+
32
+ template <class _Tp, class _Up>
33
+ struct __is_transparent<_Tp, _Up, __void_t<typename _Tp::is_transparent> >
34
+ : true_type {};
35
+
36
+ #endif
37
+
38
+ _LIBCUDACXX_END_NAMESPACE_STD
39
+
40
+ #endif // _LIBCUDACXX___FUNCTIONAL_IS_TRANSPARENT
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/mem_fn.h ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_MEM_FN_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_MEM_FN_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__functional/binary_function.h"
19
+ #include "../__functional/invoke.h"
20
+ #include "../__functional/weak_result_type.h"
21
+ #include "../__utility/forward.h"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ template <class _Tp>
30
+ class __mem_fn : public __weak_result_type<_Tp>
31
+ {
32
+ public:
33
+ // types
34
+ typedef _Tp type;
35
+ private:
36
+ type __f_;
37
+
38
+ public:
39
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
40
+ __mem_fn(type __f) _NOEXCEPT : __f_(__f) {}
41
+
42
+ // invoke
43
+ template <class... _ArgTypes>
44
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
45
+ typename __invoke_return<type, _ArgTypes...>::type
46
+ operator() (_ArgTypes&&... __args) const {
47
+ return _CUDA_VSTD::__invoke(__f_, _CUDA_VSTD::forward<_ArgTypes>(__args)...);
48
+ }
49
+ };
50
+
51
+ template<class _Rp, class _Tp>
52
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
53
+ __mem_fn<_Rp _Tp::*>
54
+ mem_fn(_Rp _Tp::* __pm) _NOEXCEPT
55
+ {
56
+ return __mem_fn<_Rp _Tp::*>(__pm);
57
+ }
58
+
59
+ _LIBCUDACXX_END_NAMESPACE_STD
60
+
61
+ #endif // _LIBCUDACXX___FUNCTIONAL_MEM_FN_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/operations.h ADDED
@@ -0,0 +1,622 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_OPERATIONS_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_OPERATIONS_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__functional/binary_function.h"
19
+ #include "../__functional/unary_function.h"
20
+ #include "../__utility/forward.h"
21
+
22
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
23
+ #pragma GCC system_header
24
+ #endif
25
+
26
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
27
+
28
+ // Arithmetic operations
29
+
30
+ #if _LIBCUDACXX_STD_VER > 11
31
+ template <class _Tp = void>
32
+ #else
33
+ template <class _Tp>
34
+ #endif
35
+ struct _LIBCUDACXX_TEMPLATE_VIS plus
36
+ : __binary_function<_Tp, _Tp, _Tp>
37
+ {
38
+ typedef _Tp __result_type; // used by valarray
39
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
40
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
41
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
42
+ {return __x + __y;}
43
+ };
44
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(plus);
45
+
46
+ #if _LIBCUDACXX_STD_VER > 11
47
+ template <>
48
+ struct _LIBCUDACXX_TEMPLATE_VIS plus<void>
49
+ {
50
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
51
+ template <class _T1, class _T2>
52
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
53
+ auto operator()(_T1&& __t, _T2&& __u) const
54
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) + _CUDA_VSTD::forward<_T2>(__u)))
55
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) + _CUDA_VSTD::forward<_T2>(__u))
56
+ { return _CUDA_VSTD::forward<_T1>(__t) + _CUDA_VSTD::forward<_T2>(__u); }
57
+ typedef void is_transparent;
58
+ };
59
+ #endif
60
+
61
+ #if _LIBCUDACXX_STD_VER > 11
62
+ template <class _Tp = void>
63
+ #else
64
+ template <class _Tp>
65
+ #endif
66
+ struct _LIBCUDACXX_TEMPLATE_VIS minus
67
+ : __binary_function<_Tp, _Tp, _Tp>
68
+ {
69
+ typedef _Tp __result_type; // used by valarray
70
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
71
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
72
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
73
+ {return __x - __y;}
74
+ };
75
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(minus);
76
+
77
+ #if _LIBCUDACXX_STD_VER > 11
78
+ template <>
79
+ struct _LIBCUDACXX_TEMPLATE_VIS minus<void>
80
+ {
81
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
82
+ template <class _T1, class _T2>
83
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
84
+ auto operator()(_T1&& __t, _T2&& __u) const
85
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) - _CUDA_VSTD::forward<_T2>(__u)))
86
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) - _CUDA_VSTD::forward<_T2>(__u))
87
+ { return _CUDA_VSTD::forward<_T1>(__t) - _CUDA_VSTD::forward<_T2>(__u); }
88
+ typedef void is_transparent;
89
+ };
90
+ #endif
91
+
92
+ #if _LIBCUDACXX_STD_VER > 11
93
+ template <class _Tp = void>
94
+ #else
95
+ template <class _Tp>
96
+ #endif
97
+ struct _LIBCUDACXX_TEMPLATE_VIS multiplies
98
+ : __binary_function<_Tp, _Tp, _Tp>
99
+ {
100
+ typedef _Tp __result_type; // used by valarray
101
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
102
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
103
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
104
+ {return __x * __y;}
105
+ };
106
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(multiplies);
107
+
108
+ #if _LIBCUDACXX_STD_VER > 11
109
+ template <>
110
+ struct _LIBCUDACXX_TEMPLATE_VIS multiplies<void>
111
+ {
112
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
113
+ template <class _T1, class _T2>
114
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
115
+ auto operator()(_T1&& __t, _T2&& __u) const
116
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) * _CUDA_VSTD::forward<_T2>(__u)))
117
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) * _CUDA_VSTD::forward<_T2>(__u))
118
+ { return _CUDA_VSTD::forward<_T1>(__t) * _CUDA_VSTD::forward<_T2>(__u); }
119
+ typedef void is_transparent;
120
+ };
121
+ #endif
122
+
123
+ #if _LIBCUDACXX_STD_VER > 11
124
+ template <class _Tp = void>
125
+ #else
126
+ template <class _Tp>
127
+ #endif
128
+ struct _LIBCUDACXX_TEMPLATE_VIS divides
129
+ : __binary_function<_Tp, _Tp, _Tp>
130
+ {
131
+ typedef _Tp __result_type; // used by valarray
132
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
133
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
134
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
135
+ {return __x / __y;}
136
+ };
137
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(divides);
138
+
139
+ #if _LIBCUDACXX_STD_VER > 11
140
+ template <>
141
+ struct _LIBCUDACXX_TEMPLATE_VIS divides<void>
142
+ {
143
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
144
+ template <class _T1, class _T2>
145
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
146
+ auto operator()(_T1&& __t, _T2&& __u) const
147
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) / _CUDA_VSTD::forward<_T2>(__u)))
148
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) / _CUDA_VSTD::forward<_T2>(__u))
149
+ { return _CUDA_VSTD::forward<_T1>(__t) / _CUDA_VSTD::forward<_T2>(__u); }
150
+ typedef void is_transparent;
151
+ };
152
+ #endif
153
+
154
+ #if _LIBCUDACXX_STD_VER > 11
155
+ template <class _Tp = void>
156
+ #else
157
+ template <class _Tp>
158
+ #endif
159
+ struct _LIBCUDACXX_TEMPLATE_VIS modulus
160
+ : __binary_function<_Tp, _Tp, _Tp>
161
+ {
162
+ typedef _Tp __result_type; // used by valarray
163
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
164
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
165
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
166
+ {return __x % __y;}
167
+ };
168
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(modulus);
169
+
170
+ #if _LIBCUDACXX_STD_VER > 11
171
+ template <>
172
+ struct _LIBCUDACXX_TEMPLATE_VIS modulus<void>
173
+ {
174
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
175
+ template <class _T1, class _T2>
176
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
177
+ auto operator()(_T1&& __t, _T2&& __u) const
178
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) % _CUDA_VSTD::forward<_T2>(__u)))
179
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) % _CUDA_VSTD::forward<_T2>(__u))
180
+ { return _CUDA_VSTD::forward<_T1>(__t) % _CUDA_VSTD::forward<_T2>(__u); }
181
+ typedef void is_transparent;
182
+ };
183
+ #endif
184
+
185
+ #if _LIBCUDACXX_STD_VER > 11
186
+ template <class _Tp = void>
187
+ #else
188
+ template <class _Tp>
189
+ #endif
190
+ struct _LIBCUDACXX_TEMPLATE_VIS negate
191
+ : __unary_function<_Tp, _Tp>
192
+ {
193
+ typedef _Tp __result_type; // used by valarray
194
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
195
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
196
+ _Tp operator()(const _Tp& __x) const
197
+ {return -__x;}
198
+ };
199
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(negate);
200
+
201
+ #if _LIBCUDACXX_STD_VER > 11
202
+ template <>
203
+ struct _LIBCUDACXX_TEMPLATE_VIS negate<void>
204
+ {
205
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
206
+ template <class _Tp>
207
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
208
+ auto operator()(_Tp&& __x) const
209
+ noexcept(noexcept(- _CUDA_VSTD::forward<_Tp>(__x)))
210
+ -> decltype( - _CUDA_VSTD::forward<_Tp>(__x))
211
+ { return - _CUDA_VSTD::forward<_Tp>(__x); }
212
+ typedef void is_transparent;
213
+ };
214
+ #endif
215
+
216
+ // Bitwise operations
217
+
218
+ #if _LIBCUDACXX_STD_VER > 11
219
+ template <class _Tp = void>
220
+ #else
221
+ template <class _Tp>
222
+ #endif
223
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_and
224
+ : __binary_function<_Tp, _Tp, _Tp>
225
+ {
226
+ typedef _Tp __result_type; // used by valarray
227
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
228
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
229
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
230
+ {return __x & __y;}
231
+ };
232
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(bit_and);
233
+
234
+ #if _LIBCUDACXX_STD_VER > 11
235
+ template <>
236
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_and<void>
237
+ {
238
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
239
+ template <class _T1, class _T2>
240
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
241
+ auto operator()(_T1&& __t, _T2&& __u) const
242
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) & _CUDA_VSTD::forward<_T2>(__u)))
243
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) & _CUDA_VSTD::forward<_T2>(__u))
244
+ { return _CUDA_VSTD::forward<_T1>(__t) & _CUDA_VSTD::forward<_T2>(__u); }
245
+ typedef void is_transparent;
246
+ };
247
+ #endif
248
+
249
+ #if _LIBCUDACXX_STD_VER > 11
250
+ template <class _Tp = void>
251
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_not
252
+ : __unary_function<_Tp, _Tp>
253
+ {
254
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
255
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
256
+ _Tp operator()(const _Tp& __x) const
257
+ {return ~__x;}
258
+ };
259
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(bit_not);
260
+
261
+ template <>
262
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_not<void>
263
+ {
264
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
265
+ template <class _Tp>
266
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
267
+ auto operator()(_Tp&& __x) const
268
+ noexcept(noexcept(~_CUDA_VSTD::forward<_Tp>(__x)))
269
+ -> decltype( ~_CUDA_VSTD::forward<_Tp>(__x))
270
+ { return ~_CUDA_VSTD::forward<_Tp>(__x); }
271
+ typedef void is_transparent;
272
+ };
273
+ #endif
274
+
275
+ #if _LIBCUDACXX_STD_VER > 11
276
+ template <class _Tp = void>
277
+ #else
278
+ template <class _Tp>
279
+ #endif
280
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_or
281
+ : __binary_function<_Tp, _Tp, _Tp>
282
+ {
283
+ typedef _Tp __result_type; // used by valarray
284
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
285
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
286
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
287
+ {return __x | __y;}
288
+ };
289
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(bit_or);
290
+
291
+ #if _LIBCUDACXX_STD_VER > 11
292
+ template <>
293
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_or<void>
294
+ {
295
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
296
+ template <class _T1, class _T2>
297
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
298
+ auto operator()(_T1&& __t, _T2&& __u) const
299
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) | _CUDA_VSTD::forward<_T2>(__u)))
300
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) | _CUDA_VSTD::forward<_T2>(__u))
301
+ { return _CUDA_VSTD::forward<_T1>(__t) | _CUDA_VSTD::forward<_T2>(__u); }
302
+ typedef void is_transparent;
303
+ };
304
+ #endif
305
+
306
+ #if _LIBCUDACXX_STD_VER > 11
307
+ template <class _Tp = void>
308
+ #else
309
+ template <class _Tp>
310
+ #endif
311
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_xor
312
+ : __binary_function<_Tp, _Tp, _Tp>
313
+ {
314
+ typedef _Tp __result_type; // used by valarray
315
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
316
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
317
+ _Tp operator()(const _Tp& __x, const _Tp& __y) const
318
+ {return __x ^ __y;}
319
+ };
320
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(bit_xor);
321
+
322
+ #if _LIBCUDACXX_STD_VER > 11
323
+ template <>
324
+ struct _LIBCUDACXX_TEMPLATE_VIS bit_xor<void>
325
+ {
326
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
327
+ template <class _T1, class _T2>
328
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
329
+ auto operator()(_T1&& __t, _T2&& __u) const
330
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) ^ _CUDA_VSTD::forward<_T2>(__u)))
331
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) ^ _CUDA_VSTD::forward<_T2>(__u))
332
+ { return _CUDA_VSTD::forward<_T1>(__t) ^ _CUDA_VSTD::forward<_T2>(__u); }
333
+ typedef void is_transparent;
334
+ };
335
+ #endif
336
+
337
+ // Comparison operations
338
+
339
+ #if _LIBCUDACXX_STD_VER > 11
340
+ template <class _Tp = void>
341
+ #else
342
+ template <class _Tp>
343
+ #endif
344
+ struct _LIBCUDACXX_TEMPLATE_VIS equal_to
345
+ : __binary_function<_Tp, _Tp, bool>
346
+ {
347
+ typedef bool __result_type; // used by valarray
348
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
349
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
350
+ bool operator()(const _Tp& __x, const _Tp& __y) const
351
+ {return __x == __y;}
352
+ };
353
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(equal_to);
354
+
355
+ #if _LIBCUDACXX_STD_VER > 11
356
+ template <>
357
+ struct _LIBCUDACXX_TEMPLATE_VIS equal_to<void>
358
+ {
359
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
360
+ template <class _T1, class _T2>
361
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
362
+ auto operator()(_T1&& __t, _T2&& __u) const
363
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) == _CUDA_VSTD::forward<_T2>(__u)))
364
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) == _CUDA_VSTD::forward<_T2>(__u))
365
+ { return _CUDA_VSTD::forward<_T1>(__t) == _CUDA_VSTD::forward<_T2>(__u); }
366
+ typedef void is_transparent;
367
+ };
368
+ #endif
369
+
370
+ #if _LIBCUDACXX_STD_VER > 11
371
+ template <class _Tp = void>
372
+ #else
373
+ template <class _Tp>
374
+ #endif
375
+ struct _LIBCUDACXX_TEMPLATE_VIS not_equal_to
376
+ : __binary_function<_Tp, _Tp, bool>
377
+ {
378
+ typedef bool __result_type; // used by valarray
379
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
380
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
381
+ bool operator()(const _Tp& __x, const _Tp& __y) const
382
+ {return __x != __y;}
383
+ };
384
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(not_equal_to);
385
+
386
+ #if _LIBCUDACXX_STD_VER > 11
387
+ template <>
388
+ struct _LIBCUDACXX_TEMPLATE_VIS not_equal_to<void>
389
+ {
390
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
391
+ template <class _T1, class _T2>
392
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
393
+ auto operator()(_T1&& __t, _T2&& __u) const
394
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) != _CUDA_VSTD::forward<_T2>(__u)))
395
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) != _CUDA_VSTD::forward<_T2>(__u))
396
+ { return _CUDA_VSTD::forward<_T1>(__t) != _CUDA_VSTD::forward<_T2>(__u); }
397
+ typedef void is_transparent;
398
+ };
399
+ #endif
400
+
401
+ #if _LIBCUDACXX_STD_VER > 11
402
+ template <class _Tp = void>
403
+ #else
404
+ template <class _Tp>
405
+ #endif
406
+ struct _LIBCUDACXX_TEMPLATE_VIS less
407
+ : __binary_function<_Tp, _Tp, bool>
408
+ {
409
+ typedef bool __result_type; // used by valarray
410
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
411
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
412
+ bool operator()(const _Tp& __x, const _Tp& __y) const
413
+ {return __x < __y;}
414
+ };
415
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(less);
416
+
417
+ #if _LIBCUDACXX_STD_VER > 11
418
+ template <>
419
+ struct _LIBCUDACXX_TEMPLATE_VIS less<void>
420
+ {
421
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
422
+ template <class _T1, class _T2>
423
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
424
+ auto operator()(_T1&& __t, _T2&& __u) const
425
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) < _CUDA_VSTD::forward<_T2>(__u)))
426
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) < _CUDA_VSTD::forward<_T2>(__u))
427
+ { return _CUDA_VSTD::forward<_T1>(__t) < _CUDA_VSTD::forward<_T2>(__u); }
428
+ typedef void is_transparent;
429
+ };
430
+ #endif
431
+
432
+ #if _LIBCUDACXX_STD_VER > 11
433
+ template <class _Tp = void>
434
+ #else
435
+ template <class _Tp>
436
+ #endif
437
+ struct _LIBCUDACXX_TEMPLATE_VIS less_equal
438
+ : __binary_function<_Tp, _Tp, bool>
439
+ {
440
+ typedef bool __result_type; // used by valarray
441
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
442
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
443
+ bool operator()(const _Tp& __x, const _Tp& __y) const
444
+ {return __x <= __y;}
445
+ };
446
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(less_equal);
447
+
448
+ #if _LIBCUDACXX_STD_VER > 11
449
+ template <>
450
+ struct _LIBCUDACXX_TEMPLATE_VIS less_equal<void>
451
+ {
452
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
453
+ template <class _T1, class _T2>
454
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
455
+ auto operator()(_T1&& __t, _T2&& __u) const
456
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) <= _CUDA_VSTD::forward<_T2>(__u)))
457
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) <= _CUDA_VSTD::forward<_T2>(__u))
458
+ { return _CUDA_VSTD::forward<_T1>(__t) <= _CUDA_VSTD::forward<_T2>(__u); }
459
+ typedef void is_transparent;
460
+ };
461
+ #endif
462
+
463
+ #if _LIBCUDACXX_STD_VER > 11
464
+ template <class _Tp = void>
465
+ #else
466
+ template <class _Tp>
467
+ #endif
468
+ struct _LIBCUDACXX_TEMPLATE_VIS greater_equal
469
+ : __binary_function<_Tp, _Tp, bool>
470
+ {
471
+ typedef bool __result_type; // used by valarray
472
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
473
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
474
+ bool operator()(const _Tp& __x, const _Tp& __y) const
475
+ {return __x >= __y;}
476
+ };
477
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(greater_equal);
478
+
479
+ #if _LIBCUDACXX_STD_VER > 11
480
+ template <>
481
+ struct _LIBCUDACXX_TEMPLATE_VIS greater_equal<void>
482
+ {
483
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
484
+ template <class _T1, class _T2>
485
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
486
+ auto operator()(_T1&& __t, _T2&& __u) const
487
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) >= _CUDA_VSTD::forward<_T2>(__u)))
488
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) >= _CUDA_VSTD::forward<_T2>(__u))
489
+ { return _CUDA_VSTD::forward<_T1>(__t) >= _CUDA_VSTD::forward<_T2>(__u); }
490
+ typedef void is_transparent;
491
+ };
492
+ #endif
493
+
494
+ #if _LIBCUDACXX_STD_VER > 11
495
+ template <class _Tp = void>
496
+ #else
497
+ template <class _Tp>
498
+ #endif
499
+ struct _LIBCUDACXX_TEMPLATE_VIS greater
500
+ : __binary_function<_Tp, _Tp, bool>
501
+ {
502
+ typedef bool __result_type; // used by valarray
503
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
504
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
505
+ bool operator()(const _Tp& __x, const _Tp& __y) const
506
+ {return __x > __y;}
507
+ };
508
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(greater);
509
+
510
+ #if _LIBCUDACXX_STD_VER > 11
511
+ template <>
512
+ struct _LIBCUDACXX_TEMPLATE_VIS greater<void>
513
+ {
514
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
515
+ template <class _T1, class _T2>
516
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
517
+ auto operator()(_T1&& __t, _T2&& __u) const
518
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) > _CUDA_VSTD::forward<_T2>(__u)))
519
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) > _CUDA_VSTD::forward<_T2>(__u))
520
+ { return _CUDA_VSTD::forward<_T1>(__t) > _CUDA_VSTD::forward<_T2>(__u); }
521
+ typedef void is_transparent;
522
+ };
523
+ #endif
524
+
525
+ // Logical operations
526
+
527
+ #if _LIBCUDACXX_STD_VER > 11
528
+ template <class _Tp = void>
529
+ #else
530
+ template <class _Tp>
531
+ #endif
532
+ struct _LIBCUDACXX_TEMPLATE_VIS logical_and
533
+ : __binary_function<_Tp, _Tp, bool>
534
+ {
535
+ typedef bool __result_type; // used by valarray
536
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
537
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
538
+ bool operator()(const _Tp& __x, const _Tp& __y) const
539
+ {return __x && __y;}
540
+ };
541
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(logical_and);
542
+
543
+ #if _LIBCUDACXX_STD_VER > 11
544
+ template <>
545
+ struct _LIBCUDACXX_TEMPLATE_VIS logical_and<void>
546
+ {
547
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
548
+ template <class _T1, class _T2>
549
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
550
+ auto operator()(_T1&& __t, _T2&& __u) const
551
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) && _CUDA_VSTD::forward<_T2>(__u)))
552
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) && _CUDA_VSTD::forward<_T2>(__u))
553
+ { return _CUDA_VSTD::forward<_T1>(__t) && _CUDA_VSTD::forward<_T2>(__u); }
554
+ typedef void is_transparent;
555
+ };
556
+ #endif
557
+
558
+ #if _LIBCUDACXX_STD_VER > 11
559
+ template <class _Tp = void>
560
+ #else
561
+ template <class _Tp>
562
+ #endif
563
+ struct _LIBCUDACXX_TEMPLATE_VIS logical_not
564
+ : __unary_function<_Tp, bool>
565
+ {
566
+ typedef bool __result_type; // used by valarray
567
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
568
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
569
+ bool operator()(const _Tp& __x) const
570
+ {return !__x;}
571
+ };
572
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(logical_not);
573
+
574
+ #if _LIBCUDACXX_STD_VER > 11
575
+ template <>
576
+ struct _LIBCUDACXX_TEMPLATE_VIS logical_not<void>
577
+ {
578
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
579
+ template <class _Tp>
580
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
581
+ auto operator()(_Tp&& __x) const
582
+ noexcept(noexcept(!_CUDA_VSTD::forward<_Tp>(__x)))
583
+ -> decltype( !_CUDA_VSTD::forward<_Tp>(__x))
584
+ { return !_CUDA_VSTD::forward<_Tp>(__x); }
585
+ typedef void is_transparent;
586
+ };
587
+ #endif
588
+
589
+ #if _LIBCUDACXX_STD_VER > 11
590
+ template <class _Tp = void>
591
+ #else
592
+ template <class _Tp>
593
+ #endif
594
+ struct _LIBCUDACXX_TEMPLATE_VIS logical_or
595
+ : __binary_function<_Tp, _Tp, bool>
596
+ {
597
+ typedef bool __result_type; // used by valarray
598
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
599
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
600
+ bool operator()(const _Tp& __x, const _Tp& __y) const
601
+ {return __x || __y;}
602
+ };
603
+ _LIBCUDACXX_CTAD_SUPPORTED_FOR_TYPE(logical_or);
604
+
605
+ #if _LIBCUDACXX_STD_VER > 11
606
+ template <>
607
+ struct _LIBCUDACXX_TEMPLATE_VIS logical_or<void>
608
+ {
609
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
610
+ template <class _T1, class _T2>
611
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX11 _LIBCUDACXX_INLINE_VISIBILITY
612
+ auto operator()(_T1&& __t, _T2&& __u) const
613
+ noexcept(noexcept(_CUDA_VSTD::forward<_T1>(__t) || _CUDA_VSTD::forward<_T2>(__u)))
614
+ -> decltype( _CUDA_VSTD::forward<_T1>(__t) || _CUDA_VSTD::forward<_T2>(__u))
615
+ { return _CUDA_VSTD::forward<_T1>(__t) || _CUDA_VSTD::forward<_T2>(__u); }
616
+ typedef void is_transparent;
617
+ };
618
+ #endif
619
+
620
+ _LIBCUDACXX_END_NAMESPACE_STD
621
+
622
+ #endif // _LIBCUDACXX___FUNCTIONAL_OPERATIONS_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/perfect_forward.h ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FUNCTIONAL_PERFECT_FORWARD_H
12
+ #define _LIBCUDACXX___FUNCTIONAL_PERFECT_FORWARD_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__concepts/__concept_macros.h"
19
+ #include "../__functional/invoke.h"
20
+ #include "../__type_traits/enable_if.h"
21
+ #include "../__type_traits/is_constructible.h"
22
+ #include "../__type_traits/is_nothrow_constructible.h"
23
+ #include "../__utility/declval.h"
24
+ #include "../__utility/forward.h"
25
+ #include "../__utility/integer_sequence.h"
26
+ #include "../__utility/move.h"
27
+
28
+ #include "../tuple"
29
+
30
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
31
+ #pragma GCC system_header
32
+ #endif
33
+
34
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
35
+
36
+ #if _LIBCUDACXX_STD_VER > 14
37
+
38
+ template <class _Op, class _Indices, class... _BoundArgs>
39
+ struct __perfect_forward_impl;
40
+
41
+ template <class _Op, size_t... _Idx, class... _BoundArgs>
42
+ struct __perfect_forward_impl<_Op, index_sequence<_Idx...>, _BoundArgs...> {
43
+ private:
44
+ tuple<_BoundArgs...> __bound_args_;
45
+
46
+ public:
47
+ _LIBCUDACXX_TEMPLATE(class... _Args)
48
+ (requires is_constructible_v<tuple<_BoundArgs...>, _Args&&...>)
49
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY
50
+ explicit constexpr __perfect_forward_impl(_Args&&... __bound_args)
51
+ noexcept(is_nothrow_constructible_v<tuple<_BoundArgs...>, _Args&&...>)
52
+ : __bound_args_(_CUDA_VSTD::forward<_Args>(__bound_args)...) {}
53
+
54
+ __perfect_forward_impl(__perfect_forward_impl const&) = default;
55
+ __perfect_forward_impl(__perfect_forward_impl&&) = default;
56
+
57
+ __perfect_forward_impl& operator=(__perfect_forward_impl const&) = default;
58
+ __perfect_forward_impl& operator=(__perfect_forward_impl&&) = default;
59
+
60
+ _LIBCUDACXX_TEMPLATE(class... _Args)
61
+ (requires is_invocable_v<_Op, _BoundArgs&..., _Args...>)
62
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY constexpr auto operator()(_Args&&... __args) &
63
+ noexcept(noexcept(_Op()(_CUDA_VSTD::get<_Idx>(__bound_args_)..., _CUDA_VSTD::forward<_Args>(__args)...)))
64
+ -> decltype( _Op()(_CUDA_VSTD::get<_Idx>(__bound_args_)..., _CUDA_VSTD::forward<_Args>(__args)...))
65
+ { return _Op()(_CUDA_VSTD::get<_Idx>(__bound_args_)..., _CUDA_VSTD::forward<_Args>(__args)...); }
66
+
67
+ _LIBCUDACXX_TEMPLATE(class... _Args)
68
+ (requires (!is_invocable_v<_Op, _BoundArgs&..., _Args...>))
69
+ _LIBCUDACXX_INLINE_VISIBILITY auto operator()(_Args&&...) & = delete;
70
+
71
+ _LIBCUDACXX_TEMPLATE(class... _Args)
72
+ (requires is_invocable_v<_Op, _BoundArgs const&..., _Args...>)
73
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY constexpr auto operator()(_Args&&... __args) const&
74
+ noexcept(noexcept(_Op()(_CUDA_VSTD::get<_Idx>(__bound_args_)..., _CUDA_VSTD::forward<_Args>(__args)...)))
75
+ -> decltype( _Op()(_CUDA_VSTD::get<_Idx>(__bound_args_)..., _CUDA_VSTD::forward<_Args>(__args)...))
76
+ { return _Op()(_CUDA_VSTD::get<_Idx>(__bound_args_)..., _CUDA_VSTD::forward<_Args>(__args)...); }
77
+
78
+ _LIBCUDACXX_TEMPLATE(class... _Args)
79
+ (requires (!is_invocable_v<_Op, _BoundArgs const&..., _Args...>))
80
+ _LIBCUDACXX_INLINE_VISIBILITY auto operator()(_Args&&...) const& = delete;
81
+
82
+ _LIBCUDACXX_TEMPLATE(class... _Args)
83
+ (requires is_invocable_v<_Op, _BoundArgs..., _Args...>)
84
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY constexpr auto operator()(_Args&&... __args) &&
85
+ noexcept(noexcept(_Op()(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::move(__bound_args_))..., _CUDA_VSTD::forward<_Args>(__args)...)))
86
+ -> decltype( _Op()(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::move(__bound_args_))..., _CUDA_VSTD::forward<_Args>(__args)...))
87
+ { return _Op()(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::move(__bound_args_))..., _CUDA_VSTD::forward<_Args>(__args)...); }
88
+
89
+ _LIBCUDACXX_TEMPLATE(class... _Args)
90
+ (requires (!is_invocable_v<_Op, _BoundArgs..., _Args...>))
91
+ _LIBCUDACXX_INLINE_VISIBILITY auto operator()(_Args&&...) && = delete;
92
+
93
+ _LIBCUDACXX_TEMPLATE(class... _Args)
94
+ (requires is_invocable_v<_Op, _BoundArgs const..., _Args...>)
95
+ _LIBCUDACXX_HIDE_FROM_ABI _LIBCUDACXX_INLINE_VISIBILITY constexpr auto operator()(_Args&&... __args) const&&
96
+ noexcept(noexcept(_Op()(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::move(__bound_args_))..., _CUDA_VSTD::forward<_Args>(__args)...)))
97
+ -> decltype( _Op()(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::move(__bound_args_))..., _CUDA_VSTD::forward<_Args>(__args)...))
98
+ { return _Op()(_CUDA_VSTD::get<_Idx>(_CUDA_VSTD::move(__bound_args_))..., _CUDA_VSTD::forward<_Args>(__args)...); }
99
+
100
+ _LIBCUDACXX_TEMPLATE(class... _Args)
101
+ (requires (!is_invocable_v<_Op, _BoundArgs const..., _Args...>))
102
+ _LIBCUDACXX_INLINE_VISIBILITY auto operator()(_Args&&...) const&& = delete;
103
+ };
104
+
105
+ // __perfect_forward implements a perfect-forwarding call wrapper as explained in [func.require].
106
+ template <class _Op, class ..._Args>
107
+ using __perfect_forward = __perfect_forward_impl<_Op, index_sequence_for<_Args...>, _Args...>;
108
+
109
+ #endif // _LIBCUDACXX_STD_VER > 14
110
+
111
+ _LIBCUDACXX_END_NAMESPACE_STD
112
+
113
+ #endif // _LIBCUDACXX___FUNCTIONAL_PERFECT_FORWARD_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/reference_wrapper.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FUNCTIONAL_REFERENCE_WRAPPER_H
11
+ #define _LIBCUDACXX___FUNCTIONAL_REFERENCE_WRAPPER_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__functional/weak_result_type.h"
18
+ #include "../__memory/addressof.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ template <class _Tp>
27
+ class _LIBCUDACXX_TEMPLATE_VIS reference_wrapper : public __weak_result_type<_Tp>
28
+ {
29
+ public:
30
+ // types
31
+ typedef _Tp type;
32
+ private:
33
+ type* __f_;
34
+
35
+ static _LIBCUDACXX_INLINE_VISIBILITY void __fun(_Tp&) _NOEXCEPT;
36
+ static void __fun(_Tp&&) = delete;
37
+
38
+ public:
39
+ template <class _Up, class = __enable_if_t<!__is_same_uncvref<_Up, reference_wrapper>::value, decltype(__fun(declval<_Up>())) > >
40
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
41
+ reference_wrapper(_Up&& __u) _NOEXCEPT_(noexcept(__fun(declval<_Up>()))) {
42
+ type& __f = static_cast<_Up&&>(__u);
43
+ __f_ = _CUDA_VSTD::addressof(__f);
44
+ }
45
+
46
+ // access
47
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
48
+ operator type&() const _NOEXCEPT {return *__f_;}
49
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
50
+ type& get() const _NOEXCEPT {return *__f_;}
51
+
52
+ // invoke
53
+ template <class... _ArgTypes>
54
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
55
+ typename __invoke_of<type&, _ArgTypes...>::type
56
+ operator() (_ArgTypes&&... __args) const {
57
+ return _CUDA_VSTD::__invoke(get(), _CUDA_VSTD::forward<_ArgTypes>(__args)...);
58
+ }
59
+ };
60
+
61
+ #if _LIBCUDACXX_STD_VER > 14 && !defined(_LIBCUDACXX_HAS_NO_DEDUCTION_GUIDES)
62
+ template <class _Tp>
63
+ reference_wrapper(_Tp&) -> reference_wrapper<_Tp>;
64
+ #endif
65
+
66
+ template <class _Tp>
67
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
68
+ reference_wrapper<_Tp>
69
+ ref(_Tp& __t) _NOEXCEPT
70
+ {
71
+ return reference_wrapper<_Tp>(__t);
72
+ }
73
+
74
+ template <class _Tp>
75
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
76
+ reference_wrapper<_Tp>
77
+ ref(reference_wrapper<_Tp> __t) _NOEXCEPT
78
+ {
79
+ return __t;
80
+ }
81
+
82
+ template <class _Tp>
83
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
84
+ reference_wrapper<const _Tp>
85
+ cref(const _Tp& __t) _NOEXCEPT
86
+ {
87
+ return reference_wrapper<const _Tp>(__t);
88
+ }
89
+
90
+ template <class _Tp>
91
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
92
+ reference_wrapper<const _Tp>
93
+ cref(reference_wrapper<_Tp> __t) _NOEXCEPT
94
+ {
95
+ return __t;
96
+ }
97
+
98
+ template <class _Tp> void ref(const _Tp&&) = delete;
99
+ template <class _Tp> void cref(const _Tp&&) = delete;
100
+
101
+ _LIBCUDACXX_END_NAMESPACE_STD
102
+
103
+ #endif // _LIBCUDACXX___FUNCTIONAL_REFERENCE_WRAPPER_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__functional/weak_result_type.h ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FUNCTIONAL_WEAK_RESULT_TYPE_H
11
+ #define _LIBCUDACXX___FUNCTIONAL_WEAK_RESULT_TYPE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__functional/binary_function.h"
18
+ #include "../__functional/invoke.h"
19
+ #include "../__functional/unary_function.h"
20
+ #include "../__type_traits/integral_constant.h"
21
+ #include "../__type_traits/is_same.h"
22
+ #include "../__utility/declval.h"
23
+
24
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
25
+ #pragma GCC system_header
26
+ #endif
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ template <class _Tp>
31
+ struct __has_result_type
32
+ {
33
+ private:
34
+ template <class _Up> _LIBCUDACXX_INLINE_VISIBILITY static false_type __test(...);
35
+ template <class _Up> _LIBCUDACXX_INLINE_VISIBILITY static true_type __test(typename _Up::result_type* = 0);
36
+ public:
37
+ static const bool value = decltype(__test<_Tp>(0))::value;
38
+ };
39
+
40
+ // __weak_result_type
41
+
42
+ template <class _Tp>
43
+ struct __derives_from_unary_function
44
+ {
45
+ private:
46
+ struct __two {char __lx; char __lxx;};
47
+ static _LIBCUDACXX_INLINE_VISIBILITY __two __test(...);
48
+ template <class _Ap, class _Rp>
49
+ static _LIBCUDACXX_INLINE_VISIBILITY __unary_function<_Ap, _Rp>
50
+ __test(const volatile __unary_function<_Ap, _Rp>*);
51
+
52
+ public:
53
+ static const bool value = !is_same<decltype(__test((_Tp*)0)), __two>::value;
54
+ typedef decltype(__test((_Tp*)0)) type;
55
+ };
56
+
57
+ template <class _Tp>
58
+ struct __derives_from_binary_function
59
+ {
60
+ private:
61
+ struct __two {char __lx; char __lxx;};
62
+ static __two _LIBCUDACXX_INLINE_VISIBILITY __test(...);
63
+ template <class _A1, class _A2, class _Rp>
64
+ static _LIBCUDACXX_INLINE_VISIBILITY __binary_function<_A1, _A2, _Rp>
65
+ __test(const volatile __binary_function<_A1, _A2, _Rp>*);
66
+
67
+ public:
68
+ static const bool value = !is_same<decltype(__test((_Tp*)0)), __two>::value;
69
+ typedef decltype(__test((_Tp*)0)) type;
70
+ };
71
+
72
+ template <class _Tp, bool = __derives_from_unary_function<_Tp>::value>
73
+ struct __maybe_derive_from_unary_function // bool is true
74
+ : public __derives_from_unary_function<_Tp>::type
75
+ {
76
+ };
77
+
78
+ template <class _Tp>
79
+ struct __maybe_derive_from_unary_function<_Tp, false>
80
+ {
81
+ };
82
+
83
+ template <class _Tp, bool = __derives_from_binary_function<_Tp>::value>
84
+ struct __maybe_derive_from_binary_function // bool is true
85
+ : public __derives_from_binary_function<_Tp>::type
86
+ {
87
+ };
88
+
89
+ template <class _Tp>
90
+ struct __maybe_derive_from_binary_function<_Tp, false>
91
+ {
92
+ };
93
+
94
+ template <class _Tp, bool = __has_result_type<_Tp>::value>
95
+ struct __weak_result_type_imp // bool is true
96
+ : public __maybe_derive_from_unary_function<_Tp>,
97
+ public __maybe_derive_from_binary_function<_Tp>
98
+ {
99
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
100
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = typename _Tp::result_type;
101
+ #endif
102
+ };
103
+
104
+ template <class _Tp>
105
+ struct __weak_result_type_imp<_Tp, false>
106
+ : public __maybe_derive_from_unary_function<_Tp>,
107
+ public __maybe_derive_from_binary_function<_Tp>
108
+ {
109
+ };
110
+
111
+ template <class _Tp>
112
+ struct __weak_result_type
113
+ : public __weak_result_type_imp<_Tp>
114
+ {
115
+ };
116
+
117
+ // 0 argument case
118
+
119
+ template <class _Rp>
120
+ struct __weak_result_type<_Rp ()>
121
+ {
122
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
123
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
124
+ #endif
125
+ };
126
+
127
+ template <class _Rp>
128
+ struct __weak_result_type<_Rp (&)()>
129
+ {
130
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
131
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
132
+ #endif
133
+ };
134
+
135
+ template <class _Rp>
136
+ struct __weak_result_type<_Rp (*)()>
137
+ {
138
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
139
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
140
+ #endif
141
+ };
142
+
143
+ // 1 argument case
144
+
145
+ template <class _Rp, class _A1>
146
+ struct __weak_result_type<_Rp (_A1)>
147
+ : public __unary_function<_A1, _Rp>
148
+ {
149
+ };
150
+
151
+ template <class _Rp, class _A1>
152
+ struct __weak_result_type<_Rp (&)(_A1)>
153
+ : public __unary_function<_A1, _Rp>
154
+ {
155
+ };
156
+
157
+ template <class _Rp, class _A1>
158
+ struct __weak_result_type<_Rp (*)(_A1)>
159
+ : public __unary_function<_A1, _Rp>
160
+ {
161
+ };
162
+
163
+ template <class _Rp, class _Cp>
164
+ struct __weak_result_type<_Rp (_Cp::*)()>
165
+ : public __unary_function<_Cp*, _Rp>
166
+ {
167
+ };
168
+
169
+ template <class _Rp, class _Cp>
170
+ struct __weak_result_type<_Rp (_Cp::*)() const>
171
+ : public __unary_function<const _Cp*, _Rp>
172
+ {
173
+ };
174
+
175
+ template <class _Rp, class _Cp>
176
+ struct __weak_result_type<_Rp (_Cp::*)() volatile>
177
+ : public __unary_function<volatile _Cp*, _Rp>
178
+ {
179
+ };
180
+
181
+ template <class _Rp, class _Cp>
182
+ struct __weak_result_type<_Rp (_Cp::*)() const volatile>
183
+ : public __unary_function<const volatile _Cp*, _Rp>
184
+ {
185
+ };
186
+
187
+ // 2 argument case
188
+
189
+ template <class _Rp, class _A1, class _A2>
190
+ struct __weak_result_type<_Rp (_A1, _A2)>
191
+ : public __binary_function<_A1, _A2, _Rp>
192
+ {
193
+ };
194
+
195
+ template <class _Rp, class _A1, class _A2>
196
+ struct __weak_result_type<_Rp (*)(_A1, _A2)>
197
+ : public __binary_function<_A1, _A2, _Rp>
198
+ {
199
+ };
200
+
201
+ template <class _Rp, class _A1, class _A2>
202
+ struct __weak_result_type<_Rp (&)(_A1, _A2)>
203
+ : public __binary_function<_A1, _A2, _Rp>
204
+ {
205
+ };
206
+
207
+ template <class _Rp, class _Cp, class _A1>
208
+ struct __weak_result_type<_Rp (_Cp::*)(_A1)>
209
+ : public __binary_function<_Cp*, _A1, _Rp>
210
+ {
211
+ };
212
+
213
+ template <class _Rp, class _Cp, class _A1>
214
+ struct __weak_result_type<_Rp (_Cp::*)(_A1) const>
215
+ : public __binary_function<const _Cp*, _A1, _Rp>
216
+ {
217
+ };
218
+
219
+ template <class _Rp, class _Cp, class _A1>
220
+ struct __weak_result_type<_Rp (_Cp::*)(_A1) volatile>
221
+ : public __binary_function<volatile _Cp*, _A1, _Rp>
222
+ {
223
+ };
224
+
225
+ template <class _Rp, class _Cp, class _A1>
226
+ struct __weak_result_type<_Rp (_Cp::*)(_A1) const volatile>
227
+ : public __binary_function<const volatile _Cp*, _A1, _Rp>
228
+ {
229
+ };
230
+
231
+ // 3 or more arguments
232
+
233
+ template <class _Rp, class _A1, class _A2, class _A3, class ..._A4>
234
+ struct __weak_result_type<_Rp (_A1, _A2, _A3, _A4...)>
235
+ {
236
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
237
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
238
+ #endif
239
+ };
240
+
241
+ template <class _Rp, class _A1, class _A2, class _A3, class ..._A4>
242
+ struct __weak_result_type<_Rp (&)(_A1, _A2, _A3, _A4...)>
243
+ {
244
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
245
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
246
+ #endif
247
+ };
248
+
249
+ template <class _Rp, class _A1, class _A2, class _A3, class ..._A4>
250
+ struct __weak_result_type<_Rp (*)(_A1, _A2, _A3, _A4...)>
251
+ {
252
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
253
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
254
+ #endif
255
+ };
256
+
257
+ template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
258
+ struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...)>
259
+ {
260
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
261
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
262
+ #endif
263
+ };
264
+
265
+ template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
266
+ struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) const>
267
+ {
268
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
269
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
270
+ #endif
271
+ };
272
+
273
+ template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
274
+ struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) volatile>
275
+ {
276
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
277
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
278
+ #endif
279
+ };
280
+
281
+ template <class _Rp, class _Cp, class _A1, class _A2, class ..._A3>
282
+ struct __weak_result_type<_Rp (_Cp::*)(_A1, _A2, _A3...) const volatile>
283
+ {
284
+ #if _LIBCUDACXX_STD_VER <= 17 || defined(_LIBCUDACXX_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS)
285
+ using result_type _LIBCUDACXX_NODEBUG_TYPE _LIBCUDACXX_DEPRECATED_IN_CXX17 = _Rp;
286
+ #endif
287
+ };
288
+
289
+ template <class _Tp, class ..._Args>
290
+ struct __invoke_return
291
+ {
292
+ typedef decltype(_CUDA_VSTD::__invoke(declval<_Tp>(), declval<_Args>()...)) type;
293
+ };
294
+
295
+ _LIBCUDACXX_END_NAMESPACE_STD
296
+
297
+ #endif // _LIBCUDACXX___FUNCTIONAL_WEAK_RESULT_TYPE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/array.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===---------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===---------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FWD_ARRAY_H
11
+ #define _LIBCUDACXX___FWD_ARRAY_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../cstddef"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ template <class _Tp, size_t _Size>
26
+ struct _LIBCUDACXX_TEMPLATE_VIS array;
27
+
28
+ _LIBCUDACXX_END_NAMESPACE_STD
29
+
30
+ #endif // _LIBCUDACXX___FWD_ARRAY_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/get.h ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FWD_GET_H
11
+ #define _LIBCUDACXX___FWD_GET_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__fwd/array.h"
18
+ #include "../__fwd/pair.h"
19
+ #include "../__fwd/tuple.h"
20
+ #include "../__tuple_dir/tuple_element.h"
21
+ #include "../cstddef"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ #ifndef _LIBCUDACXX_CXX03_LANG
30
+
31
+ template <size_t _Ip, class ..._Tp>
32
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
33
+ typename tuple_element<_Ip, tuple<_Tp...> >::type&
34
+ get(tuple<_Tp...>&) _NOEXCEPT;
35
+
36
+ template <size_t _Ip, class ..._Tp>
37
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
38
+ const typename tuple_element<_Ip, tuple<_Tp...> >::type&
39
+ get(const tuple<_Tp...>&) _NOEXCEPT;
40
+
41
+ template <size_t _Ip, class ..._Tp>
42
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
43
+ typename tuple_element<_Ip, tuple<_Tp...> >::type&&
44
+ get(tuple<_Tp...>&&) _NOEXCEPT;
45
+
46
+ template <size_t _Ip, class ..._Tp>
47
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
48
+ const typename tuple_element<_Ip, tuple<_Tp...> >::type&&
49
+ get(const tuple<_Tp...>&&) _NOEXCEPT;
50
+
51
+ #endif // _LIBCUDACXX_CXX03_LANG
52
+
53
+ template <size_t _Ip, class _T1, class _T2>
54
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
55
+ typename tuple_element<_Ip, pair<_T1, _T2> >::type&
56
+ get(pair<_T1, _T2>&) _NOEXCEPT;
57
+
58
+ template <size_t _Ip, class _T1, class _T2>
59
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
60
+ const typename tuple_element<_Ip, pair<_T1, _T2> >::type&
61
+ get(const pair<_T1, _T2>&) _NOEXCEPT;
62
+
63
+ #ifndef _LIBCUDACXX_CXX03_LANG
64
+ template <size_t _Ip, class _T1, class _T2>
65
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
66
+ typename tuple_element<_Ip, pair<_T1, _T2> >::type&&
67
+ get(pair<_T1, _T2>&&) _NOEXCEPT;
68
+
69
+ template <size_t _Ip, class _T1, class _T2>
70
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
71
+ const typename tuple_element<_Ip, pair<_T1, _T2> >::type&&
72
+ get(const pair<_T1, _T2>&&) _NOEXCEPT;
73
+ #endif // _LIBCUDACXX_CXX03_LANG
74
+
75
+ template <size_t _Ip, class _Tp, size_t _Size>
76
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
77
+ _Tp&
78
+ get(array<_Tp, _Size>&) _NOEXCEPT;
79
+
80
+ template <size_t _Ip, class _Tp, size_t _Size>
81
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
82
+ const _Tp&
83
+ get(const array<_Tp, _Size>&) _NOEXCEPT;
84
+
85
+ #ifndef _LIBCUDACXX_CXX03_LANG
86
+ template <size_t _Ip, class _Tp, size_t _Size>
87
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
88
+ _Tp&&
89
+ get(array<_Tp, _Size>&&) _NOEXCEPT;
90
+
91
+ template <size_t _Ip, class _Tp, size_t _Size>
92
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
93
+ const _Tp&&
94
+ get(const array<_Tp, _Size>&&) _NOEXCEPT;
95
+ #endif // _LIBCUDACXX_CXX03_LANG
96
+
97
+ _LIBCUDACXX_END_NAMESPACE_STD
98
+
99
+ #endif // _LIBCUDACXX___FWD_GET_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/hash.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===---------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===---------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FWD_HASH_H
11
+ #define _LIBCUDACXX___FWD_HASH_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <class>
24
+ struct _LIBCUDACXX_TEMPLATE_VIS hash;
25
+
26
+ _LIBCUDACXX_END_NAMESPACE_STD
27
+
28
+ #endif // _LIBCUDACXX___FWD_HASH_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/memory_resource.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FWD_MEMORY_RESOURCE_H
11
+ #define _LIBCUDACXX___FWD_MEMORY_RESOURCE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ namespace pmr {
24
+ template <class _ValueType>
25
+ class _LIBCUDACXX_TEMPLATE_VIS polymorphic_allocator;
26
+ } // namespace pmr
27
+
28
+ _LIBCUDACXX_END_NAMESPACE_STD
29
+
30
+ #endif // _LIBCUDACXX___FWD_MEMORY_RESOURCE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/pair.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===---------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===---------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FWD_PAIR_H
11
+ #define _LIBCUDACXX___FWD_PAIR_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <class, class>
24
+ struct _LIBCUDACXX_TEMPLATE_VIS pair;
25
+
26
+ _LIBCUDACXX_END_NAMESPACE_STD
27
+
28
+ #endif // _LIBCUDACXX___FWD_PAIR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/span.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===---------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===---------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FWD_SPAN_H
12
+ #define _LIBCUDACXX___FWD_SPAN_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../cstddef"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ #if _LIBCUDACXX_STD_VER > 11
27
+
28
+ _LIBCUDACXX_INLINE_VAR constexpr size_t dynamic_extent = static_cast<size_t>(-1);
29
+ template <typename _Tp, size_t _Extent = dynamic_extent> class span;
30
+
31
+ #endif
32
+
33
+ _LIBCUDACXX_END_NAMESPACE_STD
34
+
35
+
36
+ #endif // _LIBCUDACXX___FWD_SPAN_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/string.h ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FWD_STRING_H
11
+ #define _LIBCUDACXX___FWD_STRING_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__fwd/memory_resource.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ template <class _CharT>
26
+ struct _LIBCUDACXX_TEMPLATE_VIS char_traits;
27
+ template <>
28
+ struct char_traits<char>;
29
+
30
+ #ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
31
+ template <>
32
+ struct char_traits<char8_t>;
33
+ #endif
34
+
35
+ template <>
36
+ struct char_traits<char16_t>;
37
+ template <>
38
+ struct char_traits<char32_t>;
39
+
40
+ #ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
41
+ template <>
42
+ struct char_traits<wchar_t>;
43
+ #endif
44
+
45
+ template <class _Tp>
46
+ class _LIBCUDACXX_TEMPLATE_VIS allocator;
47
+
48
+ template <class _CharT, class _Traits = char_traits<_CharT>, class _Allocator = allocator<_CharT> >
49
+ class _LIBCUDACXX_TEMPLATE_VIS basic_string;
50
+
51
+ using string = basic_string<char>;
52
+
53
+ #ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
54
+ using wstring = basic_string<wchar_t>;
55
+ #endif
56
+
57
+ #ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
58
+ using u8string = basic_string<char8_t>;
59
+ #endif
60
+
61
+ using u16string = basic_string<char16_t>;
62
+ using u32string = basic_string<char32_t>;
63
+
64
+ #if _LIBCUDACXX_STD_VER >= 17
65
+
66
+ namespace pmr {
67
+ template <class _CharT, class _Traits = char_traits<_CharT>>
68
+ using basic_string = std::basic_string<_CharT, _Traits, polymorphic_allocator<_CharT>>;
69
+
70
+ using string = basic_string<char>;
71
+
72
+ # ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
73
+ using wstring = basic_string<wchar_t>;
74
+ # endif
75
+
76
+ # ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
77
+ using u8string = basic_string<char8_t>;
78
+ # endif
79
+
80
+ using u16string = basic_string<char16_t>;
81
+ using u32string = basic_string<char32_t>;
82
+
83
+ } // namespace pmr
84
+
85
+ #endif // _LIBCUDACXX_STD_VER >= 17
86
+
87
+ // clang-format off
88
+ template <class _CharT, class _Traits, class _Allocator>
89
+ class _LIBCUDACXX_PREFERRED_NAME(string)
90
+ #ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
91
+ _LIBCUDACXX_PREFERRED_NAME(wstring)
92
+ #endif
93
+ #ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
94
+ _LIBCUDACXX_PREFERRED_NAME(u8string)
95
+ #endif
96
+ _LIBCUDACXX_PREFERRED_NAME(u16string)
97
+ _LIBCUDACXX_PREFERRED_NAME(u32string)
98
+ #if _LIBCUDACXX_STD_VER >= 17
99
+ _LIBCUDACXX_PREFERRED_NAME(pmr::string)
100
+ # ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
101
+ _LIBCUDACXX_PREFERRED_NAME(pmr::wstring)
102
+ # endif
103
+ # ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
104
+ _LIBCUDACXX_PREFERRED_NAME(pmr::u8string)
105
+ # endif
106
+ _LIBCUDACXX_PREFERRED_NAME(pmr::u16string)
107
+ _LIBCUDACXX_PREFERRED_NAME(pmr::u32string)
108
+ #endif
109
+ basic_string;
110
+ // clang-format on
111
+
112
+ _LIBCUDACXX_END_NAMESPACE_STD
113
+
114
+ #endif // _LIBCUDACXX___FWD_STRING_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/string_view.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===---------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===---------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___FWD_STRING_VIEW_H
12
+ #define _LIBCUDACXX___FWD_STRING_VIEW_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__fwd/string.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ template<class _CharT, class _Traits = char_traits<_CharT> >
27
+ class _LIBCUDACXX_TEMPLATE_VIS basic_string_view;
28
+
29
+ typedef basic_string_view<char> string_view;
30
+ #ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
31
+ typedef basic_string_view<char8_t> u8string_view;
32
+ #endif
33
+ typedef basic_string_view<char16_t> u16string_view;
34
+ typedef basic_string_view<char32_t> u32string_view;
35
+ #ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
36
+ typedef basic_string_view<wchar_t> wstring_view;
37
+ #endif
38
+
39
+ // clang-format off
40
+ template <class _CharT, class _Traits>
41
+ class _LIBCUDACXX_PREFERRED_NAME(string_view)
42
+ #ifndef _LIBCUDACXX_HAS_NO_WIDE_CHARACTERS
43
+ _LIBCUDACXX_PREFERRED_NAME(wstring_view)
44
+ #endif
45
+ #ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
46
+ _LIBCUDACXX_PREFERRED_NAME(u8string_view)
47
+ #endif
48
+ _LIBCUDACXX_PREFERRED_NAME(u16string_view)
49
+ _LIBCUDACXX_PREFERRED_NAME(u32string_view)
50
+ basic_string_view;
51
+ // clang-format on
52
+ _LIBCUDACXX_END_NAMESPACE_STD
53
+
54
+ #endif // _LIBCUDACXX___FWD_STRING_VIEW_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__fwd/tuple.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===---------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===---------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___FWD_TUPLE_H
11
+ #define _LIBCUDACXX___FWD_TUPLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ #ifndef _LIBCUDACXX_CXX03_LANG
24
+
25
+ template <class...>
26
+ class _LIBCUDACXX_TEMPLATE_VIS tuple;
27
+
28
+ #endif // _LIBCUDACXX_CXX03_LANG
29
+
30
+ _LIBCUDACXX_END_NAMESPACE_STD
31
+
32
+ #endif // _LIBCUDACXX___FWD_TUPLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/data.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_DATA_H
12
+ #define _LIBCUDACXX___ITERATOR_DATA_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../cstddef"
19
+ #include "../initializer_list"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ #if _LIBCUDACXX_STD_VER > 11
28
+
29
+ template <class _Cont> constexpr
30
+ _LIBCUDACXX_INLINE_VISIBILITY
31
+ auto data(_Cont& __c)
32
+ _NOEXCEPT_(noexcept(__c.data()))
33
+ -> decltype (__c.data())
34
+ { return __c.data(); }
35
+
36
+ template <class _Cont> constexpr
37
+ _LIBCUDACXX_INLINE_VISIBILITY
38
+ auto data(const _Cont& __c)
39
+ _NOEXCEPT_(noexcept(__c.data()))
40
+ -> decltype (__c.data())
41
+ { return __c.data(); }
42
+
43
+ template <class _Tp, size_t _Sz>
44
+ _LIBCUDACXX_INLINE_VISIBILITY
45
+ constexpr _Tp* data(_Tp (&__array)[_Sz]) noexcept { return __array; }
46
+
47
+ template <class _Ep>
48
+ _LIBCUDACXX_INLINE_VISIBILITY
49
+ constexpr const _Ep* data(initializer_list<_Ep> __il) noexcept { return __il.begin(); }
50
+
51
+ #endif
52
+
53
+ _LIBCUDACXX_END_NAMESPACE_STD
54
+
55
+ #endif // _LIBCUDACXX___ITERATOR_DATA_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/empty.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_EMPTY_H
12
+ #define _LIBCUDACXX___ITERATOR_EMPTY_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../cstddef"
19
+ #include "../initializer_list"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ #if _LIBCUDACXX_STD_VER > 11
28
+
29
+ template <class _Cont>
30
+ _LIBCUDACXX_NODISCARD_AFTER_CXX17 _LIBCUDACXX_INLINE_VISIBILITY
31
+ constexpr auto empty(const _Cont& __c)
32
+ _NOEXCEPT_(noexcept(__c.empty()))
33
+ -> decltype (__c.empty())
34
+ { return __c.empty(); }
35
+
36
+ template <class _Tp, size_t _Sz>
37
+ _LIBCUDACXX_NODISCARD_AFTER_CXX17 _LIBCUDACXX_INLINE_VISIBILITY
38
+ constexpr bool empty(const _Tp (&)[_Sz]) noexcept { return false; }
39
+
40
+ template <class _Ep>
41
+ _LIBCUDACXX_NODISCARD_AFTER_CXX17 _LIBCUDACXX_INLINE_VISIBILITY
42
+ constexpr bool empty(initializer_list<_Ep> __il) noexcept { return __il.size() == 0; }
43
+
44
+ #endif // _LIBCUDACXX_STD_VER > 17
45
+
46
+ _LIBCUDACXX_END_NAMESPACE_STD
47
+
48
+ #endif // _LIBCUDACXX___ITERATOR_EMPTY_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/erase_if_container.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_ERASE_IF_CONTAINER_H
12
+ #define _LIBCUDACXX___ITERATOR_ERASE_IF_CONTAINER_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
19
+ #pragma GCC system_header
20
+ #endif
21
+
22
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
23
+
24
+ template <class _Container, class _Predicate>
25
+ _LIBCUDACXX_INLINE_VISIBILITY
26
+ typename _Container::size_type
27
+ __libcpp_erase_if_container(_Container& __c, _Predicate& __pred) {
28
+ typename _Container::size_type __old_size = __c.size();
29
+
30
+ const typename _Container::iterator __last = __c.end();
31
+ for (typename _Container::iterator __iter = __c.begin(); __iter != __last;) {
32
+ if (__pred(*__iter))
33
+ __iter = __c.erase(__iter);
34
+ else
35
+ ++__iter;
36
+ }
37
+
38
+ return __old_size - __c.size();
39
+ }
40
+
41
+ _LIBCUDACXX_END_NAMESPACE_STD
42
+
43
+ #endif // _LIBCUDACXX___ITERATOR_ERASE_IF_CONTAINER_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/front_insert_iterator.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_FRONT_INSERT_ITERATOR_H
12
+ #define _LIBCUDACXX___ITERATOR_FRONT_INSERT_ITERATOR_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__iterator/iterator.h"
19
+ #include "../__iterator/iterator_traits.h"
20
+ #include "../__memory/addressof.h"
21
+ #include "../__utility/move.h"
22
+ #include "../cstddef"
23
+
24
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
25
+ #pragma GCC system_header
26
+ #endif
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH
31
+ template <class _Container>
32
+ class _LIBCUDACXX_TEMPLATE_VIS front_insert_iterator
33
+ #if _LIBCUDACXX_STD_VER <= 14 || !defined(_LIBCUDACXX_ABI_NO_ITERATOR_BASES)
34
+ : public iterator<output_iterator_tag,
35
+ void,
36
+ void,
37
+ void,
38
+ void>
39
+ #endif
40
+ {
41
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_POP
42
+ protected:
43
+ _Container* container;
44
+ public:
45
+ typedef _Container container_type;
46
+
47
+ _LIBCUDACXX_INLINE_VISIBILITY explicit front_insert_iterator(_Container& __x) : container(_CUDA_VSTD::addressof(__x)) {}
48
+ _LIBCUDACXX_INLINE_VISIBILITY front_insert_iterator& operator=(const typename _Container::value_type& __value_)
49
+ {container->push_front(__value_); return *this;}
50
+ #ifndef _LIBCUDACXX_CXX03_LANG
51
+ _LIBCUDACXX_INLINE_VISIBILITY front_insert_iterator& operator=(typename _Container::value_type&& __value_)
52
+ {container->push_front(_CUDA_VSTD::move(__value_)); return *this;}
53
+ #endif // _LIBCUDACXX_CXX03_LANG
54
+ _LIBCUDACXX_INLINE_VISIBILITY front_insert_iterator& operator*() {return *this;}
55
+ _LIBCUDACXX_INLINE_VISIBILITY front_insert_iterator& operator++() {return *this;}
56
+ _LIBCUDACXX_INLINE_VISIBILITY front_insert_iterator operator++(int) {return *this;}
57
+ };
58
+
59
+ template <class _Container>
60
+ inline _LIBCUDACXX_INLINE_VISIBILITY
61
+ front_insert_iterator<_Container>
62
+ front_inserter(_Container& __x)
63
+ {
64
+ return front_insert_iterator<_Container>(__x);
65
+ }
66
+
67
+ _LIBCUDACXX_END_NAMESPACE_STD
68
+
69
+ #endif // _LIBCUDACXX___ITERATOR_FRONT_INSERT_ITERATOR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/insert_iterator.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_INSERT_ITERATOR_H
12
+ #define _LIBCUDACXX___ITERATOR_INSERT_ITERATOR_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__iterator/iterator_traits.h"
19
+ #include "../__iterator/iterator.h"
20
+ #include "../__memory/addressof.h"
21
+ #include "../__utility/move.h"
22
+ #include "../cstddef"
23
+
24
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
25
+ #pragma GCC system_header
26
+ #endif
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH
31
+ template <class _Container>
32
+ class _LIBCUDACXX_TEMPLATE_VIS insert_iterator
33
+ #if _LIBCUDACXX_STD_VER <= 14 || !defined(_LIBCUDACXX_ABI_NO_ITERATOR_BASES)
34
+ : public iterator<output_iterator_tag,
35
+ void,
36
+ void,
37
+ void,
38
+ void>
39
+ #endif
40
+ {
41
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_POP
42
+ protected:
43
+ _Container* container;
44
+ typename _Container::iterator iter;
45
+ public:
46
+ typedef _Container container_type;
47
+
48
+ _LIBCUDACXX_INLINE_VISIBILITY insert_iterator(_Container& __x, typename _Container::iterator __i)
49
+ : container(_CUDA_VSTD::addressof(__x)), iter(__i) {}
50
+ _LIBCUDACXX_INLINE_VISIBILITY insert_iterator& operator=(const typename _Container::value_type& __value_)
51
+ {iter = container->insert(iter, __value_); ++iter; return *this;}
52
+ #ifndef _LIBCUDACXX_CXX03_LANG
53
+ _LIBCUDACXX_INLINE_VISIBILITY insert_iterator& operator=(typename _Container::value_type&& __value_)
54
+ {iter = container->insert(iter, _CUDA_VSTD::move(__value_)); ++iter; return *this;}
55
+ #endif // _LIBCUDACXX_CXX03_LANG
56
+ _LIBCUDACXX_INLINE_VISIBILITY insert_iterator& operator*() {return *this;}
57
+ _LIBCUDACXX_INLINE_VISIBILITY insert_iterator& operator++() {return *this;}
58
+ _LIBCUDACXX_INLINE_VISIBILITY insert_iterator& operator++(int) {return *this;}
59
+ };
60
+
61
+ template <class _Container>
62
+ inline _LIBCUDACXX_INLINE_VISIBILITY
63
+ insert_iterator<_Container>
64
+ inserter(_Container& __x, typename _Container::iterator __i)
65
+ {
66
+ return insert_iterator<_Container>(__x, __i);
67
+ }
68
+
69
+ _LIBCUDACXX_END_NAMESPACE_STD
70
+
71
+ #endif // _LIBCUDACXX___ITERATOR_INSERT_ITERATOR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/next.h ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_NEXT_H
12
+ #define _LIBCUDACXX___ITERATOR_NEXT_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__assert"
19
+ #include "../__iterator/advance.h"
20
+ #include "../__iterator/iterator_traits.h"
21
+ #include "../__type_traits/enable_if.h"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ template <class _InputIter>
30
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
31
+ __enable_if_t
32
+ <
33
+ __is_cpp17_input_iterator<_InputIter>::value,
34
+ _InputIter
35
+ >
36
+ next(_InputIter __x,
37
+ typename iterator_traits<_InputIter>::difference_type __n = 1)
38
+ {
39
+ _LIBCUDACXX_ASSERT(__n >= 0 || __is_cpp17_bidirectional_iterator<_InputIter>::value,
40
+ "Attempt to next(it, -n) on a non-bidi iterator");
41
+
42
+ _CUDA_VSTD::advance(__x, __n);
43
+ return __x;
44
+ }
45
+
46
+ _LIBCUDACXX_END_NAMESPACE_STD
47
+
48
+ #endif // _LIBCUDACXX___ITERATOR_NEXT_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/ostreambuf_iterator.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_OSTREAMBUF_ITERATOR_H
12
+ #define _LIBCUDACXX___ITERATOR_OSTREAMBUF_ITERATOR_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__iterator/iterator.h"
19
+ #include "../__iterator/iterator_traits.h"
20
+ #include "../cstddef"
21
+ #include "../iosfwd"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH
30
+ template <class _CharT, class _Traits>
31
+ class _LIBCUDACXX_TEMPLATE_VIS ostreambuf_iterator
32
+ #if _LIBCUDACXX_STD_VER <= 14 || !defined(_LIBCUDACXX_ABI_NO_ITERATOR_BASES)
33
+ : public iterator<output_iterator_tag, void, void, void, void>
34
+ #endif
35
+ {
36
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_POP
37
+ public:
38
+ typedef _CharT char_type;
39
+ typedef _Traits traits_type;
40
+ typedef basic_streambuf<_CharT,_Traits> streambuf_type;
41
+ typedef basic_ostream<_CharT,_Traits> ostream_type;
42
+ private:
43
+ streambuf_type* __sbuf_;
44
+ public:
45
+ _LIBCUDACXX_INLINE_VISIBILITY ostreambuf_iterator(ostream_type& __s) _NOEXCEPT
46
+ : __sbuf_(__s.rdbuf()) {}
47
+ _LIBCUDACXX_INLINE_VISIBILITY ostreambuf_iterator(streambuf_type* __s) _NOEXCEPT
48
+ : __sbuf_(__s) {}
49
+ _LIBCUDACXX_INLINE_VISIBILITY ostreambuf_iterator& operator=(_CharT __c)
50
+ {
51
+ if (__sbuf_ && traits_type::eq_int_type(__sbuf_->sputc(__c), traits_type::eof()))
52
+ __sbuf_ = 0;
53
+ return *this;
54
+ }
55
+ _LIBCUDACXX_INLINE_VISIBILITY ostreambuf_iterator& operator*() {return *this;}
56
+ _LIBCUDACXX_INLINE_VISIBILITY ostreambuf_iterator& operator++() {return *this;}
57
+ _LIBCUDACXX_INLINE_VISIBILITY ostreambuf_iterator& operator++(int) {return *this;}
58
+ _LIBCUDACXX_INLINE_VISIBILITY bool failed() const _NOEXCEPT {return __sbuf_ == 0;}
59
+
60
+ template <class _Ch, class _Tr>
61
+ friend
62
+ _LIBCUDACXX_HIDDEN _LIBCUDACXX_HOST_DEVICE
63
+ ostreambuf_iterator<_Ch, _Tr>
64
+ __pad_and_output(ostreambuf_iterator<_Ch, _Tr> __s,
65
+ const _Ch* __ob, const _Ch* __op, const _Ch* __oe,
66
+ ios_base& __iob, _Ch __fl);
67
+ };
68
+
69
+ _LIBCUDACXX_END_NAMESPACE_STD
70
+
71
+ #endif // _LIBCUDACXX___ITERATOR_OSTREAMBUF_ITERATOR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/prev.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_PREV_H
12
+ #define _LIBCUDACXX___ITERATOR_PREV_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__assert"
19
+ #include "../__iterator/advance.h"
20
+ #include "../__iterator/iterator_traits.h"
21
+ #include "../__type_traits/enable_if.h"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ template <class _InputIter>
30
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
31
+ __enable_if_t
32
+ <
33
+ __is_cpp17_input_iterator<_InputIter>::value,
34
+ _InputIter
35
+ >
36
+ prev(_InputIter __x,
37
+ typename iterator_traits<_InputIter>::difference_type __n = 1)
38
+ {
39
+ _LIBCUDACXX_ASSERT(__n <= 0 || __is_cpp17_bidirectional_iterator<_InputIter>::value,
40
+ "Attempt to prev(it, +n) on a non-bidi iterator");
41
+ _CUDA_VSTD::advance(__x, -__n);
42
+ return __x;
43
+ }
44
+
45
+ _LIBCUDACXX_END_NAMESPACE_STD
46
+
47
+ #endif // _LIBCUDACXX___ITERATOR_PREV_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__iterator/reverse_iterator.h ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___ITERATOR_REVERSE_ITERATOR_H
12
+ #define _LIBCUDACXX___ITERATOR_REVERSE_ITERATOR_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__iterator/iterator.h"
19
+ #include "../__iterator/iterator_traits.h"
20
+ #include "../__memory/addressof.h"
21
+ #include "../__type_traits/void_t.h"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ template <class _Tp, class = void>
30
+ struct __is_cpp17_stashing_iterator : false_type {};
31
+
32
+ template <class _Tp>
33
+ struct __is_cpp17_stashing_iterator<_Tp, __void_t<typename _Tp::__stashing_iterator_tag>>
34
+ : true_type {};
35
+
36
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH
37
+ template <class _Iter>
38
+ class _LIBCUDACXX_TEMPLATE_VIS reverse_iterator
39
+ #if _LIBCUDACXX_STD_VER <= 14 || !defined(_LIBCUDACXX_ABI_NO_ITERATOR_BASES)
40
+ : public iterator<typename iterator_traits<_Iter>::iterator_category,
41
+ typename iterator_traits<_Iter>::value_type,
42
+ typename iterator_traits<_Iter>::difference_type,
43
+ typename iterator_traits<_Iter>::pointer,
44
+ typename iterator_traits<_Iter>::reference>
45
+ #endif
46
+ {
47
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_POP
48
+ private:
49
+ /*mutable*/ _Iter __t; // no longer used as of LWG #2360, not removed due to ABI break
50
+
51
+ static_assert(!__is_cpp17_stashing_iterator<_Iter>::value,
52
+ "The specified iterator type cannot be used with reverse_iterator; "
53
+ "Using stashing iterators with reverse_iterator causes undefined behavior");
54
+
55
+ protected:
56
+ _Iter current;
57
+ public:
58
+ typedef _Iter iterator_type;
59
+ typedef typename iterator_traits<_Iter>::difference_type difference_type;
60
+ typedef typename iterator_traits<_Iter>::reference reference;
61
+ typedef typename iterator_traits<_Iter>::pointer pointer;
62
+
63
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
64
+ reverse_iterator() : __t(), current() {}
65
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
66
+ explicit reverse_iterator(_Iter __x) : __t(__x), current(__x) {}
67
+ template <class _Up>
68
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
69
+ reverse_iterator(const reverse_iterator<_Up>& __u) : __t(__u.base()), current(__u.base()) {}
70
+ template <class _Up>
71
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
72
+ reverse_iterator& operator=(const reverse_iterator<_Up>& __u)
73
+ { __t = current = __u.base(); return *this; }
74
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
75
+ _Iter base() const {return current;}
76
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
77
+ reference operator*() const {_Iter __tmp = current; return *--__tmp;}
78
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
79
+ pointer operator->() const {return _CUDA_VSTD::addressof(operator*());}
80
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
81
+ reverse_iterator& operator++() {--current; return *this;}
82
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
83
+ reverse_iterator operator++(int) {reverse_iterator __tmp(*this); --current; return __tmp;}
84
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
85
+ reverse_iterator& operator--() {++current; return *this;}
86
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
87
+ reverse_iterator operator--(int) {reverse_iterator __tmp(*this); ++current; return __tmp;}
88
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
89
+ reverse_iterator operator+ (difference_type __n) const {return reverse_iterator(current - __n);}
90
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
91
+ reverse_iterator& operator+=(difference_type __n) {current -= __n; return *this;}
92
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
93
+ reverse_iterator operator- (difference_type __n) const {return reverse_iterator(current + __n);}
94
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
95
+ reverse_iterator& operator-=(difference_type __n) {current += __n; return *this;}
96
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
97
+ reference operator[](difference_type __n) const {return *(*this + __n);}
98
+ };
99
+
100
+ template <class _Iter1, class _Iter2>
101
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
102
+ bool
103
+ operator==(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
104
+ {
105
+ return __x.base() == __y.base();
106
+ }
107
+
108
+ template <class _Iter1, class _Iter2>
109
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
110
+ bool
111
+ operator<(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
112
+ {
113
+ return __x.base() > __y.base();
114
+ }
115
+
116
+ template <class _Iter1, class _Iter2>
117
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
118
+ bool
119
+ operator!=(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
120
+ {
121
+ return __x.base() != __y.base();
122
+ }
123
+
124
+ template <class _Iter1, class _Iter2>
125
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
126
+ bool
127
+ operator>(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
128
+ {
129
+ return __x.base() < __y.base();
130
+ }
131
+
132
+ template <class _Iter1, class _Iter2>
133
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
134
+ bool
135
+ operator>=(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
136
+ {
137
+ return __x.base() <= __y.base();
138
+ }
139
+
140
+ template <class _Iter1, class _Iter2>
141
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
142
+ bool
143
+ operator<=(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
144
+ {
145
+ return __x.base() >= __y.base();
146
+ }
147
+
148
+ #ifndef _LIBCUDACXX_CXX03_LANG
149
+ template <class _Iter1, class _Iter2>
150
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
151
+ auto
152
+ operator-(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
153
+ -> decltype(__y.base() - __x.base())
154
+ {
155
+ return __y.base() - __x.base();
156
+ }
157
+ #else
158
+ template <class _Iter1, class _Iter2>
159
+ inline _LIBCUDACXX_INLINE_VISIBILITY
160
+ typename reverse_iterator<_Iter1>::difference_type
161
+ operator-(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y)
162
+ {
163
+ return __y.base() - __x.base();
164
+ }
165
+ #endif
166
+
167
+ template <class _Iter>
168
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
169
+ reverse_iterator<_Iter>
170
+ operator+(typename reverse_iterator<_Iter>::difference_type __n, const reverse_iterator<_Iter>& __x)
171
+ {
172
+ return reverse_iterator<_Iter>(__x.base() - __n);
173
+ }
174
+
175
+ #if _LIBCUDACXX_STD_VER > 11
176
+ template <class _Iter>
177
+ inline _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX14
178
+ reverse_iterator<_Iter> make_reverse_iterator(_Iter __i)
179
+ {
180
+ return reverse_iterator<_Iter>(__i);
181
+ }
182
+ #endif
183
+
184
+ _LIBCUDACXX_END_NAMESPACE_STD
185
+
186
+ #endif // _LIBCUDACXX___ITERATOR_REVERSE_ITERATOR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/compressed_pair.h ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_COMPRESSED_PAIR_HPP
45
+ #define _LIBCUDACXX___MDSPAN_COMPRESSED_PAIR_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__mdspan/macros.h"
52
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
53
+ #include "../__mdspan/no_unique_address.h"
54
+ #endif
55
+ #include "../__type_traits/enable_if.h"
56
+ #include "../__type_traits/is_empty.h"
57
+
58
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
59
+ #pragma GCC system_header
60
+ #endif
61
+
62
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
63
+
64
+ #if _LIBCUDACXX_STD_VER > 11
65
+
66
+ namespace __detail {
67
+
68
+ // For no unique address emulation, this is the case taken when neither are empty.
69
+ // For real `[[no_unique_address]]`, this case is always taken.
70
+ template <class _Tp, class _Up, class _Enable = void> struct __compressed_pair {
71
+ _LIBCUDACXX_NO_UNIQUE_ADDRESS _Tp __t_val;
72
+ _LIBCUDACXX_NO_UNIQUE_ADDRESS _Up __u_val;
73
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp &__first() noexcept { return __t_val; }
74
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const &__first() const noexcept {
75
+ return __t_val;
76
+ }
77
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up &__second() noexcept { return __u_val; }
78
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const &__second() const noexcept {
79
+ return __u_val;
80
+ }
81
+
82
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
83
+ constexpr __compressed_pair() noexcept = default;
84
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
85
+ constexpr __compressed_pair(__compressed_pair const &) noexcept = default;
86
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
87
+ constexpr __compressed_pair(__compressed_pair &&) noexcept = default;
88
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
89
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
90
+ operator=(__compressed_pair const &) noexcept = default;
91
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
92
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
93
+ operator=(__compressed_pair &&) noexcept = default;
94
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
95
+ ~__compressed_pair() noexcept = default;
96
+ template <class _TLike, class _ULike>
97
+ __MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_TLike &&__t, _ULike &&__u)
98
+ : __t_val((_TLike &&) __t), __u_val((_ULike &&) __u) {}
99
+ };
100
+
101
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
102
+
103
+ // First empty.
104
+ template <class _Tp, class _Up>
105
+ struct __compressed_pair<
106
+ _Tp, _Up,
107
+ _CUDA_VSTD::enable_if_t<_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_empty, _Tp) && !_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_empty, _Up)>>
108
+ : private _Tp {
109
+ _Up __u_val;
110
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp &__first() noexcept {
111
+ return *static_cast<_Tp *>(this);
112
+ }
113
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const &__first() const noexcept {
114
+ return *static_cast<_Tp const *>(this);
115
+ }
116
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up &__second() noexcept { return __u_val; }
117
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const &__second() const noexcept {
118
+ return __u_val;
119
+ }
120
+
121
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
122
+ constexpr __compressed_pair() noexcept = default;
123
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
124
+ constexpr __compressed_pair(__compressed_pair const &) noexcept = default;
125
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
126
+ constexpr __compressed_pair(__compressed_pair &&) noexcept = default;
127
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
128
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
129
+ operator=(__compressed_pair const &) noexcept = default;
130
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
131
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
132
+ operator=(__compressed_pair &&) noexcept = default;
133
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
134
+ ~__compressed_pair() noexcept = default;
135
+ template <class _TLike, class _ULike>
136
+ __MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_TLike &&__t, _ULike &&__u)
137
+ : _Tp((_TLike &&) __t), __u_val((_ULike &&) __u) {}
138
+ };
139
+
140
+ // Second empty.
141
+ template <class _Tp, class _Up>
142
+ struct __compressed_pair<
143
+ _Tp, _Up,
144
+ _CUDA_VSTD::enable_if_t<!_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_empty, _Tp) && _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_empty, _Up)>>
145
+ : private _Up {
146
+ _Tp __t_val;
147
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp &__first() noexcept { return __t_val; }
148
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const &__first() const noexcept {
149
+ return __t_val;
150
+ }
151
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up &__second() noexcept {
152
+ return *static_cast<_Up *>(this);
153
+ }
154
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const &__second() const noexcept {
155
+ return *static_cast<_Up const *>(this);
156
+ }
157
+
158
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
159
+ constexpr __compressed_pair() noexcept = default;
160
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
161
+ constexpr __compressed_pair(__compressed_pair const &) noexcept = default;
162
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
163
+ constexpr __compressed_pair(__compressed_pair &&) noexcept = default;
164
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
165
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
166
+ operator=(__compressed_pair const &) noexcept = default;
167
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
168
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
169
+ operator=(__compressed_pair &&) noexcept = default;
170
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
171
+ ~__compressed_pair() noexcept = default;
172
+
173
+ template <class _TLike, class _ULike>
174
+ __MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_TLike &&__t, _ULike &&__u)
175
+ : _Up((_ULike &&) __u), __t_val((_TLike &&) __t) {}
176
+ };
177
+
178
+ // Both empty.
179
+ template <class _Tp, class _Up>
180
+ struct __compressed_pair<
181
+ _Tp, _Up,
182
+ _CUDA_VSTD::enable_if_t<_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_empty, _Tp) && _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_empty, _Up)>>
183
+ // We need to use the __no_unique_address_emulation wrapper here to avoid
184
+ // base class ambiguities.
185
+ #ifdef __MDSPAN_COMPILER_MSVC
186
+ // MSVC doesn't allow you to access public static member functions of a type
187
+ // when you *happen* to privately inherit from that type.
188
+ : protected __no_unique_address_emulation<_Tp, 0>,
189
+ protected __no_unique_address_emulation<_Up, 1>
190
+ #else
191
+ : private __no_unique_address_emulation<_Tp, 0>,
192
+ private __no_unique_address_emulation<_Up, 1>
193
+ #endif
194
+ {
195
+ using __first_base_t = __no_unique_address_emulation<_Tp, 0>;
196
+ using __second_base_t = __no_unique_address_emulation<_Up, 1>;
197
+
198
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp &__first() noexcept {
199
+ return this->__first_base_t::__ref();
200
+ }
201
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const &__first() const noexcept {
202
+ return this->__first_base_t::__ref();
203
+ }
204
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up &__second() noexcept {
205
+ return this->__second_base_t::__ref();
206
+ }
207
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const &__second() const noexcept {
208
+ return this->__second_base_t::__ref();
209
+ }
210
+
211
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
212
+ constexpr __compressed_pair() noexcept = default;
213
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
214
+ constexpr __compressed_pair(__compressed_pair const &) noexcept = default;
215
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
216
+ constexpr __compressed_pair(__compressed_pair &&) noexcept = default;
217
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
218
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
219
+ operator=(__compressed_pair const &) noexcept = default;
220
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
221
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
222
+ operator=(__compressed_pair &&) noexcept = default;
223
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
224
+ ~__compressed_pair() noexcept = default;
225
+ template <class _TLike, class _ULike>
226
+ __MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_TLike &&__t, _ULike &&__u) noexcept
227
+ : __first_base_t(_Tp((_TLike &&) __t)),
228
+ __second_base_t(_Up((_ULike &&) __u))
229
+ { }
230
+ };
231
+
232
+ #endif // !_LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
233
+
234
+ } // end namespace __detail
235
+
236
+ #endif // _LIBCUDACXX_STD_VER > 11
237
+
238
+ _LIBCUDACXX_END_NAMESPACE_STD
239
+
240
+ #endif // _LIBCUDACXX___MDSPAN_COMPRESSED_PAIR_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/config.h ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_CONFIG_HPP
45
+ #define _LIBCUDACXX___MDSPAN_CONFIG_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #if _LIBCUDACXX_STD_VER > 11
52
+
53
+ #ifndef __has_include
54
+ # define __has_include(x) 0
55
+ #endif
56
+
57
+ #ifndef __cuda_std__
58
+ #if __has_include(<version>)
59
+ # include <version>
60
+ #else
61
+ # include <type_traits>
62
+ # include <utility>
63
+ #endif
64
+ #endif
65
+
66
+ #ifdef _MSVC_LANG
67
+ #define __MDSPAN_CPLUSPLUS _MSVC_LANG
68
+ #else
69
+ #define __MDSPAN_CPLUSPLUS __cplusplus
70
+ #endif
71
+
72
+ #define __MDSPAN_CXX_STD_14 201402L
73
+ #define __MDSPAN_CXX_STD_17 201703L
74
+ #define __MDSPAN_CXX_STD_20 202002L
75
+
76
+ #define __MDSPAN_HAS_CXX_14 (__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_14)
77
+ #define __MDSPAN_HAS_CXX_17 (__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_17)
78
+ #define __MDSPAN_HAS_CXX_20 (__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_20)
79
+
80
+ static_assert(__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_14, "mdspan requires C++14 or later.");
81
+
82
+ #ifndef __MDSPAN_COMPILER_CLANG
83
+ # if defined(__clang__)
84
+ # define __MDSPAN_COMPILER_CLANG __clang__
85
+ # endif
86
+ #endif
87
+
88
+ #if !defined(__MDSPAN_COMPILER_MSVC) && !defined(__MDSPAN_COMPILER_MSVC_CLANG)
89
+ # if defined(_MSC_VER)
90
+ # if !defined(__MDSPAN_COMPILER_CLANG)
91
+ # define __MDSPAN_COMPILER_MSVC _MSC_VER
92
+ # else
93
+ # define __MDSPAN_COMPILER_MSVC_CLANG _MSC_VER
94
+ # endif
95
+ # endif
96
+ #endif
97
+
98
+ #ifndef __MDSPAN_COMPILER_INTEL
99
+ # ifdef __INTEL_COMPILER
100
+ # define __MDSPAN_COMPILER_INTEL __INTEL_COMPILER
101
+ # endif
102
+ #endif
103
+
104
+ #ifndef __MDSPAN_COMPILER_APPLECLANG
105
+ # ifdef __apple_build_version__
106
+ # define __MDSPAN_COMPILER_APPLECLANG __apple_build_version__
107
+ # endif
108
+ #endif
109
+
110
+ #ifndef __MDSPAN_HAS_CUDA
111
+ # if defined(__CUDACC__)
112
+ # define __MDSPAN_HAS_CUDA __CUDACC__
113
+ # endif
114
+ #endif
115
+
116
+ #ifndef __MDSPAN_HAS_HIP
117
+ # if defined(__HIPCC__)
118
+ # define __MDSPAN_HAS_HIP __HIPCC__
119
+ # endif
120
+ #endif
121
+
122
+ #ifndef __has_cpp_attribute
123
+ # define __has_cpp_attribute(x) 0
124
+ #endif
125
+
126
+ #ifndef __MDSPAN_PRESERVE_STANDARD_LAYOUT
127
+ // Preserve standard layout by default, but we're not removing the old version
128
+ // that turns this off until we're sure this doesn't have an unreasonable cost
129
+ // to the compiler or optimizer.
130
+ # define __MDSPAN_PRESERVE_STANDARD_LAYOUT 1
131
+ #endif
132
+
133
+ #ifndef __MDSPAN_USE_CONCEPTS
134
+ // Looks like concepts doesn't work in CUDA 12
135
+ # if defined(__cpp_concepts) && __cpp_concepts >= 201507L && !defined __cuda_std__
136
+ # define __MDSPAN_USE_CONCEPTS 1
137
+ # endif
138
+ #endif
139
+
140
+ #ifndef __MDSPAN_USE_FOLD_EXPRESSIONS
141
+ # if (defined(__cpp_fold_expressions) && __cpp_fold_expressions >= 201603L) \
142
+ || (!defined(__cpp_fold_expressions) && __MDSPAN_HAS_CXX_17)
143
+ # define __MDSPAN_USE_FOLD_EXPRESSIONS 1
144
+ # endif
145
+ #endif
146
+
147
+ #ifndef __MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS
148
+ # if (!(defined(__cpp_lib_type_trait_variable_templates) && __cpp_lib_type_trait_variable_templates >= 201510L) \
149
+ || !__MDSPAN_HAS_CXX_17)
150
+ # if !(defined(__MDSPAN_COMPILER_APPLECLANG) && __MDSPAN_HAS_CXX_17)
151
+ # define __MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS 1
152
+ # endif
153
+ # endif
154
+ #endif
155
+
156
+ #ifndef __MDSPAN_USE_VARIABLE_TEMPLATES
157
+ # if (defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304 && __MDSPAN_HAS_CXX_17) \
158
+ || (!defined(__cpp_variable_templates) && __MDSPAN_HAS_CXX_17)
159
+ # define __MDSPAN_USE_VARIABLE_TEMPLATES 1
160
+ # endif
161
+ #endif // __MDSPAN_USE_VARIABLE_TEMPLATES
162
+
163
+ #ifndef __MDSPAN_USE_CONSTEXPR_14
164
+ # if (defined(__cpp_constexpr) && __cpp_constexpr >= 201304) \
165
+ || (!defined(__cpp_constexpr) && __MDSPAN_HAS_CXX_14) \
166
+ && (!(defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1700))
167
+ # define __MDSPAN_USE_CONSTEXPR_14 1
168
+ # endif
169
+ #endif
170
+
171
+ #ifndef __MDSPAN_USE_INTEGER_SEQUENCE
172
+ # if defined(__MDSPAN_COMPILER_MSVC)
173
+ # if (defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304)
174
+ # define __MDSPAN_USE_INTEGER_SEQUENCE 1
175
+ # endif
176
+ # endif
177
+ #endif
178
+ #ifndef __MDSPAN_USE_INTEGER_SEQUENCE
179
+ # if (defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304) \
180
+ || (!defined(__cpp_lib_integer_sequence) && __MDSPAN_HAS_CXX_14) \
181
+ /* as far as I can tell, libc++ seems to think this is a C++11 feature... */ \
182
+ || (defined(__GLIBCXX__) && __GLIBCXX__ > 20150422 && __GNUC__ < 5 && !defined(__INTEL_CXX11_MODE__))
183
+ // several compilers lie about integer_sequence working properly unless the C++14 standard is used
184
+ # define __MDSPAN_USE_INTEGER_SEQUENCE 1
185
+ # elif defined(__MDSPAN_COMPILER_APPLECLANG) && __MDSPAN_HAS_CXX_14
186
+ // appleclang seems to be missing the __cpp_lib_... macros, but doesn't seem to lie about C++14 making
187
+ // integer_sequence work
188
+ # define __MDSPAN_USE_INTEGER_SEQUENCE 1
189
+ # endif
190
+ #endif
191
+
192
+ #ifndef __MDSPAN_USE_RETURN_TYPE_DEDUCTION
193
+ # if (defined(__cpp_return_type_deduction) && __cpp_return_type_deduction >= 201304) \
194
+ || (!defined(__cpp_return_type_deduction) && __MDSPAN_HAS_CXX_14)
195
+ # define __MDSPAN_USE_RETURN_TYPE_DEDUCTION 1
196
+ # endif
197
+ #endif
198
+
199
+ #ifndef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
200
+ // GCC 10 is known not to work with CTAD for this case.
201
+ # if (defined(__MDSPAN_COMPILER_CLANG) || !defined(_LIBCUDACXX_COMPILER_GCC) || __GNUC__ >= 11) \
202
+ && ((defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201703) \
203
+ || (!defined(__cpp_deduction_guides) && __MDSPAN_HAS_CXX_17))
204
+ # define __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
205
+ # endif
206
+ #endif
207
+
208
+ #ifndef __MDSPAN_USE_ALIAS_TEMPLATE_ARGUMENT_DEDUCTION
209
+ // GCC 10 is known not to work with CTAD for this case.
210
+ # if (defined(__MDSPAN_COMPILER_CLANG) || !defined(_LIBCUDACXX_COMPILER_GCC) || __GNUC__ >= 11) \
211
+ && ((defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201907) \
212
+ || (!defined(__cpp_deduction_guides) && __MDSPAN_HAS_CXX_20))
213
+ # define __MDSPAN_USE_ALIAS_TEMPLATE_ARGUMENT_DEDUCTION 1
214
+ # endif
215
+ #endif
216
+
217
+ #ifndef __MDSPAN_USE_STANDARD_TRAIT_ALIASES
218
+ # if (defined(__cpp_lib_transformation_trait_aliases) && __cpp_lib_transformation_trait_aliases >= 201304) \
219
+ || (!defined(__cpp_lib_transformation_trait_aliases) && __MDSPAN_HAS_CXX_14)
220
+ # define __MDSPAN_USE_STANDARD_TRAIT_ALIASES 1
221
+ # elif defined(__MDSPAN_COMPILER_APPLECLANG) && __MDSPAN_HAS_CXX_14
222
+ // appleclang seems to be missing the __cpp_lib_... macros, but doesn't seem to lie about C++14
223
+ # define __MDSPAN_USE_STANDARD_TRAIT_ALIASES 1
224
+ # endif
225
+ #endif
226
+
227
+ #ifndef __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND
228
+ # ifdef __GNUC__
229
+ # if __GNUC__ < 9
230
+ # define __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND 1
231
+ # endif
232
+ # endif
233
+ #endif
234
+
235
+ #ifndef __MDSPAN_CONDITIONAL_EXPLICIT
236
+ # if __MDSPAN_HAS_CXX_20 && !defined(__MDSPAN_COMPILER_MSVC)
237
+ # define __MDSPAN_CONDITIONAL_EXPLICIT(COND) explicit(COND)
238
+ # else
239
+ # define __MDSPAN_CONDITIONAL_EXPLICIT(COND)
240
+ # endif
241
+ #endif
242
+
243
+ #ifndef __MDSPAN_USE_BRACKET_OPERATOR
244
+ # if defined(__cpp_multidimensional_subscript)
245
+ # define __MDSPAN_USE_BRACKET_OPERATOR 1
246
+ # else
247
+ # define __MDSPAN_USE_BRACKET_OPERATOR 0
248
+ # endif
249
+ #endif
250
+
251
+ #ifndef __MDSPAN_USE_PAREN_OPERATOR
252
+ # if !__MDSPAN_USE_BRACKET_OPERATOR
253
+ # define __MDSPAN_USE_PAREN_OPERATOR 1
254
+ # else
255
+ # define __MDSPAN_USE_PAREN_OPERATOR 0
256
+ # endif
257
+ #endif
258
+
259
+ #if __MDSPAN_USE_BRACKET_OPERATOR
260
+ # define __MDSPAN_OP(mds,...) mds[__VA_ARGS__]
261
+ // Corentins demo compiler for subscript chokes on empty [] call,
262
+ // though I believe the proposal supports it?
263
+ #ifdef __MDSPAN_NO_EMPTY_BRACKET_OPERATOR
264
+ # define __MDSPAN_OP0(mds) mds.accessor().access(mds.data_handle(),0)
265
+ #else
266
+ # define __MDSPAN_OP0(mds) mds[]
267
+ #endif
268
+ # define __MDSPAN_OP1(mds, a) mds[a]
269
+ # define __MDSPAN_OP2(mds, a, b) mds[a,b]
270
+ # define __MDSPAN_OP3(mds, a, b, c) mds[a,b,c]
271
+ # define __MDSPAN_OP4(mds, a, b, c, d) mds[a,b,c,d]
272
+ # define __MDSPAN_OP5(mds, a, b, c, d, e) mds[a,b,c,d,e]
273
+ # define __MDSPAN_OP6(mds, a, b, c, d, e, f) mds[a,b,c,d,e,f]
274
+ #else
275
+ # define __MDSPAN_OP(mds,...) mds(__VA_ARGS__)
276
+ # define __MDSPAN_OP0(mds) mds()
277
+ # define __MDSPAN_OP1(mds, a) mds(a)
278
+ # define __MDSPAN_OP2(mds, a, b) mds(a,b)
279
+ # define __MDSPAN_OP3(mds, a, b, c) mds(a,b,c)
280
+ # define __MDSPAN_OP4(mds, a, b, c, d) mds(a,b,c,d)
281
+ # define __MDSPAN_OP5(mds, a, b, c, d, e) mds(a,b,c,d,e)
282
+ # define __MDSPAN_OP6(mds, a, b, c, d, e, f) mds(a,b,c,d,e,f)
283
+ #endif
284
+
285
+ #endif // _LIBCUDACXX_STD_VER > 11
286
+
287
+ #endif // _LIBCUDACXX___MDSPAN_CONFIG_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/default_accessor.h ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_DEFAULT_ACCESSOR_HPP
45
+ #define _LIBCUDACXX___MDSPAN_DEFAULT_ACCESSOR_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__mdspan/macros.h"
52
+ #include "../__type_traits/is_convertible.h"
53
+ #include "../cstddef"
54
+
55
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
56
+ #pragma GCC system_header
57
+ #endif
58
+
59
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
60
+
61
+ #if _LIBCUDACXX_STD_VER > 11
62
+
63
+ template <class _ElementType>
64
+ struct default_accessor {
65
+
66
+ using offset_policy = default_accessor;
67
+ using element_type = _ElementType;
68
+ using reference = _ElementType&;
69
+ using data_handle_type = _ElementType*;
70
+
71
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr default_accessor() noexcept = default;
72
+
73
+ __MDSPAN_TEMPLATE_REQUIRES(
74
+ class _OtherElementType,
75
+ /* requires */ (
76
+ _LIBCUDACXX_TRAIT(is_convertible, _OtherElementType(*)[], element_type(*)[])
77
+ )
78
+ )
79
+ __MDSPAN_INLINE_FUNCTION
80
+ constexpr default_accessor(default_accessor<_OtherElementType>) noexcept {}
81
+
82
+ __MDSPAN_INLINE_FUNCTION
83
+ constexpr data_handle_type
84
+ offset(data_handle_type __p, size_t __i) const noexcept {
85
+ return __p + __i;
86
+ }
87
+
88
+ __MDSPAN_FORCE_INLINE_FUNCTION
89
+ constexpr reference access(data_handle_type __p, size_t __i) const noexcept {
90
+ return __p[__i];
91
+ }
92
+
93
+ };
94
+
95
+ #endif // _LIBCUDACXX_STD_VER > 11
96
+
97
+ _LIBCUDACXX_END_NAMESPACE_STD
98
+
99
+ #endif // _LIBCUDACXX___MDSPAN_DEFAULT_ACCESSOR_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/dynamic_extent.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_DYNAMIC_EXTENT_HPP
45
+ #define _LIBCUDACXX___MDSPAN_DYNAMIC_EXTENT_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__fwd/span.h" // dynamic_extent
52
+ #include "../__mdspan/macros.h"
53
+ #include "../cstddef"
54
+ #include "../limits" // numeric_limits
55
+
56
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
57
+ #endif
58
+
59
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
60
+
61
+ #if _LIBCUDACXX_STD_VER > 11
62
+
63
+ namespace __detail {
64
+
65
+ template <class>
66
+ _LIBCUDACXX_HOST_DEVICE constexpr auto __make_dynamic_extent() {
67
+ return dynamic_extent;
68
+ }
69
+
70
+ template <size_t>
71
+ _LIBCUDACXX_HOST_DEVICE constexpr auto __make_dynamic_extent_integral() {
72
+ return dynamic_extent;
73
+ }
74
+
75
+ } // end namespace __detail
76
+
77
+ #endif // _LIBCUDACXX_STD_VER > 11
78
+
79
+ _LIBCUDACXX_END_NAMESPACE_STD
80
+
81
+ //==============================================================================================================
82
+
83
+ #endif // _LIBCUDACXX___MDSPAN_DYNAMIC_EXTENT_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/extents.h ADDED
@@ -0,0 +1,579 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_EXTENTS_HPP
45
+ #define _LIBCUDACXX___MDSPAN_EXTENTS_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__mdspan/macros.h"
52
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
53
+ #include "../__mdspan/no_unique_address.h"
54
+ #endif
55
+ #include "../__mdspan/static_array.h"
56
+ #include "../__mdspan/standard_layout_static_array.h"
57
+ #include "../__type_traits/conditional.h"
58
+ #include "../__type_traits/integral_constant.h"
59
+ #include "../__type_traits/is_convertible.h"
60
+ #include "../__type_traits/is_nothrow_constructible.h"
61
+ #include "../__type_traits/make_unsigned.h"
62
+ #include "../__utility/integer_sequence.h"
63
+ #include "../array"
64
+ #include "../cstddef"
65
+ #include "../span"
66
+
67
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
68
+ #pragma GCC system_header
69
+ #endif
70
+
71
+ #if defined(_LIBCUDACXX_PUSH_MACROS)
72
+ _LIBCUDACXX_PUSH_MACROS
73
+ #endif
74
+ #include "../__undef_macros"
75
+
76
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
77
+
78
+ #if _LIBCUDACXX_STD_VER > 11
79
+
80
+ namespace __detail {
81
+
82
+ template<size_t ... _Extents>
83
+ struct __count_dynamic_extents;
84
+
85
+ template<size_t _Ep, size_t ... _Extents>
86
+ struct __count_dynamic_extents<_Ep,_Extents...> {
87
+ static constexpr size_t val = (_Ep==dynamic_extent?1:0) + __count_dynamic_extents<_Extents...>::val;
88
+ };
89
+
90
+ template<>
91
+ struct __count_dynamic_extents<> {
92
+ static constexpr size_t val = 0;
93
+ };
94
+
95
+ template <size_t... _Extents, size_t... _OtherExtents>
96
+ _LIBCUDACXX_HOST_DEVICE
97
+ static constexpr false_type __check_compatible_extents(
98
+ false_type, _CUDA_VSTD::integer_sequence<size_t, _Extents...>, _CUDA_VSTD::integer_sequence<size_t, _OtherExtents...>
99
+ ) noexcept { return { }; }
100
+
101
+ // This helper prevents ICE's on MSVC.
102
+ template <size_t _Lhs, size_t _Rhs>
103
+ struct __compare_extent_compatible : integral_constant<bool,
104
+ _Lhs == dynamic_extent ||
105
+ _Rhs == dynamic_extent ||
106
+ _Lhs == _Rhs>
107
+ {};
108
+
109
+ template <size_t... _Extents, size_t... _OtherExtents>
110
+ static integral_constant<
111
+ bool,
112
+ __MDSPAN_FOLD_AND(
113
+ (
114
+ __compare_extent_compatible<_Extents, _OtherExtents>::value
115
+ ) /* && ... */
116
+ )
117
+ >
118
+ _LIBCUDACXX_HOST_DEVICE
119
+ __check_compatible_extents(
120
+ true_type, _CUDA_VSTD::integer_sequence<size_t, _Extents...>, _CUDA_VSTD::integer_sequence<size_t, _OtherExtents...>
121
+ ) noexcept { return { }; }
122
+
123
+ struct __extents_tag { };
124
+
125
+ } // end namespace __detail
126
+
127
+ template <class _ThisIndexType, size_t... _Extents>
128
+ class extents
129
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
130
+ : private __detail::__no_unique_address_emulation<
131
+ __detail::__partially_static_sizes_tagged<__detail::__extents_tag, _ThisIndexType , size_t, _Extents...>>
132
+ #endif
133
+ {
134
+ public:
135
+
136
+ using rank_type = size_t;
137
+ using index_type = _ThisIndexType;
138
+ using size_type = make_unsigned_t<index_type>;
139
+
140
+ // internal typedefs which for technical reasons are public
141
+ using __storage_t = __detail::__partially_static_sizes_tagged<__detail::__extents_tag, index_type, size_t, _Extents...>;
142
+
143
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
144
+ _LIBCUDACXX_NO_UNIQUE_ADDRESS __storage_t __storage_;
145
+ #else
146
+ using __base_t = __detail::__no_unique_address_emulation<__storage_t>;
147
+ #endif
148
+
149
+ // private members dealing with the way we internally store dynamic extents
150
+ private:
151
+
152
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr
153
+ __storage_t& __storage() noexcept {
154
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
155
+ return __storage_;
156
+ #else
157
+ return this->__base_t::__ref();
158
+ #endif
159
+ }
160
+ __MDSPAN_FORCE_INLINE_FUNCTION
161
+ constexpr __storage_t const& __storage() const noexcept {
162
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
163
+ return __storage_;
164
+ #else
165
+ return this->__base_t::__ref();
166
+ #endif
167
+ }
168
+
169
+ template <size_t... _Idxs>
170
+ __MDSPAN_FORCE_INLINE_FUNCTION
171
+ static constexpr
172
+ index_type _static_extent_impl(size_t __n, _CUDA_VSTD::integer_sequence<size_t, _Idxs...>) noexcept {
173
+ return __MDSPAN_FOLD_PLUS_RIGHT(((_Idxs == __n) ? _Extents : 0), /* + ... + */ 0);
174
+ }
175
+
176
+ template <class, size_t...>
177
+ friend class extents;
178
+
179
+ template <class _OtherIndexType, size_t... _OtherExtents, size_t... _Idxs>
180
+ __MDSPAN_INLINE_FUNCTION
181
+ constexpr bool _eq_impl(_CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...>, false_type, _CUDA_VSTD::index_sequence<_Idxs...>) const noexcept { return false; }
182
+ template <class _OtherIndexType, size_t... _OtherExtents, size_t... _Idxs>
183
+ __MDSPAN_INLINE_FUNCTION
184
+ constexpr bool _eq_impl(
185
+ _CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...> __other,
186
+ true_type, _CUDA_VSTD::index_sequence<_Idxs...>
187
+ ) const noexcept {
188
+ return __MDSPAN_FOLD_AND(
189
+ (__storage().template __get_n<_Idxs>() == __other.__storage().template __get_n<_Idxs>()) /* && ... */
190
+ );
191
+ }
192
+
193
+ template <class _OtherIndexType, size_t... _OtherExtents, size_t... _Idxs>
194
+ __MDSPAN_INLINE_FUNCTION
195
+ constexpr bool _not_eq_impl(_CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...>, false_type, _CUDA_VSTD::index_sequence<_Idxs...>) const noexcept { return true; }
196
+ template <class _OtherIndexType, size_t... _OtherExtents, size_t... _Idxs>
197
+ __MDSPAN_INLINE_FUNCTION
198
+ constexpr bool _not_eq_impl(
199
+ _CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...> __other,
200
+ true_type, _CUDA_VSTD::index_sequence<_Idxs...>
201
+ ) const noexcept {
202
+ return __MDSPAN_FOLD_OR(
203
+ (__storage().template __get_n<_Idxs>() != __other.__storage().template __get_n<_Idxs>()) /* || ... */
204
+ );
205
+ }
206
+
207
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
208
+ __MDSPAN_INLINE_FUNCTION constexpr explicit
209
+ extents(__base_t&& __b) noexcept
210
+ : __base_t(_CUDA_VSTD::move(__b))
211
+ { }
212
+ #endif
213
+
214
+
215
+ // public interface:
216
+ public:
217
+ /* Defined above for use in the private code
218
+ using rank_type = size_t;
219
+ using index_type = _ThisIndexType;
220
+ */
221
+
222
+ __MDSPAN_INLINE_FUNCTION
223
+ static constexpr rank_type rank() noexcept { return sizeof...(_Extents); }
224
+ __MDSPAN_INLINE_FUNCTION
225
+ static constexpr rank_type rank_dynamic() noexcept { return __MDSPAN_FOLD_PLUS_RIGHT((rank_type(_Extents == dynamic_extent)), /* + ... + */ 0); }
226
+
227
+ //--------------------------------------------------------------------------------
228
+ // Constructors, Destructors, and Assignment
229
+
230
+ // Default constructor
231
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr extents() noexcept = default;
232
+
233
+ // Converting constructor
234
+ __MDSPAN_TEMPLATE_REQUIRES(
235
+ class _OtherIndexType, size_t... _OtherExtents,
236
+ /* requires */ (
237
+ /* multi-stage check to protect from invalid pack expansion when sizes don't match? */
238
+ decltype(__detail::__check_compatible_extents(
239
+ integral_constant<bool, sizeof...(_Extents) == sizeof...(_OtherExtents)>{},
240
+ _CUDA_VSTD::integer_sequence<size_t, _Extents...>{},
241
+ _CUDA_VSTD::integer_sequence<size_t, _OtherExtents...>{}
242
+ ))::value
243
+ )
244
+ )
245
+ __MDSPAN_INLINE_FUNCTION
246
+ __MDSPAN_CONDITIONAL_EXPLICIT(
247
+ (((_Extents != dynamic_extent) && (_OtherExtents == dynamic_extent)) || ...) ||
248
+ (_CUDA_VSTD::numeric_limits<index_type>::max() < _CUDA_VSTD::numeric_limits<_OtherIndexType>::max()))
249
+ constexpr extents(const extents<_OtherIndexType, _OtherExtents...>& __other)
250
+ noexcept
251
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
252
+ : __storage_{
253
+ #else
254
+ : __base_t(__base_t{__storage_t{
255
+ #endif
256
+ __other.__storage().__enable_psa_conversion()
257
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
258
+ }
259
+ #else
260
+ }})
261
+ #endif
262
+ {
263
+ /* TODO: precondition check
264
+ * __other.extent(r) equals Er for each r for which Er is a static extent, and
265
+ * either
266
+ * - sizeof...(_OtherExtents) is zero, or
267
+ * - __other.extent(r) is a representable value of type index_type for all rank index r of __other
268
+ */
269
+ }
270
+
271
+ #ifdef __NVCC__
272
+ __MDSPAN_TEMPLATE_REQUIRES(
273
+ class... _Integral,
274
+ /* requires */ (
275
+ // TODO: check whether the other version works with newest NVCC, doesn't with 11.4
276
+ // NVCC seems to pick up rank_dynamic from the wrong extents type???
277
+ __MDSPAN_FOLD_AND(_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _Integral, index_type) /* && ... */) &&
278
+ __MDSPAN_FOLD_AND(_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Integral) /* && ... */) &&
279
+ // NVCC chokes on the fold thingy here so wrote the workaround
280
+ ((sizeof...(_Integral) == __detail::__count_dynamic_extents<_Extents...>::val) ||
281
+ (sizeof...(_Integral) == sizeof...(_Extents)))
282
+ )
283
+ )
284
+ #else
285
+ __MDSPAN_TEMPLATE_REQUIRES(
286
+ class... _Integral,
287
+ /* requires */ (
288
+ __MDSPAN_FOLD_AND(_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _Integral, index_type) /* && ... */) &&
289
+ __MDSPAN_FOLD_AND(_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Integral) /* && ... */) &&
290
+ ((sizeof...(_Integral) == rank_dynamic()) || (sizeof...(_Integral) == rank()))
291
+ )
292
+ )
293
+ #endif
294
+ __MDSPAN_INLINE_FUNCTION
295
+ explicit constexpr extents(_Integral... __exts) noexcept
296
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
297
+ : __storage_{
298
+ #else
299
+ : __base_t(__base_t{typename __base_t::__stored_type{
300
+ #endif
301
+ _CUDA_VSTD::conditional_t<sizeof...(_Integral)==rank_dynamic(),
302
+ __detail::__construct_psa_from_dynamic_exts_values_tag_t,
303
+ __detail::__construct_psa_from_all_exts_values_tag_t>(),
304
+ static_cast<index_type>(__exts)...
305
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
306
+ }
307
+ #else
308
+ }})
309
+ #endif
310
+ {
311
+ /* TODO: precondition check
312
+ * If sizeof...(_IndexTypes) != rank_dynamic() is true, exts_arr[r] equals Er for each r for which Er is a static extent, and
313
+ * either
314
+ * - sizeof...(__exts) == 0 is true, or
315
+ * - each element of __exts is nonnegative and is a representable value of type index_type.
316
+ */
317
+ }
318
+
319
+ // TODO: check whether this works with newest NVCC, doesn't with 11.4
320
+ #ifdef __NVCC__
321
+ // NVCC seems to pick up rank_dynamic from the wrong extents type???
322
+ // NVCC chokes on the fold thingy here so wrote the workaround
323
+ __MDSPAN_TEMPLATE_REQUIRES(
324
+ class _IndexType, size_t _Np,
325
+ /* requires */ (
326
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) &&
327
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) &&
328
+ ((_Np == __detail::__count_dynamic_extents<_Extents...>::val) ||
329
+ (_Np == sizeof...(_Extents)))
330
+ )
331
+ )
332
+ #else
333
+ __MDSPAN_TEMPLATE_REQUIRES(
334
+ class _IndexType, size_t _Np,
335
+ /* requires */ (
336
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) &&
337
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) &&
338
+ (_Np == rank() || _Np == rank_dynamic())
339
+ )
340
+ )
341
+ #endif
342
+ __MDSPAN_CONDITIONAL_EXPLICIT(_Np != rank_dynamic())
343
+ __MDSPAN_INLINE_FUNCTION
344
+ constexpr
345
+ extents(_CUDA_VSTD::array<_IndexType, _Np> const& __exts) noexcept
346
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
347
+ : __storage_{
348
+ #else
349
+ : __base_t(__base_t{typename __base_t::__stored_type{
350
+ #endif
351
+ _CUDA_VSTD::conditional_t<_Np==rank_dynamic(),
352
+ __detail::__construct_psa_from_dynamic_exts_array_tag_t<0>,
353
+ __detail::__construct_psa_from_all_exts_array_tag_t>(),
354
+ _CUDA_VSTD::array<_IndexType,_Np>{__exts}
355
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
356
+ }
357
+ #else
358
+ }})
359
+ #endif
360
+ {
361
+ /* TODO: precondition check
362
+ * If _Np != rank_dynamic() is true, __exts[r] equals Er for each r for which Er is a static extent, and
363
+ * either
364
+ * - _Np is zero, or
365
+ * - __exts[r] is nonnegative and is a representable value of type index_type for all rank index r
366
+ */
367
+ }
368
+
369
+ // TODO: check whether the below works with newest NVCC, doesn't with 11.4
370
+ #ifdef __NVCC__
371
+ // NVCC seems to pick up rank_dynamic from the wrong extents type???
372
+ // NVCC chokes on the fold thingy here so wrote the workaround
373
+ __MDSPAN_TEMPLATE_REQUIRES(
374
+ class _IndexType, size_t _Np,
375
+ /* requires */ (
376
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) &&
377
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) &&
378
+ ((_Np == __detail::__count_dynamic_extents<_Extents...>::val) ||
379
+ (_Np == sizeof...(_Extents)))
380
+ )
381
+ )
382
+ #else
383
+ __MDSPAN_TEMPLATE_REQUIRES(
384
+ class _IndexType, size_t _Np,
385
+ /* requires */ (
386
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) &&
387
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) &&
388
+ (_Np == rank() || _Np == rank_dynamic())
389
+ )
390
+ )
391
+ #endif
392
+ __MDSPAN_CONDITIONAL_EXPLICIT(_Np != rank_dynamic())
393
+ __MDSPAN_INLINE_FUNCTION
394
+ constexpr
395
+ extents(_CUDA_VSTD::span<_IndexType, _Np> __exts) noexcept
396
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
397
+ : __storage_{
398
+ #else
399
+ : __base_t(__base_t{typename __base_t::__stored_type{
400
+ #endif
401
+ _CUDA_VSTD::conditional_t<_Np==rank_dynamic(),
402
+ __detail::__construct_psa_from_dynamic_exts_array_tag_t<0>,
403
+ __detail::__construct_psa_from_all_exts_array_tag_t>(),
404
+ __exts
405
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
406
+ }
407
+ #else
408
+ }})
409
+ #endif
410
+ {
411
+ /* TODO: precondition check
412
+ * If _Np != rank_dynamic() is true, __exts[r] equals Er for each r for which Er is a static extent, and
413
+ * either
414
+ * - _Np is zero, or
415
+ * - __exts[r] is nonnegative and is a representable value of type index_type for all rank index r
416
+ */
417
+ }
418
+
419
+ // Need this constructor for some submdspan implementation stuff
420
+ // for the layout_stride case where I use an extents object for strides
421
+ __MDSPAN_INLINE_FUNCTION
422
+ constexpr explicit
423
+ extents(__storage_t const& __sto ) noexcept
424
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
425
+ : __storage_{
426
+ #else
427
+ : __base_t(__base_t{
428
+ #endif
429
+ __sto
430
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
431
+ }
432
+ #else
433
+ })
434
+ #endif
435
+ { }
436
+
437
+ //--------------------------------------------------------------------------------
438
+
439
+ __MDSPAN_INLINE_FUNCTION
440
+ static constexpr
441
+ size_t static_extent(size_t __n) noexcept {
442
+ // Can't do assert here since that breaks true constexpr ness
443
+ // assert(__n<rank());
444
+ return _static_extent_impl(__n, _CUDA_VSTD::make_integer_sequence<size_t, sizeof...(_Extents)>{});
445
+ }
446
+
447
+ __MDSPAN_INLINE_FUNCTION
448
+ constexpr
449
+ index_type extent(size_t __n) const noexcept {
450
+ // Can't do assert here since that breaks true constexpr ness
451
+ // assert(__n<rank());
452
+ return __storage().__get(__n);
453
+ }
454
+
455
+ //--------------------------------------------------------------------------------
456
+
457
+ template<class _OtherIndexType, size_t... _RHS>
458
+ __MDSPAN_INLINE_FUNCTION
459
+ friend constexpr bool operator==(extents const& lhs, extents<_OtherIndexType, _RHS...> const& __rhs) noexcept {
460
+ return lhs._eq_impl(
461
+ __rhs, integral_constant<bool, (sizeof...(_RHS) == rank())>{},
462
+ _CUDA_VSTD::make_index_sequence<sizeof...(_RHS)>{}
463
+ );
464
+ }
465
+
466
+ #if !(__MDSPAN_HAS_CXX_20)
467
+ template<class _OtherIndexType, size_t... _RHS>
468
+ __MDSPAN_INLINE_FUNCTION
469
+ friend constexpr bool operator!=(extents const& lhs, extents<_OtherIndexType, _RHS...> const& __rhs) noexcept {
470
+ return lhs._not_eq_impl(
471
+ __rhs, integral_constant<bool, (sizeof...(_RHS) == rank())>{},
472
+ _CUDA_VSTD::make_index_sequence<sizeof...(_RHS)>{}
473
+ );
474
+ }
475
+ #endif
476
+
477
+ // End of public interface
478
+
479
+ public: // (but not really)
480
+
481
+ __MDSPAN_INLINE_FUNCTION static constexpr
482
+ extents __make_extents_impl(__detail::__partially_static_sizes<index_type, size_t,_Extents...>&& __bs) noexcept {
483
+ // This effectively amounts to a sideways cast that can be done in a constexpr
484
+ // context, but we have to do it to handle the case where the extents and the
485
+ // strides could accidentally end up with the same types in their hierarchies
486
+ // somehow (which would cause layout_stride::mapping to not be standard_layout)
487
+ return extents(
488
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
489
+ __base_t{
490
+ #endif
491
+ _CUDA_VSTD::move(__bs.template __with_tag<__detail::__extents_tag>())
492
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
493
+ }
494
+ #endif
495
+ );
496
+ }
497
+
498
+ template <size_t _Np>
499
+ __MDSPAN_FORCE_INLINE_FUNCTION
500
+ constexpr
501
+ index_type __extent() const noexcept {
502
+ return __storage().template __get_n<_Np>();
503
+ }
504
+
505
+ template <size_t _Np, size_t _Default=dynamic_extent>
506
+ __MDSPAN_INLINE_FUNCTION
507
+ static constexpr
508
+ index_type __static_extent() noexcept {
509
+ return __storage_t::template __get_static_n<_Np, _Default>();
510
+ }
511
+
512
+ };
513
+
514
+ namespace __detail {
515
+
516
+ template <class _IndexType, size_t _Rank, class _Extents = _CUDA_VSTD::extents<_IndexType>>
517
+ struct __make_dextents;
518
+
519
+ template <class _IndexType, size_t _Rank, size_t... _ExtentsPack>
520
+ struct __make_dextents<_IndexType, _Rank, _CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> {
521
+ using type = typename __make_dextents<_IndexType, _Rank - 1,
522
+ _CUDA_VSTD::extents<_IndexType, _CUDA_VSTD::dynamic_extent, _ExtentsPack...>>::type;
523
+ };
524
+
525
+ template <class _IndexType, size_t... _ExtentsPack>
526
+ struct __make_dextents<_IndexType, 0, _CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> {
527
+ using type = _CUDA_VSTD::extents<_IndexType, _ExtentsPack...>;
528
+ };
529
+
530
+ } // end namespace __detail
531
+
532
+ template <class _IndexType, size_t _Rank>
533
+ using dextents = typename __detail::__make_dextents<_IndexType, _Rank>::type;
534
+
535
+ #if defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
536
+ template <class... _IndexTypes>
537
+ extents(_IndexTypes...)
538
+ // Workaround for nvcc
539
+ //-> extents<size_t, __detail::__make_dynamic_extent<_IndexTypes>()...>;
540
+ // Adding "(void)" so that clang doesn't complain this is unused
541
+ -> extents<size_t, size_t(((void)_IndexTypes(), -1))...>;
542
+ #endif
543
+
544
+ namespace __detail {
545
+
546
+ template <class _Tp>
547
+ struct __is_extents : false_type {};
548
+
549
+ template <class _IndexType, size_t... _ExtentsPack>
550
+ struct __is_extents<_CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> : true_type {};
551
+
552
+ template <class _Tp>
553
+ static constexpr bool __is_extents_v = __is_extents<_Tp>::value;
554
+
555
+
556
+ template <typename _Extents>
557
+ struct __extents_to_partially_static_sizes;
558
+
559
+ template <class _IndexType, size_t... _ExtentsPack>
560
+ struct __extents_to_partially_static_sizes<_CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> {
561
+ using type = __detail::__partially_static_sizes<
562
+ typename _CUDA_VSTD::extents<_IndexType, _ExtentsPack...>::index_type, size_t,
563
+ _ExtentsPack...>;
564
+ };
565
+
566
+ template <typename _Extents>
567
+ using __extents_to_partially_static_sizes_t = typename __extents_to_partially_static_sizes<_Extents>::type;
568
+
569
+ } // end namespace __detail
570
+
571
+ #endif // _LIBCUDACXX_STD_VER > 11
572
+
573
+ _LIBCUDACXX_END_NAMESPACE_STD
574
+
575
+ #if defined(_LIBCUDACXX_POP_MACROS)
576
+ _LIBCUDACXX_POP_MACROS
577
+ #endif
578
+
579
+ #endif // _LIBCUDACXX___MDSPAN_EXTENTS_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/full_extent_t.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_FULL_EXTENT_T_HPP
45
+ #define _LIBCUDACXX___MDSPAN_FULL_EXTENT_T_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__mdspan/macros.h"
52
+
53
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
54
+ #pragma GCC system_header
55
+ #endif
56
+
57
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
58
+
59
+ #if _LIBCUDACXX_STD_VER > 11
60
+
61
+ struct full_extent_t { explicit full_extent_t() = default; };
62
+
63
+ _LIBCUDACXX_INLINE_VAR constexpr auto full_extent = full_extent_t{ };
64
+
65
+ #endif // _LIBCUDACXX_STD_VER > 11
66
+
67
+ _LIBCUDACXX_END_NAMESPACE_STD
68
+
69
+ #endif // _LIBCUDACXX___MDSPAN_FULL_EXTENT_T_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/layout_left.h ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_HPP
45
+ #define _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__assert"
52
+ #include "../__mdspan/extents.h"
53
+ #include "../__mdspan/macros.h"
54
+ #include "../__type_traits/is_constructible.h"
55
+ #include "../__type_traits/is_convertible.h"
56
+ #include "../__type_traits/is_nothrow_constructible.h"
57
+ #include "../__utility/integer_sequence.h"
58
+ #include "../cstddef"
59
+
60
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
61
+ #pragma GCC system_header
62
+ #endif
63
+
64
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
65
+
66
+ #if _LIBCUDACXX_STD_VER > 11
67
+
68
+ //==============================================================================
69
+
70
+ template <class _Extents>
71
+ class layout_left::mapping {
72
+ public:
73
+ using extents_type = _Extents;
74
+ using index_type = typename extents_type::index_type;
75
+ using size_type = typename extents_type::size_type;
76
+ using rank_type = typename extents_type::rank_type;
77
+ using layout_type = layout_left;
78
+ private:
79
+
80
+ static_assert(__detail::__is_extents_v<extents_type>, "layout_left::mapping must be instantiated with a specialization of _CUDA_VSTD::extents.");
81
+
82
+ template <class>
83
+ friend class mapping;
84
+
85
+ // i0+(i1 + E(1)*(i2 + E(2)*i3))
86
+ template <size_t _r, size_t _Rank>
87
+ struct __rank_count {};
88
+
89
+ template <size_t _r, size_t _Rank, class _Ip, class... _Indices>
90
+ _LIBCUDACXX_HOST_DEVICE
91
+ constexpr index_type __compute_offset(
92
+ __rank_count<_r,_Rank>, const _Ip& __i, _Indices... __idx) const {
93
+ return __compute_offset(__rank_count<_r+1,_Rank>(), __idx...) *
94
+ __extents.template __extent<_r>() + __i;
95
+ }
96
+
97
+ template<class _Ip>
98
+ _LIBCUDACXX_HOST_DEVICE
99
+ constexpr index_type __compute_offset(
100
+ __rank_count<extents_type::rank()-1,extents_type::rank()>, const _Ip& __i) const {
101
+ return __i;
102
+ }
103
+
104
+ _LIBCUDACXX_HOST_DEVICE
105
+ constexpr index_type __compute_offset(__rank_count<0,0>) const { return 0; }
106
+
107
+ public:
108
+
109
+ //--------------------------------------------------------------------------------
110
+
111
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping() noexcept = default;
112
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(mapping const&) noexcept = default;
113
+
114
+ _LIBCUDACXX_HOST_DEVICE
115
+ constexpr mapping(extents_type const& __exts) noexcept
116
+ :__extents(__exts)
117
+ { }
118
+
119
+ __MDSPAN_TEMPLATE_REQUIRES(
120
+ class _OtherExtents,
121
+ /* requires */ (
122
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents)
123
+ )
124
+ )
125
+ __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due to comma
126
+ __MDSPAN_INLINE_FUNCTION constexpr
127
+ mapping(mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor)
128
+ :__extents(__other.extents())
129
+ {
130
+ /*
131
+ * TODO: check precondition
132
+ * __other.required_span_size() is a representable value of type index_type
133
+ */
134
+ }
135
+
136
+ __MDSPAN_TEMPLATE_REQUIRES(
137
+ class _OtherExtents,
138
+ /* requires */ (
139
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents) &&
140
+ (extents_type::rank() <= 1)
141
+ )
142
+ )
143
+ __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due to comma
144
+ __MDSPAN_INLINE_FUNCTION constexpr
145
+ mapping(layout_right::mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor)
146
+ :__extents(__other.extents())
147
+ {
148
+ /*
149
+ * TODO: check precondition
150
+ * __other.required_span_size() is a representable value of type index_type
151
+ */
152
+ }
153
+
154
+ __MDSPAN_TEMPLATE_REQUIRES(
155
+ class _OtherExtents,
156
+ /* requires */ (
157
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents)
158
+ )
159
+ )
160
+ __MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0))
161
+ __MDSPAN_INLINE_FUNCTION constexpr
162
+ mapping(layout_stride::mapping<_OtherExtents> const& __other) // NOLINT(google-explicit-constructor)
163
+ :__extents(__other.extents())
164
+ {
165
+ /*
166
+ * TODO: check precondition
167
+ * __other.required_span_size() is a representable value of type index_type
168
+ */
169
+ NV_IF_TARGET(NV_IS_HOST,(
170
+ size_t __stride = 1;
171
+ for(rank_type __r=0; __r<__extents.rank(); __r++) {
172
+ _LIBCUDACXX_THROW_RUNTIME_ERROR(__stride == static_cast<size_t>(__other.stride(__r)),
173
+ "Assigning layout_stride to layout_left with invalid strides.");
174
+ __stride *= __extents.extent(__r);
175
+ }
176
+ ))
177
+ }
178
+
179
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED __MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default;
180
+
181
+ __MDSPAN_INLINE_FUNCTION
182
+ constexpr const extents_type& extents() const noexcept {
183
+ return __extents;
184
+ }
185
+
186
+ __MDSPAN_INLINE_FUNCTION
187
+ constexpr index_type required_span_size() const noexcept {
188
+ index_type __value = 1;
189
+ for(rank_type __r=0; __r<extents_type::rank(); __r++) __value*=__extents.extent(__r);
190
+ return __value;
191
+ }
192
+
193
+ //--------------------------------------------------------------------------------
194
+
195
+ __MDSPAN_TEMPLATE_REQUIRES(
196
+ class... _Indices,
197
+ /* requires */ (
198
+ (sizeof...(_Indices) == extents_type::rank()) &&
199
+ __MDSPAN_FOLD_AND(
200
+ (_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _Indices, index_type) &&
201
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Indices))
202
+ )
203
+ )
204
+ )
205
+ _LIBCUDACXX_HOST_DEVICE
206
+ constexpr index_type operator()(_Indices... __idxs) const noexcept {
207
+ // Immediately cast incoming indices to `index_type`
208
+ return __compute_offset(__rank_count<0, extents_type::rank()>(), static_cast<index_type>(__idxs)...);
209
+ }
210
+
211
+
212
+
213
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept { return true; }
214
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept { return true; }
215
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept { return true; }
216
+
217
+ __MDSPAN_INLINE_FUNCTION constexpr bool is_unique() const noexcept { return true; }
218
+ __MDSPAN_INLINE_FUNCTION constexpr bool is_exhaustive() const noexcept { return true; }
219
+ __MDSPAN_INLINE_FUNCTION constexpr bool is_strided() const noexcept { return true; }
220
+
221
+ __MDSPAN_TEMPLATE_REQUIRES(
222
+ class _Ext = _Extents,
223
+ /* requires */ (
224
+ _Ext::rank() > 0
225
+ )
226
+ )
227
+ __MDSPAN_INLINE_FUNCTION
228
+ constexpr index_type stride(rank_type __i) const noexcept {
229
+ index_type __value = 1;
230
+ for(rank_type __r=0; __r<__i; __r++) __value*=__extents.extent(__r);
231
+ return __value;
232
+ }
233
+
234
+ template<class _OtherExtents>
235
+ __MDSPAN_INLINE_FUNCTION
236
+ friend constexpr bool operator==(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept {
237
+ return __lhs.extents() == __rhs.extents();
238
+ }
239
+
240
+ // In C++ 20 the not equal exists if equal is found
241
+ #if !(__MDSPAN_HAS_CXX_20)
242
+ template<class _OtherExtents>
243
+ __MDSPAN_INLINE_FUNCTION
244
+ friend constexpr bool operator!=(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept {
245
+ return __lhs.extents() != __rhs.extents();
246
+ }
247
+ #endif
248
+
249
+ // Not really public, but currently needed to implement fully constexpr useable submdspan:
250
+ template<size_t _Np, class _SizeType, size_t ... _Ep, size_t ... _Idx>
251
+ _LIBCUDACXX_HOST_DEVICE
252
+ constexpr index_type __get_stride(_CUDA_VSTD::extents<_SizeType, _Ep...>,_CUDA_VSTD::integer_sequence<size_t, _Idx...>) const {
253
+ return __MDSPAN_FOLD_TIMES_RIGHT((_Idx<_Np? __extents.template __extent<_Idx>():1),1);
254
+ }
255
+ template<size_t _Np>
256
+ _LIBCUDACXX_HOST_DEVICE
257
+ constexpr index_type stride() const noexcept {
258
+ return __get_stride<_Np>(__extents, _CUDA_VSTD::make_index_sequence<extents_type::rank()>());
259
+ }
260
+
261
+ private:
262
+ _LIBCUDACXX_NO_UNIQUE_ADDRESS extents_type __extents{};
263
+
264
+ };
265
+
266
+ #endif // _LIBCUDACXX_STD_VER > 11
267
+
268
+ _LIBCUDACXX_END_NAMESPACE_STD
269
+
270
+ #endif // _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/layout_right.h ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_HPP
45
+ #define _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__assert"
52
+ #include "../__mdspan/extents.h"
53
+ #include "../__mdspan/layout_stride.h"
54
+ #include "../__mdspan/macros.h"
55
+ #include "../__type_traits/is_constructible.h"
56
+ #include "../__type_traits/is_convertible.h"
57
+ #include "../__type_traits/is_nothrow_constructible.h"
58
+ #include "../__utility/integer_sequence.h"
59
+ #include "../cstddef"
60
+
61
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
62
+ #pragma GCC system_header
63
+ #endif
64
+
65
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
66
+
67
+ #if _LIBCUDACXX_STD_VER > 11
68
+
69
+ //==============================================================================
70
+ template <class _Extents>
71
+ class layout_right::mapping {
72
+ public:
73
+ using extents_type = _Extents;
74
+ using index_type = typename extents_type::index_type;
75
+ using size_type = typename extents_type::size_type;
76
+ using rank_type = typename extents_type::rank_type;
77
+ using layout_type = layout_right;
78
+ private:
79
+
80
+ static_assert(__detail::__is_extents_v<extents_type>, "layout_right::mapping must be instantiated with a specialization of _CUDA_VSTD::extents.");
81
+
82
+ template <class>
83
+ friend class mapping;
84
+
85
+ // i0+(i1 + E(1)*(i2 + E(2)*i3))
86
+ template <size_t _r, size_t _Rank>
87
+ struct __rank_count {};
88
+
89
+ template <size_t _r, size_t _Rank, class _Ip, class... _Indices>
90
+ _LIBCUDACXX_HOST_DEVICE
91
+ constexpr index_type __compute_offset(
92
+ index_type __offset, __rank_count<_r,_Rank>, const _Ip& __i, _Indices... __idx) const {
93
+ return __compute_offset(__offset * __extents.template __extent<_r>() + __i,__rank_count<_r+1,_Rank>(), __idx...);
94
+ }
95
+
96
+ template<class _Ip, class ... _Indices>
97
+ _LIBCUDACXX_HOST_DEVICE
98
+ constexpr index_type __compute_offset(
99
+ __rank_count<0,extents_type::rank()>, const _Ip& __i, _Indices... __idx) const {
100
+ return __compute_offset(__i,__rank_count<1,extents_type::rank()>(),__idx...);
101
+ }
102
+
103
+ _LIBCUDACXX_HOST_DEVICE
104
+ constexpr index_type __compute_offset(size_t __offset, __rank_count<extents_type::rank(), extents_type::rank()>) const {
105
+ return static_cast<index_type>(__offset);
106
+ }
107
+
108
+ _LIBCUDACXX_HOST_DEVICE
109
+ constexpr index_type __compute_offset(__rank_count<0,0>) const { return 0; }
110
+
111
+ public:
112
+
113
+ //--------------------------------------------------------------------------------
114
+
115
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping() noexcept = default;
116
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(mapping const&) noexcept = default;
117
+
118
+ _LIBCUDACXX_HOST_DEVICE
119
+ constexpr mapping(extents_type const& __exts) noexcept
120
+ :__extents(__exts)
121
+ { }
122
+
123
+ __MDSPAN_TEMPLATE_REQUIRES(
124
+ class _OtherExtents,
125
+ /* requires */ (
126
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents)
127
+ )
128
+ )
129
+ __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due to comma
130
+ __MDSPAN_INLINE_FUNCTION constexpr
131
+ mapping(mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor)
132
+ :__extents(__other.extents())
133
+ {
134
+ /*
135
+ * TODO: check precondition
136
+ * __other.required_span_size() is a representable value of type index_type
137
+ */
138
+ }
139
+
140
+ __MDSPAN_TEMPLATE_REQUIRES(
141
+ class _OtherExtents,
142
+ /* requires */ (
143
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents) &&
144
+ (extents_type::rank() <= 1)
145
+ )
146
+ )
147
+ __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due to comma
148
+ __MDSPAN_INLINE_FUNCTION constexpr
149
+ mapping(layout_left::mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor)
150
+ :__extents(__other.extents())
151
+ {
152
+ /*
153
+ * TODO: check precondition
154
+ * __other.required_span_size() is a representable value of type index_type
155
+ */
156
+ }
157
+
158
+ __MDSPAN_TEMPLATE_REQUIRES(
159
+ class _OtherExtents,
160
+ /* requires */ (
161
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents)
162
+ )
163
+ )
164
+ __MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0))
165
+ __MDSPAN_INLINE_FUNCTION constexpr
166
+ mapping(layout_stride::mapping<_OtherExtents> const& __other) // NOLINT(google-explicit-constructor)
167
+ :__extents(__other.extents())
168
+ {
169
+ /*
170
+ * TODO: check precondition
171
+ * __other.required_span_size() is a representable value of type index_type
172
+ */
173
+ NV_IF_TARGET(NV_IS_HOST,(
174
+ size_t __stride = 1;
175
+ for(rank_type __r=__extents.rank(); __r>0; __r--) {
176
+ _LIBCUDACXX_THROW_RUNTIME_ERROR(__stride == static_cast<size_t>(__other.stride(__r-1)),
177
+ "Assigning layout_stride to layout_right with invalid strides.");
178
+ __stride *= __extents.extent(__r-1);
179
+ }
180
+ ))
181
+ }
182
+
183
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED __MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default;
184
+
185
+ __MDSPAN_INLINE_FUNCTION
186
+ constexpr const extents_type& extents() const noexcept {
187
+ return __extents;
188
+ }
189
+
190
+ __MDSPAN_INLINE_FUNCTION
191
+ constexpr index_type required_span_size() const noexcept {
192
+ index_type __value = 1;
193
+ for(rank_type __r=0; __r != extents_type::rank(); ++__r) __value*=__extents.extent(__r);
194
+ return __value;
195
+ }
196
+
197
+ //--------------------------------------------------------------------------------
198
+
199
+ __MDSPAN_TEMPLATE_REQUIRES(
200
+ class... _Indices,
201
+ /* requires */ (
202
+ (sizeof...(_Indices) == extents_type::rank()) &&
203
+ __MDSPAN_FOLD_AND(
204
+ (_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _Indices, index_type) &&
205
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Indices))
206
+ )
207
+ )
208
+ )
209
+ _LIBCUDACXX_HOST_DEVICE
210
+ constexpr index_type operator()(_Indices... __idxs) const noexcept {
211
+ return __compute_offset(__rank_count<0, extents_type::rank()>(), static_cast<index_type>(__idxs)...);
212
+ }
213
+
214
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept { return true; }
215
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept { return true; }
216
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept { return true; }
217
+ __MDSPAN_INLINE_FUNCTION constexpr bool is_unique() const noexcept { return true; }
218
+ __MDSPAN_INLINE_FUNCTION constexpr bool is_exhaustive() const noexcept { return true; }
219
+ __MDSPAN_INLINE_FUNCTION constexpr bool is_strided() const noexcept { return true; }
220
+
221
+ __MDSPAN_TEMPLATE_REQUIRES(
222
+ class _Ext = _Extents,
223
+ /* requires */ (
224
+ _Ext::rank() > 0
225
+ )
226
+ )
227
+ __MDSPAN_INLINE_FUNCTION
228
+ constexpr index_type stride(rank_type __i) const noexcept {
229
+ index_type __value = 1;
230
+ for(rank_type __r=extents_type::rank()-1; __r>__i; __r--) __value*=__extents.extent(__r);
231
+ return __value;
232
+ }
233
+
234
+ template<class _OtherExtents>
235
+ __MDSPAN_INLINE_FUNCTION
236
+ friend constexpr bool operator==(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept {
237
+ return __lhs.extents() == __rhs.extents();
238
+ }
239
+
240
+ // In C++ 20 the not equal exists if equal is found
241
+ #if !(__MDSPAN_HAS_CXX_20)
242
+ template<class _OtherExtents>
243
+ __MDSPAN_INLINE_FUNCTION
244
+ friend constexpr bool operator!=(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept {
245
+ return __lhs.extents() != __rhs.extents();
246
+ }
247
+ #endif
248
+
249
+ // Not really public, but currently needed to implement fully constexpr useable submdspan:
250
+ template<size_t _Np, class _SizeType, size_t ... _Ep, size_t ... _Idx>
251
+ _LIBCUDACXX_HOST_DEVICE
252
+ constexpr index_type __get_stride(_CUDA_VSTD::extents<_SizeType, _Ep...>,_CUDA_VSTD::integer_sequence<size_t, _Idx...>) const {
253
+ return __MDSPAN_FOLD_TIMES_RIGHT((_Idx>_Np? __extents.template __extent<_Idx>():1),1);
254
+ }
255
+ template<size_t _Np>
256
+ _LIBCUDACXX_HOST_DEVICE
257
+ constexpr index_type __stride() const noexcept {
258
+ return __get_stride<_Np>(__extents, _CUDA_VSTD::make_index_sequence<extents_type::rank()>());
259
+ }
260
+
261
+ private:
262
+ _LIBCUDACXX_NO_UNIQUE_ADDRESS extents_type __extents{};
263
+
264
+ };
265
+
266
+ #endif // _LIBCUDACXX_STD_VER > 11
267
+
268
+ _LIBCUDACXX_END_NAMESPACE_STD
269
+
270
+ #endif // _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/layout_stride.h ADDED
@@ -0,0 +1,555 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_HPP
45
+ #define _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__mdspan/compressed_pair.h"
52
+ #include "../__mdspan/extents.h"
53
+ #include "../__mdspan/macros.h"
54
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
55
+ #include "../__mdspan/no_unique_address.h"
56
+ #endif
57
+ #include "../__mdspan/static_array.h"
58
+ #include "../__type_traits/is_same.h"
59
+ #include "../__type_traits/is_constructible.h"
60
+ #include "../__type_traits/is_convertible.h"
61
+ #include "../__type_traits/is_nothrow_constructible.h"
62
+ #include "../__type_traits/remove_const.h"
63
+ #include "../__utility/integer_sequence.h"
64
+ #include "../__utility/move.h"
65
+ #include "../algorithm"
66
+ #include "../array"
67
+ #if __MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20
68
+ #include "../concepts"
69
+ #endif
70
+ #include "../numeric"
71
+ #include "../span"
72
+
73
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
74
+ #pragma GCC system_header
75
+ #endif
76
+
77
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
78
+
79
+ #if _LIBCUDACXX_STD_VER > 11
80
+
81
+ struct layout_left {
82
+ template<class _Extents>
83
+ class mapping;
84
+ };
85
+ struct layout_right {
86
+ template<class _Extents>
87
+ class mapping;
88
+ };
89
+
90
+ namespace __detail {
91
+ template<class _Layout, class _Mapping>
92
+ constexpr bool __is_mapping_of =
93
+ _CUDA_VSTD::is_same<typename _Layout::template mapping<typename _Mapping::extents_type>, _Mapping>::value;
94
+
95
+ #if __MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20
96
+ template<class _Mp>
97
+ concept __layout_mapping_alike = requires {
98
+ requires __is_extents<typename _Mp::extents_type>::value;
99
+ { _Mp::is_always_strided() } -> same_as<bool>;
100
+ { _Mp::is_always_exhaustive() } -> same_as<bool>;
101
+ { _Mp::is_always_unique() } -> same_as<bool>;
102
+ bool_constant<_Mp::is_always_strided()>::value;
103
+ bool_constant<_Mp::is_always_exhaustive()>::value;
104
+ bool_constant<_Mp::is_always_unique()>::value;
105
+ };
106
+ #endif
107
+ } // namespace __detail
108
+
109
+ struct layout_stride {
110
+ template <class _Extents>
111
+ class mapping
112
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
113
+ : private __detail::__no_unique_address_emulation<
114
+ __detail::__compressed_pair<
115
+ _Extents,
116
+ _CUDA_VSTD::array<typename _Extents::index_type, _Extents::rank()>
117
+ >
118
+ >
119
+ #endif
120
+ {
121
+ public:
122
+ using extents_type = _Extents;
123
+ using index_type = typename extents_type::index_type;
124
+ using size_type = typename extents_type::size_type;
125
+ using rank_type = typename extents_type::rank_type;
126
+ using layout_type = layout_stride;
127
+
128
+ // This could be a `requires`, but I think it's better and clearer as a `static_assert`.
129
+ static_assert(__detail::__is_extents_v<_Extents>, "layout_stride::mapping must be instantiated with a specialization of _CUDA_VSTD::extents.");
130
+
131
+
132
+ private:
133
+
134
+ //----------------------------------------------------------------------------
135
+
136
+ using __strides_storage_t = _CUDA_VSTD::array<index_type, extents_type::rank()>;//_CUDA_VSTD::dextents<index_type, extents_type::rank()>;
137
+ using __member_pair_t = __detail::__compressed_pair<extents_type, __strides_storage_t>;
138
+
139
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
140
+ _LIBCUDACXX_NO_UNIQUE_ADDRESS __member_pair_t __members;
141
+ #else
142
+ using __base_t = __detail::__no_unique_address_emulation<__member_pair_t>;
143
+ #endif
144
+
145
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __strides_storage_t const&
146
+ __strides_storage() const noexcept {
147
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
148
+ return __members.__second();
149
+ #else
150
+ return this->__base_t::__ref().__second();
151
+ #endif
152
+ }
153
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __strides_storage_t&
154
+ __strides_storage() noexcept {
155
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
156
+ return __members.__second();
157
+ #else
158
+ return this->__base_t::__ref().__second();
159
+ #endif
160
+ }
161
+
162
+ template<class _SizeType, size_t ... _Ep, size_t ... _Idx>
163
+ _LIBCUDACXX_HOST_DEVICE
164
+ constexpr index_type __get_size(_CUDA_VSTD::extents<_SizeType, _Ep...>,_CUDA_VSTD::integer_sequence<size_t, _Idx...>) const {
165
+ return __MDSPAN_FOLD_TIMES_RIGHT( static_cast<index_type>(extents().extent(_Idx)), 1 );
166
+ }
167
+
168
+ //----------------------------------------------------------------------------
169
+
170
+ template <class>
171
+ friend class mapping;
172
+
173
+ //----------------------------------------------------------------------------
174
+
175
+ // Workaround for non-deducibility of the index sequence template parameter if it's given at the top level
176
+ template <class>
177
+ struct __deduction_workaround;
178
+
179
+ template <size_t... _Idxs>
180
+ struct __deduction_workaround<_CUDA_VSTD::index_sequence<_Idxs...>>
181
+ {
182
+ template <class _OtherExtents>
183
+ __MDSPAN_INLINE_FUNCTION
184
+ static constexpr bool _eq_impl(mapping const& __self, mapping<_OtherExtents> const& __other) noexcept {
185
+ return __MDSPAN_FOLD_AND((__self.stride(_Idxs) == __other.stride(_Idxs)) /* && ... */)
186
+ && __MDSPAN_FOLD_AND((__self.extents().extent(_Idxs) == __other.extents().extent(_Idxs)) /* || ... */);
187
+ }
188
+ template <class _OtherExtents>
189
+ __MDSPAN_INLINE_FUNCTION
190
+ static constexpr bool _not_eq_impl(mapping const& __self, mapping<_OtherExtents> const& __other) noexcept {
191
+ return __MDSPAN_FOLD_OR((__self.stride(_Idxs) != __other.stride(_Idxs)) /* || ... */)
192
+ || __MDSPAN_FOLD_OR((__self.extents().extent(_Idxs) != __other.extents().extent(_Idxs)) /* || ... */);
193
+ }
194
+
195
+ template <class... _Integral>
196
+ __MDSPAN_FORCE_INLINE_FUNCTION
197
+ static constexpr size_t _call_op_impl(mapping const& __self, _Integral... __idxs) noexcept {
198
+ return __MDSPAN_FOLD_PLUS_RIGHT((__idxs * __self.stride(_Idxs)), /* + ... + */ 0);
199
+ }
200
+
201
+ __MDSPAN_INLINE_FUNCTION
202
+ static constexpr size_t _req_span_size_impl(mapping const& __self) noexcept {
203
+ // assumes no negative strides; not sure if I'm allowed to assume that or not
204
+ return __impl::_call_op_impl(__self, (__self.extents().template __extent<_Idxs>() - 1)...) + 1;
205
+ }
206
+
207
+ template<class _OtherMapping>
208
+ __MDSPAN_INLINE_FUNCTION
209
+ static constexpr const __strides_storage_t fill_strides(const _OtherMapping& __map) {
210
+ return __strides_storage_t{static_cast<index_type>(__map.stride(_Idxs))...};
211
+ }
212
+
213
+ __MDSPAN_INLINE_FUNCTION
214
+ static constexpr const __strides_storage_t& fill_strides(const __strides_storage_t& __s) {
215
+ return __s;
216
+ }
217
+
218
+ template<class _IntegralType>
219
+ __MDSPAN_INLINE_FUNCTION
220
+ static constexpr const __strides_storage_t fill_strides(const _CUDA_VSTD::array<_IntegralType,extents_type::rank()>& __s) {
221
+ return __strides_storage_t{static_cast<index_type>(__s[_Idxs])...};
222
+ }
223
+
224
+ template<class _IntegralType>
225
+ __MDSPAN_INLINE_FUNCTION
226
+ static constexpr const __strides_storage_t fill_strides(const _CUDA_VSTD::span<_IntegralType,extents_type::rank()>& __s) {
227
+ return __strides_storage_t{static_cast<index_type>(__s[_Idxs])...};
228
+ }
229
+
230
+ __MDSPAN_INLINE_FUNCTION
231
+ static constexpr const __strides_storage_t fill_strides(
232
+ __detail::__extents_to_partially_static_sizes_t<
233
+ _CUDA_VSTD::dextents<index_type, extents_type::rank()>>&& __s) {
234
+ return __strides_storage_t{static_cast<index_type>(__s.template __get_n<_Idxs>())...};
235
+ }
236
+
237
+ template<size_t K>
238
+ __MDSPAN_INLINE_FUNCTION
239
+ static constexpr size_t __return_zero() { return 0; }
240
+
241
+ template<class _Mapping>
242
+ __MDSPAN_INLINE_FUNCTION
243
+ static constexpr typename _Mapping::index_type
244
+ __OFFSET(const _Mapping& m) { return m(__return_zero<_Idxs>()...); }
245
+ };
246
+
247
+ // Can't use defaulted parameter in the __deduction_workaround template because of a bug in MSVC warning C4348.
248
+ using __impl = __deduction_workaround<_CUDA_VSTD::make_index_sequence<_Extents::rank()>>;
249
+
250
+
251
+ //----------------------------------------------------------------------------
252
+
253
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
254
+ __MDSPAN_INLINE_FUNCTION constexpr explicit
255
+ mapping(__member_pair_t&& __m) : __members(_CUDA_VSTD::move(__m)) {}
256
+ #else
257
+ __MDSPAN_INLINE_FUNCTION constexpr explicit
258
+ mapping(__base_t&& __b) : __base_t(_CUDA_VSTD::move(__b)) {}
259
+ #endif
260
+
261
+ public: // but not really
262
+ __MDSPAN_INLINE_FUNCTION
263
+ static constexpr mapping
264
+ __make_mapping(
265
+ __detail::__extents_to_partially_static_sizes_t<_Extents>&& __exts,
266
+ __detail::__extents_to_partially_static_sizes_t<
267
+ _CUDA_VSTD::dextents<index_type, _Extents::rank()>>&& __strs
268
+ ) noexcept {
269
+ // call the private constructor we created for this purpose
270
+ return mapping(
271
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
272
+ __base_t{
273
+ #endif
274
+ __member_pair_t(
275
+ extents_type::__make_extents_impl(_CUDA_VSTD::move(__exts)),
276
+ __strides_storage_t{__impl::fill_strides(_CUDA_VSTD::move(__strs))}
277
+ )
278
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
279
+ }
280
+ #endif
281
+ );
282
+ }
283
+ //----------------------------------------------------------------------------
284
+
285
+
286
+ public:
287
+
288
+ //--------------------------------------------------------------------------------
289
+
290
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping() noexcept = default;
291
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(mapping const&) noexcept = default;
292
+
293
+ __MDSPAN_TEMPLATE_REQUIRES(
294
+ class _IntegralTypes,
295
+ /* requires */ (
296
+ // MSVC 19.32 does not like using index_type here, requires the typename _Extents::index_type
297
+ // error C2641: cannot deduce template arguments for '_CUDA_VSTD::layout_stride::mapping'
298
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, const remove_const_t<_IntegralTypes>&, typename _Extents::index_type) &&
299
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, typename _Extents::index_type, const remove_const_t<_IntegralTypes>&)
300
+ )
301
+ )
302
+ __MDSPAN_INLINE_FUNCTION
303
+ constexpr
304
+ mapping(
305
+ extents_type const& __e,
306
+ _CUDA_VSTD::array<_IntegralTypes, extents_type::rank()> const& __s
307
+ ) noexcept
308
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
309
+ : __members{
310
+ #else
311
+ : __base_t(__base_t{__member_pair_t(
312
+ #endif
313
+ __e, __strides_storage_t(__impl::fill_strides(__s))
314
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
315
+ }
316
+ #else
317
+ )})
318
+ #endif
319
+ {
320
+ /*
321
+ * TODO: check preconditions
322
+ * - __s[i] > 0 is true for all i in the range [0, rank_ ).
323
+ * - REQUIRED-SPAN-SIZE(__e, __s) is a representable value of type index_type ([basic.fundamental]).
324
+ * - If rank_ is greater than 0, then there exists a permutation P of the integers in the
325
+ * range [0, rank_), such that __s[ pi ] >= __s[ pi − 1 ] * __e.extent( pi − 1 ) is true for
326
+ * all i in the range [1, rank_ ), where pi is the ith element of P.
327
+ */
328
+ }
329
+
330
+ __MDSPAN_TEMPLATE_REQUIRES(
331
+ class _IntegralTypes,
332
+ /* requires */ (
333
+ // MSVC 19.32 does not like using index_type here, requires the typename _Extents::index_type
334
+ // error C2641: cannot deduce template arguments for '_CUDA_VSTD::layout_stride::mapping'
335
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, const remove_const_t<_IntegralTypes>&, typename _Extents::index_type) &&
336
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, typename _Extents::index_type, const remove_const_t<_IntegralTypes>&)
337
+ )
338
+ )
339
+ __MDSPAN_INLINE_FUNCTION
340
+ constexpr
341
+ mapping(
342
+ extents_type const& __e,
343
+ _CUDA_VSTD::span<_IntegralTypes, extents_type::rank()> const& __s
344
+ ) noexcept
345
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
346
+ : __members{
347
+ #else
348
+ : __base_t(__base_t{__member_pair_t(
349
+ #endif
350
+ __e, __strides_storage_t(__impl::fill_strides(__s))
351
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
352
+ }
353
+ #else
354
+ )})
355
+ #endif
356
+ {
357
+ /*
358
+ * TODO: check preconditions
359
+ * - __s[i] > 0 is true for all i in the range [0, rank_ ).
360
+ * - REQUIRED-SPAN-SIZE(__e, __s) is a representable value of type index_type ([basic.fundamental]).
361
+ * - If rank_ is greater than 0, then there exists a permutation P of the integers in the
362
+ * range [0, rank_), such that __s[ pi ] >= __s[ pi − 1 ] * __e.extent( pi − 1 ) is true for
363
+ * all i in the range [1, rank_ ), where pi is the ith element of P.
364
+ */
365
+ }
366
+
367
+ #if !(__MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20)
368
+ __MDSPAN_TEMPLATE_REQUIRES(
369
+ class _StridedLayoutMapping,
370
+ /* requires */ (
371
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, typename _StridedLayoutMapping::extents_type) &&
372
+ __detail::__is_mapping_of<typename _StridedLayoutMapping::layout_type, _StridedLayoutMapping> &&
373
+ _StridedLayoutMapping::is_always_unique() &&
374
+ _StridedLayoutMapping::is_always_strided()
375
+ )
376
+ )
377
+ #else
378
+ template<class _StridedLayoutMapping>
379
+ requires(
380
+ __detail::__layout_mapping_alike<_StridedLayoutMapping> &&
381
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_constructible, extents_type, typename _StridedLayoutMapping::extents_type) &&
382
+ _StridedLayoutMapping::is_always_unique() &&
383
+ _StridedLayoutMapping::is_always_strided()
384
+ )
385
+ #endif
386
+ __MDSPAN_CONDITIONAL_EXPLICIT(
387
+ (!_CUDA_VSTD::is_convertible<typename _StridedLayoutMapping::extents_type, extents_type>::value) &&
388
+ (__detail::__is_mapping_of<layout_left, _StridedLayoutMapping> ||
389
+ __detail::__is_mapping_of<layout_right, _StridedLayoutMapping> ||
390
+ __detail::__is_mapping_of<layout_stride, _StridedLayoutMapping>)
391
+ ) // needs two () due to comma
392
+ __MDSPAN_INLINE_FUNCTION constexpr
393
+ mapping(_StridedLayoutMapping const& __other) noexcept // NOLINT(google-explicit-constructor)
394
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
395
+ : __members{
396
+ #else
397
+ : __base_t(__base_t{__member_pair_t(
398
+ #endif
399
+ __other.extents(), __strides_storage_t(__impl::fill_strides(__other))
400
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
401
+ }
402
+ #else
403
+ )})
404
+ #endif
405
+ {
406
+ /*
407
+ * TODO: check preconditions
408
+ * - __other.stride(i) > 0 is true for all i in the range [0, rank_ ).
409
+ * - __other.required_span_size() is a representable value of type index_type ([basic.fundamental]).
410
+ * - OFFSET(__other) == 0
411
+ */
412
+ }
413
+
414
+ //--------------------------------------------------------------------------------
415
+
416
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED __MDSPAN_CONSTEXPR_14_DEFAULTED
417
+ mapping& operator=(mapping const&) noexcept = default;
418
+
419
+ __MDSPAN_INLINE_FUNCTION constexpr const extents_type& extents() const noexcept {
420
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
421
+ return __members.__first();
422
+ #else
423
+ return this->__base_t::__ref().__first();
424
+ #endif
425
+ };
426
+
427
+ __MDSPAN_INLINE_FUNCTION
428
+ constexpr _CUDA_VSTD::array< index_type, extents_type::rank() > strides() const noexcept {
429
+ return __strides_storage();
430
+ }
431
+
432
+ __MDSPAN_INLINE_FUNCTION
433
+ constexpr index_type required_span_size() const noexcept {
434
+ index_type __span_size = 1;
435
+ for(unsigned __r = 0; __r < extents_type::rank(); __r++) {
436
+ // Return early if any of the extents are zero
437
+ if(extents().extent(__r)==0) return 0;
438
+ __span_size += ( static_cast<index_type>(extents().extent(__r) - 1 ) * __strides_storage()[__r]);
439
+ }
440
+ return __span_size;
441
+ }
442
+
443
+
444
+ __MDSPAN_TEMPLATE_REQUIRES(
445
+ class... _Indices,
446
+ /* requires */ (
447
+ sizeof...(_Indices) == _Extents::rank() &&
448
+ __MDSPAN_FOLD_AND(_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _Indices, index_type) /*&& ...*/ ) &&
449
+ __MDSPAN_FOLD_AND(_LIBCUDACXX_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Indices) /*&& ...*/)
450
+ )
451
+ )
452
+ __MDSPAN_FORCE_INLINE_FUNCTION
453
+ constexpr index_type operator()(_Indices... __idxs) const noexcept {
454
+ // Should the op_impl operate in terms of `index_type` rather than `size_t`?
455
+ // Casting `size_t` to `index_type` here.
456
+ return static_cast<index_type>(__impl::_call_op_impl(*this, static_cast<index_type>(__idxs)...));
457
+ }
458
+
459
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept { return true; }
460
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept {
461
+ return false;
462
+ }
463
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept { return true; }
464
+
465
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_unique() noexcept { return true; }
466
+ __MDSPAN_INLINE_FUNCTION constexpr bool is_exhaustive() const noexcept {
467
+ return required_span_size() == __get_size( extents(), _CUDA_VSTD::make_index_sequence<extents_type::rank()>());
468
+ }
469
+ __MDSPAN_INLINE_FUNCTION static constexpr bool is_strided() noexcept { return true; }
470
+
471
+
472
+ __MDSPAN_TEMPLATE_REQUIRES(
473
+ class _Ext = _Extents,
474
+ /* requires */ (
475
+ _Ext::rank() > 0
476
+ )
477
+ )
478
+ __MDSPAN_INLINE_FUNCTION
479
+ constexpr index_type stride(rank_type __r) const noexcept {
480
+ return __strides_storage()[__r];
481
+ }
482
+
483
+ #if !(__MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20)
484
+ __MDSPAN_TEMPLATE_REQUIRES(
485
+ class _StridedLayoutMapping,
486
+ /* requires */ (
487
+ __detail::__is_mapping_of<typename _StridedLayoutMapping::layout_type, _StridedLayoutMapping> &&
488
+ (extents_type::rank() == _StridedLayoutMapping::extents_type::rank()) &&
489
+ _StridedLayoutMapping::is_always_strided()
490
+ )
491
+ )
492
+ #else
493
+ template<class _StridedLayoutMapping>
494
+ requires(
495
+ __detail::__layout_mapping_alike<_StridedLayoutMapping> &&
496
+ (extents_type::rank() == _StridedLayoutMapping::extents_type::rank()) &&
497
+ _StridedLayoutMapping::is_always_strided()
498
+ )
499
+ #endif
500
+ __MDSPAN_INLINE_FUNCTION
501
+ friend constexpr bool operator==(const mapping& __x, const _StridedLayoutMapping& __y) noexcept {
502
+ bool __strides_match = true;
503
+ for(rank_type __r = 0; __r < extents_type::rank(); __r++)
504
+ __strides_match = __strides_match && (__x.stride(__r) == __y.stride(__r));
505
+ return (__x.extents() == __y.extents()) &&
506
+ (__impl::__OFFSET(__y)== static_cast<typename _StridedLayoutMapping::index_type>(0)) &&
507
+ __strides_match;
508
+ }
509
+
510
+ // This one is not technically part of the proposal. Just here to make implementation a bit more optimal hopefully
511
+ __MDSPAN_TEMPLATE_REQUIRES(
512
+ class _OtherExtents,
513
+ /* requires */ (
514
+ (extents_type::rank() == _OtherExtents::rank())
515
+ )
516
+ )
517
+ __MDSPAN_INLINE_FUNCTION
518
+ friend constexpr bool operator==(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept {
519
+ return __impl::_eq_impl(__lhs, __rhs);
520
+ }
521
+
522
+ #if !__MDSPAN_HAS_CXX_20
523
+ __MDSPAN_TEMPLATE_REQUIRES(
524
+ class _StridedLayoutMapping,
525
+ /* requires */ (
526
+ __detail::__is_mapping_of<typename _StridedLayoutMapping::layout_type, _StridedLayoutMapping> &&
527
+ (extents_type::rank() == _StridedLayoutMapping::extents_type::rank()) &&
528
+ _StridedLayoutMapping::is_always_strided()
529
+ )
530
+ )
531
+ __MDSPAN_INLINE_FUNCTION
532
+ friend constexpr bool operator!=(const mapping& __x, const _StridedLayoutMapping& __y) noexcept {
533
+ return not (__x == __y);
534
+ }
535
+
536
+ __MDSPAN_TEMPLATE_REQUIRES(
537
+ class _OtherExtents,
538
+ /* requires */ (
539
+ (extents_type::rank() == _OtherExtents::rank())
540
+ )
541
+ )
542
+ __MDSPAN_INLINE_FUNCTION
543
+ friend constexpr bool operator!=(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept {
544
+ return __impl::_not_eq_impl(__lhs, __rhs);
545
+ }
546
+ #endif
547
+
548
+ };
549
+ };
550
+
551
+ #endif // _LIBCUDACXX_STD_VER > 11
552
+
553
+ _LIBCUDACXX_END_NAMESPACE_STD
554
+
555
+ #endif // _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_HPP
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__mdspan/macros.h ADDED
@@ -0,0 +1,639 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+
45
+ #ifndef _LIBCUDACXX___MDSPAN_MACROS_HPP
46
+ #define _LIBCUDACXX___MDSPAN_MACROS_HPP
47
+
48
+ #ifndef __cuda_std__
49
+ #include <__config>
50
+ #endif // __cuda_std__
51
+
52
+ #include "../__mdspan/config.h"
53
+ #include "../__type_traits/enable_if.h"
54
+ #include "../__type_traits/is_void.h"
55
+ #include "../__type_traits/remove_reference.h"
56
+ #include "../__utility/declval.h"
57
+
58
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
59
+ #pragma GCC system_header
60
+ #endif
61
+
62
+ #if _LIBCUDACXX_STD_VER > 11
63
+
64
+ #ifndef __MDSPAN_FORCE_INLINE_FUNCTION
65
+ # ifdef __MDSPAN_COMPILER_MSVC // Microsoft compilers
66
+ # define __MDSPAN_FORCE_INLINE_FUNCTION __forceinline _LIBCUDACXX_HOST_DEVICE
67
+ # else
68
+ # define __MDSPAN_FORCE_INLINE_FUNCTION __attribute__((always_inline)) _LIBCUDACXX_HOST_DEVICE
69
+ # endif
70
+ #endif
71
+
72
+ #ifndef __MDSPAN_INLINE_FUNCTION
73
+ # define __MDSPAN_INLINE_FUNCTION inline _LIBCUDACXX_HOST_DEVICE
74
+ #endif
75
+
76
+ // In CUDA defaulted functions do not need host device markup
77
+ #ifndef __MDSPAN_INLINE_FUNCTION_DEFAULTED
78
+ # define __MDSPAN_INLINE_FUNCTION_DEFAULTED
79
+ #endif
80
+
81
+ //==============================================================================
82
+ // <editor-fold desc="Preprocessor helpers"> {{{1
83
+
84
+ #define __MDSPAN_PP_COUNT(...) \
85
+ __MDSPAN_PP_INTERNAL_EXPAND_ARGS_PRIVATE( \
86
+ __MDSPAN_PP_INTERNAL_ARGS_AUGMENTER(__VA_ARGS__) \
87
+ )
88
+
89
+ #define __MDSPAN_PP_INTERNAL_ARGS_AUGMENTER(...) unused, __VA_ARGS__
90
+ #define __MDSPAN_PP_INTERNAL_EXPAND(x) x
91
+ #define __MDSPAN_PP_INTERNAL_EXPAND_ARGS_PRIVATE(...) \
92
+ __MDSPAN_PP_INTERNAL_EXPAND( \
93
+ __MDSPAN_PP_INTERNAL_COUNT_PRIVATE( \
94
+ __VA_ARGS__, 69, 68, 67, 66, 65, 64, 63, 62, 61, \
95
+ 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, \
96
+ 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, \
97
+ 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, \
98
+ 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, \
99
+ 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 \
100
+ ) \
101
+ )
102
+ # define __MDSPAN_PP_INTERNAL_COUNT_PRIVATE( \
103
+ _1_, _2_, _3_, _4_, _5_, _6_, _7_, _8_, _9_, \
104
+ _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, \
105
+ _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, \
106
+ _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, \
107
+ _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, \
108
+ _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, \
109
+ _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, \
110
+ _70, count, ...) count \
111
+ /**/
112
+
113
+ #define __MDSPAN_PP_STRINGIFY_IMPL(x) #x
114
+ #define __MDSPAN_PP_STRINGIFY(x) __MDSPAN_PP_STRINGIFY_IMPL(x)
115
+
116
+ #define __MDSPAN_PP_CAT_IMPL(x, y) x ## y
117
+ #define __MDSPAN_PP_CAT(x, y) __MDSPAN_PP_CAT_IMPL(x, y)
118
+
119
+ #define __MDSPAN_PP_EVAL(X, ...) X(__VA_ARGS__)
120
+
121
+ #define __MDSPAN_PP_REMOVE_PARENS_IMPL(...) __VA_ARGS__
122
+ #define __MDSPAN_PP_REMOVE_PARENS(...) __MDSPAN_PP_REMOVE_PARENS_IMPL __VA_ARGS__
123
+
124
+ // </editor-fold> end Preprocessor helpers }}}1
125
+ //==============================================================================
126
+
127
+ //==============================================================================
128
+ // <editor-fold desc="Concept emulation"> {{{1
129
+
130
+ // These compatibility macros don't help with partial ordering, but they should do the trick
131
+ // for what we need to do with concepts in mdspan
132
+ #ifdef __MDSPAN_USE_CONCEPTS
133
+ # define __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) > requires REQ
134
+ # define __MDSPAN_FUNCTION_REQUIRES(PAREN_PREQUALS, FNAME, PAREN_PARAMS, QUALS, REQ) \
135
+ __MDSPAN_PP_REMOVE_PARENS(PAREN_PREQUALS) FNAME PAREN_PARAMS QUALS requires REQ \
136
+ /**/
137
+ #else
138
+ # define __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) , typename _CUDA_VSTD::enable_if<(REQ), int>::type = 0>
139
+ # define __MDSPAN_FUNCTION_REQUIRES(PAREN_PREQUALS, FNAME, PAREN_PARAMS, QUALS, REQ) \
140
+ __MDSPAN_TEMPLATE_REQUIRES( \
141
+ class __function_requires_ignored=void, \
142
+ (_CUDA_VSTD::is_void<__function_requires_ignored>::value && REQ) \
143
+ ) __MDSPAN_PP_REMOVE_PARENS(PAREN_PREQUALS) FNAME PAREN_PARAMS QUALS \
144
+ /**/
145
+ #endif
146
+
147
+
148
+ #if defined(__MDSPAN_COMPILER_MSVC)
149
+ # define __MDSPAN_TEMPLATE_REQUIRES(...) \
150
+ __MDSPAN_PP_CAT( \
151
+ __MDSPAN_PP_CAT(__MDSPAN_TEMPLATE_REQUIRES_, __MDSPAN_PP_COUNT(__VA_ARGS__))\
152
+ (__VA_ARGS__), \
153
+ ) \
154
+ /**/
155
+ #else
156
+ # define __MDSPAN_TEMPLATE_REQUIRES(...) \
157
+ __MDSPAN_PP_EVAL( \
158
+ __MDSPAN_PP_CAT(__MDSPAN_TEMPLATE_REQUIRES_, __MDSPAN_PP_COUNT(__VA_ARGS__)), \
159
+ __VA_ARGS__ \
160
+ ) \
161
+ /**/
162
+ #endif
163
+
164
+ #define __MDSPAN_TEMPLATE_REQUIRES_2(TP1, REQ) \
165
+ template<TP1 \
166
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
167
+ /**/
168
+ #define __MDSPAN_TEMPLATE_REQUIRES_3(TP1, TP2, REQ) \
169
+ template<TP1, TP2 \
170
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
171
+ /**/
172
+ #define __MDSPAN_TEMPLATE_REQUIRES_4(TP1, TP2, TP3, REQ) \
173
+ template<TP1, TP2, TP3 \
174
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
175
+ /**/
176
+ #define __MDSPAN_TEMPLATE_REQUIRES_5(TP1, TP2, TP3, TP4, REQ) \
177
+ template<TP1, TP2, TP3, TP4 \
178
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
179
+ /**/
180
+ #define __MDSPAN_TEMPLATE_REQUIRES_6(TP1, TP2, TP3, TP4, TP5, REQ) \
181
+ template<TP1, TP2, TP3, TP4, TP5 \
182
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
183
+ /**/
184
+ #define __MDSPAN_TEMPLATE_REQUIRES_7(TP1, TP2, TP3, TP4, TP5, TP6, REQ) \
185
+ template<TP1, TP2, TP3, TP4, TP5, TP6 \
186
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
187
+ /**/
188
+ #define __MDSPAN_TEMPLATE_REQUIRES_8(TP1, TP2, TP3, TP4, TP5, TP6, TP7, REQ) \
189
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7 \
190
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
191
+ /**/
192
+ #define __MDSPAN_TEMPLATE_REQUIRES_9(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, REQ) \
193
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8 \
194
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
195
+ /**/
196
+ #define __MDSPAN_TEMPLATE_REQUIRES_10(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, REQ) \
197
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9 \
198
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
199
+ /**/
200
+ #define __MDSPAN_TEMPLATE_REQUIRES_11(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, REQ) \
201
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10 \
202
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
203
+ /**/
204
+ #define __MDSPAN_TEMPLATE_REQUIRES_12(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, REQ) \
205
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11 \
206
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
207
+ /**/
208
+ #define __MDSPAN_TEMPLATE_REQUIRES_13(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, REQ) \
209
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12 \
210
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
211
+ /**/
212
+ #define __MDSPAN_TEMPLATE_REQUIRES_14(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, REQ) \
213
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13 \
214
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
215
+ /**/
216
+ #define __MDSPAN_TEMPLATE_REQUIRES_15(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, REQ) \
217
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14 \
218
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
219
+ /**/
220
+ #define __MDSPAN_TEMPLATE_REQUIRES_16(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, REQ) \
221
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15 \
222
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
223
+ /**/
224
+ #define __MDSPAN_TEMPLATE_REQUIRES_17(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, REQ) \
225
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16 \
226
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
227
+ /**/
228
+ #define __MDSPAN_TEMPLATE_REQUIRES_18(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, REQ) \
229
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17 \
230
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
231
+ /**/
232
+ #define __MDSPAN_TEMPLATE_REQUIRES_19(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18, REQ) \
233
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18 \
234
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
235
+ /**/
236
+ #define __MDSPAN_TEMPLATE_REQUIRES_20(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18, TP19, REQ) \
237
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18, TP19 \
238
+ __MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
239
+ /**/
240
+
241
+ #define __MDSPAN_INSTANTIATE_ONLY_IF_USED \
242
+ __MDSPAN_TEMPLATE_REQUIRES( \
243
+ class __instantiate_only_if_used_tparam=void, \
244
+ ( _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_void, __instantiate_only_if_used_tparam) ) \
245
+ ) \
246
+ /**/
247
+
248
+ // </editor-fold> end Concept emulation }}}1
249
+ //==============================================================================
250
+
251
+ //==============================================================================
252
+ // <editor-fold desc="Return type deduction"> {{{1
253
+
254
+ #if __MDSPAN_USE_RETURN_TYPE_DEDUCTION
255
+ # define __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
256
+ auto __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) { return __MDSPAN_PP_REMOVE_PARENS(BODY); }
257
+ # define __MDSPAN_DEDUCE_DECLTYPE_AUTO_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
258
+ decltype(auto) __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) { return __MDSPAN_PP_REMOVE_PARENS(BODY); }
259
+ #else
260
+ # define __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
261
+ auto __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) \
262
+ -> _CUDA_VSTD::remove_cv_t<_CUDA_VSTD::remove_reference_t<decltype(BODY)>> \
263
+ { return __MDSPAN_PP_REMOVE_PARENS(BODY); }
264
+ # define __MDSPAN_DEDUCE_DECLTYPE_AUTO_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
265
+ auto __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) \
266
+ -> decltype(BODY) \
267
+ { return __MDSPAN_PP_REMOVE_PARENS(BODY); }
268
+
269
+ #endif
270
+
271
+ // </editor-fold> end Return type deduction }}}1
272
+ //==============================================================================
273
+
274
+ //==============================================================================
275
+ // <editor-fold desc="fold expressions"> {{{1
276
+
277
+ struct __mdspan_enable_fold_comma { };
278
+
279
+ #ifdef __MDSPAN_USE_FOLD_EXPRESSIONS
280
+ # define __MDSPAN_FOLD_AND(...) ((__VA_ARGS__) && ...)
281
+ # define __MDSPAN_FOLD_AND_TEMPLATE(...) ((__VA_ARGS__) && ...)
282
+ # define __MDSPAN_FOLD_OR(...) ((__VA_ARGS__) || ...)
283
+ # define __MDSPAN_FOLD_ASSIGN_LEFT(__INIT, ...) (__INIT = ... = (__VA_ARGS__))
284
+ # define __MDSPAN_FOLD_ASSIGN_RIGHT(__PACK, ...) (__PACK = ... = (__VA_ARGS__))
285
+ # define __MDSPAN_FOLD_TIMES_RIGHT(__PACK, ...) (__PACK * ... * (__VA_ARGS__))
286
+ # define __MDSPAN_FOLD_PLUS_RIGHT(__PACK, ...) (__PACK + ... + (__VA_ARGS__))
287
+ # define __MDSPAN_FOLD_COMMA(...) ((__VA_ARGS__), ...)
288
+ #else
289
+
290
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
291
+
292
+ namespace __fold_compatibility_impl {
293
+
294
+ // We could probably be more clever here, but at the (small) risk of losing some compiler understanding. For the
295
+ // few operations we need, it's not worth generalizing over the operation
296
+
297
+ #if __MDSPAN_USE_RETURN_TYPE_DEDUCTION
298
+
299
+ __MDSPAN_FORCE_INLINE_FUNCTION
300
+ constexpr decltype(auto) __fold_right_and_impl() {
301
+ return true;
302
+ }
303
+
304
+ template <class _Arg, class... _Args>
305
+ __MDSPAN_FORCE_INLINE_FUNCTION
306
+ constexpr decltype(auto) __fold_right_and_impl(_Arg&& __arg, _Args&&... __args) {
307
+ return ((_Arg&&)__arg) && __fold_compatibility_impl::__fold_right_and_impl((_Args&&)__args...);
308
+ }
309
+
310
+ __MDSPAN_FORCE_INLINE_FUNCTION
311
+ constexpr decltype(auto) __fold_right_or_impl() {
312
+ return false;
313
+ }
314
+
315
+ template <class _Arg, class... _Args>
316
+ __MDSPAN_FORCE_INLINE_FUNCTION
317
+ constexpr auto __fold_right_or_impl(_Arg&& __arg, _Args&&... __args) {
318
+ return ((_Arg&&)__arg) || __fold_compatibility_impl::__fold_right_or_impl((_Args&&)__args...);
319
+ }
320
+
321
+ template <class _Arg1>
322
+ __MDSPAN_FORCE_INLINE_FUNCTION
323
+ constexpr auto __fold_left_assign_impl(_Arg1&& __arg1) {
324
+ return (_Arg1&&)__arg1;
325
+ }
326
+
327
+ template <class _Arg1, class _Arg2, class... _Args>
328
+ __MDSPAN_FORCE_INLINE_FUNCTION
329
+ constexpr auto __fold_left_assign_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... __args) {
330
+ return __fold_compatibility_impl::__fold_left_assign_impl((((_Arg1&&)__arg1) = ((_Arg2&&)__arg2)), (_Args&&)__args...);
331
+ }
332
+
333
+ template <class _Arg1>
334
+ __MDSPAN_FORCE_INLINE_FUNCTION
335
+ constexpr auto __fold_right_assign_impl(_Arg1&& __arg1) {
336
+ return (_Arg1&&)__arg1;
337
+ }
338
+
339
+ template <class _Arg1, class _Arg2, class... _Args>
340
+ __MDSPAN_FORCE_INLINE_FUNCTION
341
+ constexpr auto __fold_right_assign_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... __args) {
342
+ return ((_Arg1&&)__arg1) = __fold_compatibility_impl::__fold_right_assign_impl((_Arg2&&)__arg2, (_Args&&)__args...);
343
+ }
344
+
345
+ template <class _Arg1>
346
+ __MDSPAN_FORCE_INLINE_FUNCTION
347
+ constexpr auto __fold_right_plus_impl(_Arg1&& __arg1) {
348
+ return (_Arg1&&)__arg1;
349
+ }
350
+
351
+ template <class _Arg1, class _Arg2, class... _Args>
352
+ __MDSPAN_FORCE_INLINE_FUNCTION
353
+ constexpr auto __fold_right_plus_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... __args) {
354
+ return ((_Arg1&&)__arg1) + __fold_compatibility_impl::__fold_right_plus_impl((_Arg2&&)__arg2, (_Args&&)__args...);
355
+ }
356
+
357
+ template <class _Arg1>
358
+ __MDSPAN_FORCE_INLINE_FUNCTION
359
+ constexpr auto __fold_right_times_impl(_Arg1&& __arg1) {
360
+ return (_Arg1&&)__arg1;
361
+ }
362
+
363
+ template <class _Arg1, class _Arg2, class... _Args>
364
+ __MDSPAN_FORCE_INLINE_FUNCTION
365
+ constexpr auto __fold_right_times_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... __args) {
366
+ return ((_Arg1&&)__arg1) * __fold_compatibility_impl::__fold_right_times_impl((_Arg2&&)__arg2, (_Args&&)__args...);
367
+ }
368
+
369
+ #else
370
+
371
+ //------------------------------------------------------------------------------
372
+ // <editor-fold desc="right and"> {{{2
373
+
374
+ template <class... _Args>
375
+ struct __fold_right_and_impl_;
376
+ template <>
377
+ struct __fold_right_and_impl_<> {
378
+ using __rv = bool;
379
+ __MDSPAN_FORCE_INLINE_FUNCTION
380
+ static constexpr __rv
381
+ __impl() noexcept {
382
+ return true;
383
+ }
384
+ };
385
+ template <class _Arg, class... _Args>
386
+ struct __fold_right_and_impl_<_Arg, _Args...> {
387
+ using __next_t = __fold_right_and_impl_<_Args...>;
388
+ using __rv = decltype(_CUDA_VSTD::declval<_Arg>() && _CUDA_VSTD::declval<typename __next_t::__rv>());
389
+ __MDSPAN_FORCE_INLINE_FUNCTION
390
+ static constexpr __rv
391
+ __impl(_Arg&& __arg, _Args&&... __args) noexcept {
392
+ return ((_Arg&&)__arg) && __next_t::__impl((_Args&&)__args...);
393
+ }
394
+ };
395
+
396
+ template <class... _Args>
397
+ __MDSPAN_FORCE_INLINE_FUNCTION
398
+ constexpr typename __fold_right_and_impl_<_Args...>::__rv
399
+ __fold_right_and_impl(_Args&&... __args) {
400
+ return __fold_right_and_impl_<_Args...>::__impl((_Args&&)__args...);
401
+ }
402
+
403
+ // </editor-fold> end right and }}}2
404
+ //------------------------------------------------------------------------------
405
+
406
+ //------------------------------------------------------------------------------
407
+ // <editor-fold desc="right or"> {{{2
408
+
409
+ template <class... _Args>
410
+ struct __fold_right_or_impl_;
411
+ template <>
412
+ struct __fold_right_or_impl_<> {
413
+ using __rv = bool;
414
+ __MDSPAN_FORCE_INLINE_FUNCTION
415
+ static constexpr __rv
416
+ __impl() noexcept {
417
+ return false;
418
+ }
419
+ };
420
+ template <class _Arg, class... _Args>
421
+ struct __fold_right_or_impl_<_Arg, _Args...> {
422
+ using __next_t = __fold_right_or_impl_<_Args...>;
423
+ using __rv = decltype(_CUDA_VSTD::declval<_Arg>() || _CUDA_VSTD::declval<typename __next_t::__rv>());
424
+ __MDSPAN_FORCE_INLINE_FUNCTION
425
+ static constexpr __rv
426
+ __impl(_Arg&& __arg, _Args&&... __args) noexcept {
427
+ return ((_Arg&&)__arg) || __next_t::__impl((_Args&&)__args...);
428
+ }
429
+ };
430
+
431
+ template <class... _Args>
432
+ __MDSPAN_FORCE_INLINE_FUNCTION
433
+ constexpr typename __fold_right_or_impl_<_Args...>::__rv
434
+ __fold_right_or_impl(_Args&&... __args) {
435
+ return __fold_right_or_impl_<_Args...>::__impl((_Args&&)__args...);
436
+ }
437
+
438
+ // </editor-fold> end right or }}}2
439
+ //------------------------------------------------------------------------------
440
+
441
+ //------------------------------------------------------------------------------
442
+ // <editor-fold desc="right plus"> {{{2
443
+
444
+ template <class... _Args>
445
+ struct __fold_right_plus_impl_;
446
+ template <class _Arg>
447
+ struct __fold_right_plus_impl_<_Arg> {
448
+ using __rv = _Arg&&;
449
+ __MDSPAN_FORCE_INLINE_FUNCTION
450
+ static constexpr __rv
451
+ __impl(_Arg&& __arg) noexcept {
452
+ return (_Arg&&)__arg;
453
+ }
454
+ };
455
+ template <class _Arg1, class _Arg2, class... _Args>
456
+ struct __fold_right_plus_impl_<_Arg1, _Arg2, _Args...> {
457
+ using __next_t = __fold_right_plus_impl_<_Arg2, _Args...>;
458
+ using __rv = decltype(_CUDA_VSTD::declval<_Arg1>() + _CUDA_VSTD::declval<typename __next_t::__rv>());
459
+ __MDSPAN_FORCE_INLINE_FUNCTION
460
+ static constexpr __rv
461
+ __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... __args) noexcept {
462
+ return ((_Arg1&&)__arg) + __next_t::__impl((_Arg2&&)__arg2, (_Args&&)__args...);
463
+ }
464
+ };
465
+
466
+ template <class... _Args>
467
+ __MDSPAN_FORCE_INLINE_FUNCTION
468
+ constexpr typename __fold_right_plus_impl_<_Args...>::__rv
469
+ __fold_right_plus_impl(_Args&&... __args) {
470
+ return __fold_right_plus_impl_<_Args...>::__impl((_Args&&)__args...);
471
+ }
472
+
473
+ // </editor-fold> end right plus }}}2
474
+ //------------------------------------------------------------------------------
475
+
476
+ //------------------------------------------------------------------------------
477
+ // <editor-fold desc="right times"> {{{2
478
+
479
+ template <class... _Args>
480
+ struct __fold_right_times_impl_;
481
+ template <class _Arg>
482
+ struct __fold_right_times_impl_<_Arg> {
483
+ using __rv = _Arg&&;
484
+ __MDSPAN_FORCE_INLINE_FUNCTION
485
+ static constexpr __rv
486
+ __impl(_Arg&& __arg) noexcept {
487
+ return (_Arg&&)__arg;
488
+ }
489
+ };
490
+ template <class _Arg1, class _Arg2, class... _Args>
491
+ struct __fold_right_times_impl_<_Arg1, _Arg2, _Args...> {
492
+ using __next_t = __fold_right_times_impl_<_Arg2, _Args...>;
493
+ using __rv = decltype(_CUDA_VSTD::declval<_Arg1>() * _CUDA_VSTD::declval<typename __next_t::__rv>());
494
+ __MDSPAN_FORCE_INLINE_FUNCTION
495
+ static constexpr __rv
496
+ __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... __args) noexcept {
497
+ return ((_Arg1&&)__arg) * __next_t::__impl((_Arg2&&)__arg2, (_Args&&)__args...);
498
+ }
499
+ };
500
+
501
+ template <class... _Args>
502
+ __MDSPAN_FORCE_INLINE_FUNCTION
503
+ constexpr typename __fold_right_times_impl_<_Args...>::__rv
504
+ __fold_right_times_impl(_Args&&... __args) {
505
+ return __fold_right_times_impl_<_Args...>::__impl((_Args&&)__args...);
506
+ }
507
+
508
+ // </editor-fold> end right times }}}2
509
+ //------------------------------------------------------------------------------
510
+
511
+ //------------------------------------------------------------------------------
512
+ // <editor-fold desc="right assign"> {{{2
513
+
514
+ template <class... _Args>
515
+ struct __fold_right_assign_impl_;
516
+ template <class _Arg>
517
+ struct __fold_right_assign_impl_<_Arg> {
518
+ using __rv = _Arg&&;
519
+ __MDSPAN_FORCE_INLINE_FUNCTION
520
+ static constexpr __rv
521
+ __impl(_Arg&& __arg) noexcept {
522
+ return (_Arg&&)__arg;
523
+ }
524
+ };
525
+ template <class _Arg1, class _Arg2, class... _Args>
526
+ struct __fold_right_assign_impl_<_Arg1, _Arg2, _Args...> {
527
+ using __next_t = __fold_right_assign_impl_<_Arg2, _Args...>;
528
+ using __rv = decltype(_CUDA_VSTD::declval<_Arg1>() = _CUDA_VSTD::declval<typename __next_t::__rv>());
529
+ __MDSPAN_FORCE_INLINE_FUNCTION
530
+ static constexpr __rv
531
+ __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... __args) noexcept {
532
+ return ((_Arg1&&)__arg) = __next_t::__impl((_Arg2&&)__arg2, (_Args&&)__args...);
533
+ }
534
+ };
535
+
536
+ template <class... _Args>
537
+ __MDSPAN_FORCE_INLINE_FUNCTION
538
+ constexpr typename __fold_right_assign_impl_<_Args...>::__rv
539
+ __fold_right_assign_impl(_Args&&... __args) {
540
+ return __fold_right_assign_impl_<_Args...>::__impl((_Args&&)__args...);
541
+ }
542
+
543
+ // </editor-fold> end right assign }}}2
544
+ //------------------------------------------------------------------------------
545
+
546
+ //------------------------------------------------------------------------------
547
+ // <editor-fold desc="left assign"> {{{2
548
+
549
+ template <class... _Args>
550
+ struct __fold_left_assign_impl_;
551
+ template <class _Arg>
552
+ struct __fold_left_assign_impl_<_Arg> {
553
+ using __rv = _Arg&&;
554
+ __MDSPAN_FORCE_INLINE_FUNCTION
555
+ static constexpr __rv
556
+ __impl(_Arg&& __arg) noexcept {
557
+ return (_Arg&&)__arg;
558
+ }
559
+ };
560
+ template <class _Arg1, class _Arg2, class... _Args>
561
+ struct __fold_left_assign_impl_<_Arg1, _Arg2, _Args...> {
562
+ using __assign_result_t = decltype(_CUDA_VSTD::declval<_Arg1>() = _CUDA_VSTD::declval<_Arg2>());
563
+ using __next_t = __fold_left_assign_impl_<__assign_result_t, _Args...>;
564
+ using __rv = typename __next_t::__rv;
565
+ __MDSPAN_FORCE_INLINE_FUNCTION
566
+ static constexpr __rv
567
+ __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... __args) noexcept {
568
+ return __next_t::__impl(((_Arg1&&)__arg) = (_Arg2&&)__arg2, (_Args&&)__args...);
569
+ }
570
+ };
571
+
572
+ template <class... _Args>
573
+ __MDSPAN_FORCE_INLINE_FUNCTION
574
+ constexpr typename __fold_left_assign_impl_<_Args...>::__rv
575
+ __fold_left_assign_impl(_Args&&... __args) {
576
+ return __fold_left_assign_impl_<_Args...>::__impl((_Args&&)__args...);
577
+ }
578
+
579
+ // </editor-fold> end left assign }}}2
580
+ //------------------------------------------------------------------------------
581
+
582
+ #endif
583
+
584
+
585
+ template <class... _Args>
586
+ _LIBCUDACXX_HOST_DEVICE
587
+ constexpr __mdspan_enable_fold_comma __fold_comma_impl(_Args&&...) noexcept { return { }; }
588
+
589
+ template <bool... _Bs>
590
+ struct __bools;
591
+
592
+ } // __fold_compatibility_impl
593
+
594
+ _LIBCUDACXX_END_NAMESPACE_STD
595
+
596
+ # define __MDSPAN_FOLD_AND(...) _CUDA_VSTD::__fold_compatibility_impl::__fold_right_and_impl((__VA_ARGS__)...)
597
+ # define __MDSPAN_FOLD_OR(...) _CUDA_VSTD::__fold_compatibility_impl::__fold_right_or_impl((__VA_ARGS__)...)
598
+ # define __MDSPAN_FOLD_ASSIGN_LEFT(__INIT, ...) _CUDA_VSTD::__fold_compatibility_impl::__fold_left_assign_impl(__INIT, (__VA_ARGS__)...)
599
+ # define __MDSPAN_FOLD_ASSIGN_RIGHT(__PACK, ...) _CUDA_VSTD::__fold_compatibility_impl::__fold_right_assign_impl((__PACK)..., __VA_ARGS__)
600
+ # define __MDSPAN_FOLD_TIMES_RIGHT(__PACK, ...) _CUDA_VSTD::__fold_compatibility_impl::__fold_right_times_impl((__PACK)..., __VA_ARGS__)
601
+ # define __MDSPAN_FOLD_PLUS_RIGHT(__PACK, ...) _CUDA_VSTD::__fold_compatibility_impl::__fold_right_plus_impl((__PACK)..., __VA_ARGS__)
602
+ # define __MDSPAN_FOLD_COMMA(...) _CUDA_VSTD::__fold_compatibility_impl::__fold_comma_impl((__VA_ARGS__)...)
603
+
604
+ # define __MDSPAN_FOLD_AND_TEMPLATE(...) \
605
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_same, __fold_compatibility_impl::__bools<(__VA_ARGS__)..., true>, __fold_compatibility_impl::__bools<true, (__VA_ARGS__)...>)
606
+
607
+ #endif
608
+
609
+ // </editor-fold> end Variable template compatibility }}}1
610
+ //==============================================================================
611
+
612
+ //==============================================================================
613
+ // <editor-fold desc="Pre-C++14 constexpr"> {{{1
614
+
615
+ #if __MDSPAN_USE_CONSTEXPR_14
616
+ // Workaround for a bug (I think?) in EDG frontends
617
+ # ifdef __EDG__
618
+ # define __MDSPAN_CONSTEXPR_14_DEFAULTED
619
+ # else
620
+ # define __MDSPAN_CONSTEXPR_14_DEFAULTED constexpr
621
+ # endif
622
+ #else
623
+ # define __MDSPAN_CONSTEXPR_14_DEFAULTED
624
+ #endif
625
+
626
+ // </editor-fold> end Pre-C++14 constexpr }}}1
627
+ //==============================================================================
628
+
629
+ #endif // _LIBCUDACXX_STD_VER > 11
630
+
631
+ #ifndef _LIBCUDACXX_NO_EXCEPTIONS
632
+ #define _LIBCUDACXX_THROW_RUNTIME_ERROR(_COND, _MESSAGE) \
633
+ if (!(_COND)) __throw_runtime_error(_MESSAGE)
634
+ #else
635
+ #define _LIBCUDACXX_THROW_RUNTIME_ERROR(_COND, _MESSAGE) \
636
+ _LIBCUDACXX_ASSERT(_COND, _MESSAGE)
637
+ #endif
638
+
639
+ #endif // _LIBCUDACXX___MDSPAN_MACROS_HPP