ZTWHHH committed on
Commit
55803de
·
verified ·
1 Parent(s): 5395649

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/Openacc/cupti_openacc.h +98 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.h +1192 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.hpp +197 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_config.h +304 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_defines.h +276 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_runtime.h +288 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.h +0 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.hpp +0 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.h +754 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.h +137 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.hpp +192 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.h +162 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.hpp +148 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.h +259 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/storage_class.h +142 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/addressof.h +96 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/construct_at.h +209 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h +380 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/voidify.h +34 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_const.h +33 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_cv.h +33 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_lvalue_reference.h +57 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_pointer.h +59 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_volatile.h +33 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/aligned_storage.h +137 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/alignment_of.h +36 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/can_extract_key.h +60 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/common_type.h +123 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/conditional.h +56 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/enable_if.h +34 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/has_virtual_destructor.h +45 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/integral_constant.h +59 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_abstract.h +35 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_array.h +58 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_assignable.h +74 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_base_of.h +70 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_callable.h +36 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_compound.h +52 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_const.h +51 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_convertible.h +147 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_copy_constructible.h +38 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_empty.h +66 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_enum.h +69 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_fundamental.h +55 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_integral.h +79 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_member_function_pointer.h +70 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_move_assignable.h +38 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_move_constructible.h +39 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_assignable.h +114 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_null_pointer.h +45 -0
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/Openacc/cupti_openacc.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+
52
+ #if !defined(_CUPTI_OPENACC_H_)
53
+ #define _CUPTI_OPENACC_H_
54
+
55
+ #ifndef CUPTIAPI
56
+ #ifdef _WIN32
57
+ #define CUPTIAPI __stdcall
58
+ #else
59
+ #define CUPTIAPI
60
+ #endif
61
+ #endif
62
+
63
+ #if defined(__LP64__)
64
+ #define CUPTILP64 1
65
+ #elif defined(_WIN64)
66
+ #define CUPTILP64 1
67
+ #else
68
+ #undef CUPTILP64
69
+ #endif
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif
74
+
75
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
76
+ #pragma GCC visibility push(default)
77
+ #endif
78
+
79
+ /**
80
+ * \brief Initialize OpenACC support
81
+ *
82
+ * \param profRegister function of type acc_prof_reg as obtained from acc_register_library
83
+ * \param profUnregister function of type acc_prof_reg as obtained from acc_register_library
84
+ * \param profLookup function of type acc_prof_lookup as obtained from acc_register_library
85
+ */
86
+ CUptiResult CUPTIAPI
87
+ cuptiOpenACCInitialize(void *profRegister, void *profUnregister, void *profLookup);
88
+
89
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
90
+ #pragma GCC visibility pop
91
+ #endif
92
+
93
+ #if defined(__cplusplus)
94
+ }
95
+ #endif
96
+
97
+ #endif /*_CUPTI_OPENACC_H_*/
98
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.h ADDED
@@ -0,0 +1,1192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__
58
+ #endif
59
+
60
+ #if !defined(__DEVICE_DOUBLE_FUNCTIONS_H__)
61
+ #define __DEVICE_DOUBLE_FUNCTIONS_H__
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #if defined(__cplusplus) && defined(__CUDACC__)
70
+
71
+ /*******************************************************************************
72
+ * *
73
+ * *
74
+ * *
75
+ *******************************************************************************/
76
+
77
+ #if defined(__CUDACC_RTC__)
78
+ #define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__
79
+ #else
80
+ #define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__
81
+ #endif /* __CUDACC_RTC__ */
82
+
83
+ #include "builtin_types.h"
84
+ #include "device_types.h"
85
+ #include "host_defines.h"
86
+
87
+ //NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
88
+ #define EXCLUDE_FROM_RTC
89
+
90
+ extern "C"
91
+ {
92
+ /**
93
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
94
+ * \brief Reinterpret bits in a double as a 64-bit signed integer.
95
+ *
96
+ * Reinterpret the bits in the double-precision floating-point value \p x
97
+ * as a signed 64-bit integer.
98
+ * \return Returns reinterpreted value.
99
+ */
100
+ extern __device__ __device_builtin__ long long int __double_as_longlong(double x);
101
+ /**
102
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
103
+ * \brief Reinterpret bits in a 64-bit signed integer as a double.
104
+ *
105
+ * Reinterpret the bits in the 64-bit signed integer value \p x as
106
+ * a double-precision floating-point value.
107
+ * \return Returns reinterpreted value.
108
+ */
109
+ extern __device__ __device_builtin__ double __longlong_as_double(long long int x);
110
+ /**
111
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
112
+ * \brief Compute
113
+ * \latexonly $x \times y + z$ \endlatexonly
114
+ * \xmlonly
115
+ * <d4p_MathML outputclass="xmlonly">
116
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
117
+ * <m:mi>x</m:mi>
118
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
119
+ * <m:mi>y</m:mi>
120
+ * <m:mo>+</m:mo>
121
+ * <m:mi>z</m:mi>
122
+ * </m:math>
123
+ * </d4p_MathML>
124
+ * \endxmlonly
125
+ * as a single operation in round-to-nearest-even mode.
126
+ *
127
+ * Computes the value of
128
+ * \latexonly $x \times y + z$ \endlatexonly
129
+ * \xmlonly
130
+ * <d4p_MathML outputclass="xmlonly">
131
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
132
+ * <m:mi>x</m:mi>
133
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
134
+ * <m:mi>y</m:mi>
135
+ * <m:mo>+</m:mo>
136
+ * <m:mi>z</m:mi>
137
+ * </m:math>
138
+ * </d4p_MathML>
139
+ * \endxmlonly
140
+ * as a single ternary operation, rounding the
141
+ * result once in round-to-nearest-even mode.
142
+ *
143
+ * \return Returns the rounded value of
144
+ * \latexonly $x \times y + z$ \endlatexonly
145
+ * \xmlonly
146
+ * <d4p_MathML outputclass="xmlonly">
147
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
148
+ * <m:mi>x</m:mi>
149
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
150
+ * <m:mi>y</m:mi>
151
+ * <m:mo>+</m:mo>
152
+ * <m:mi>z</m:mi>
153
+ * </m:math>
154
+ * </d4p_MathML>
155
+ * \endxmlonly
156
+ * as a single operation.
157
+ * - fmaf(
158
+ * \latexonly $\pm \infty$ \endlatexonly
159
+ * \xmlonly
160
+ * <d4p_MathML outputclass="xmlonly">
161
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
162
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
163
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
164
+ * </m:math>
165
+ * </d4p_MathML>
166
+ * \endxmlonly
167
+ * ,
168
+ * \latexonly $\pm 0$ \endlatexonly
169
+ * \xmlonly
170
+ * <d4p_MathML outputclass="xmlonly">
171
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
172
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
173
+ * <m:mn>0</m:mn>
174
+ * </m:math>
175
+ * </d4p_MathML>
176
+ * \endxmlonly
177
+ * , \p z) returns NaN.
178
+ * - fmaf(
179
+ * \latexonly $\pm 0$ \endlatexonly
180
+ * \xmlonly
181
+ * <d4p_MathML outputclass="xmlonly">
182
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
183
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
184
+ * <m:mn>0</m:mn>
185
+ * </m:math>
186
+ * </d4p_MathML>
187
+ * \endxmlonly
188
+ * ,
189
+ * \latexonly $\pm \infty$ \endlatexonly
190
+ * \xmlonly
191
+ * <d4p_MathML outputclass="xmlonly">
192
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
193
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
194
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
195
+ * </m:math>
196
+ * </d4p_MathML>
197
+ * \endxmlonly
198
+ * , \p z) returns NaN.
199
+ * - fmaf(\p x, \p y,
200
+ * \latexonly $-\infty$ \endlatexonly
201
+ * \xmlonly
202
+ * <d4p_MathML outputclass="xmlonly">
203
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
204
+ * <m:mo>-</m:mo>
205
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
206
+ * </m:math>
207
+ * </d4p_MathML>
208
+ * \endxmlonly
209
+ * ) returns NaN if
210
+ * \latexonly $x \times y$ \endlatexonly
211
+ * \xmlonly
212
+ * <d4p_MathML outputclass="xmlonly">
213
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
214
+ * <m:mi>x</m:mi>
215
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
216
+ * <m:mi>y</m:mi>
217
+ * </m:math>
218
+ * </d4p_MathML>
219
+ * \endxmlonly
220
+ * is an exact
221
+ * \latexonly $+\infty$ \endlatexonly
222
+ * \xmlonly
223
+ * <d4p_MathML outputclass="xmlonly">
224
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
225
+ * <m:mo>+</m:mo>
226
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
227
+ * </m:math>
228
+ * </d4p_MathML>
229
+ * \endxmlonly
230
+ * .
231
+ * - fmaf(\p x, \p y,
232
+ * \latexonly $+\infty$ \endlatexonly
233
+ * \xmlonly
234
+ * <d4p_MathML outputclass="xmlonly">
235
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
236
+ * <m:mo>+</m:mo>
237
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
238
+ * </m:math>
239
+ * </d4p_MathML>
240
+ * \endxmlonly
241
+ * ) returns NaN if
242
+ * \latexonly $x \times y$ \endlatexonly
243
+ * \xmlonly
244
+ * <d4p_MathML outputclass="xmlonly">
245
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
246
+ * <m:mi>x</m:mi>
247
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
248
+ * <m:mi>y</m:mi>
249
+ * </m:math>
250
+ * </d4p_MathML>
251
+ * \endxmlonly
252
+ * is an exact
253
+ * \latexonly $-\infty$ \endlatexonly
254
+ * \xmlonly
255
+ * <d4p_MathML outputclass="xmlonly">
256
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
257
+ * <m:mo>-</m:mo>
258
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
259
+ * </m:math>
260
+ * </d4p_MathML>
261
+ * \endxmlonly
262
+ * .
263
+ *
264
+ * \note_accuracy_double
265
+ */
266
+ extern __device__ __device_builtin__ double __fma_rn(double x, double y, double z);
267
+ /**
268
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
269
+ * \brief Compute
270
+ * \latexonly $x \times y + z$ \endlatexonly
271
+ * \xmlonly
272
+ * <d4p_MathML outputclass="xmlonly">
273
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
274
+ * <m:mi>x</m:mi>
275
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
276
+ * <m:mi>y</m:mi>
277
+ * <m:mo>+</m:mo>
278
+ * <m:mi>z</m:mi>
279
+ * </m:math>
280
+ * </d4p_MathML>
281
+ * \endxmlonly
282
+ * as a single operation in round-towards-zero mode.
283
+ *
284
+ * Computes the value of
285
+ * \latexonly $x \times y + z$ \endlatexonly
286
+ * \xmlonly
287
+ * <d4p_MathML outputclass="xmlonly">
288
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
289
+ * <m:mi>x</m:mi>
290
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
291
+ * <m:mi>y</m:mi>
292
+ * <m:mo>+</m:mo>
293
+ * <m:mi>z</m:mi>
294
+ * </m:math>
295
+ * </d4p_MathML>
296
+ * \endxmlonly
297
+ * as a single ternary operation, rounding the
298
+ * result once in round-towards-zero mode.
299
+ *
300
+ * \return Returns the rounded value of
301
+ * \latexonly $x \times y + z$ \endlatexonly
302
+ * \xmlonly
303
+ * <d4p_MathML outputclass="xmlonly">
304
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
305
+ * <m:mi>x</m:mi>
306
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
307
+ * <m:mi>y</m:mi>
308
+ * <m:mo>+</m:mo>
309
+ * <m:mi>z</m:mi>
310
+ * </m:math>
311
+ * </d4p_MathML>
312
+ * \endxmlonly
313
+ * as a single operation.
314
+ * - fmaf(
315
+ * \latexonly $\pm \infty$ \endlatexonly
316
+ * \xmlonly
317
+ * <d4p_MathML outputclass="xmlonly">
318
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
319
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
320
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
321
+ * </m:math>
322
+ * </d4p_MathML>
323
+ * \endxmlonly
324
+ * ,
325
+ * \latexonly $\pm 0$ \endlatexonly
326
+ * \xmlonly
327
+ * <d4p_MathML outputclass="xmlonly">
328
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
329
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
330
+ * <m:mn>0</m:mn>
331
+ * </m:math>
332
+ * </d4p_MathML>
333
+ * \endxmlonly
334
+ * , \p z) returns NaN.
335
+ * - fmaf(
336
+ * \latexonly $\pm 0$ \endlatexonly
337
+ * \xmlonly
338
+ * <d4p_MathML outputclass="xmlonly">
339
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
340
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
341
+ * <m:mn>0</m:mn>
342
+ * </m:math>
343
+ * </d4p_MathML>
344
+ * \endxmlonly
345
+ * ,
346
+ * \latexonly $\pm \infty$ \endlatexonly
347
+ * \xmlonly
348
+ * <d4p_MathML outputclass="xmlonly">
349
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
350
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
351
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
352
+ * </m:math>
353
+ * </d4p_MathML>
354
+ * \endxmlonly
355
+ * , \p z) returns NaN.
356
+ * - fmaf(\p x, \p y,
357
+ * \latexonly $-\infty$ \endlatexonly
358
+ * \xmlonly
359
+ * <d4p_MathML outputclass="xmlonly">
360
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
361
+ * <m:mo>-</m:mo>
362
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
363
+ * </m:math>
364
+ * </d4p_MathML>
365
+ * \endxmlonly
366
+ * ) returns NaN if
367
+ * \latexonly $x \times y$ \endlatexonly
368
+ * \xmlonly
369
+ * <d4p_MathML outputclass="xmlonly">
370
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
371
+ * <m:mi>x</m:mi>
372
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
373
+ * <m:mi>y</m:mi>
374
+ * </m:math>
375
+ * </d4p_MathML>
376
+ * \endxmlonly
377
+ * is an exact
378
+ * \latexonly $+\infty$ \endlatexonly
379
+ * \xmlonly
380
+ * <d4p_MathML outputclass="xmlonly">
381
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
382
+ * <m:mo>+</m:mo>
383
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
384
+ * </m:math>
385
+ * </d4p_MathML>
386
+ * \endxmlonly
387
+ * .
388
+ * - fmaf(\p x, \p y,
389
+ * \latexonly $+\infty$ \endlatexonly
390
+ * \xmlonly
391
+ * <d4p_MathML outputclass="xmlonly">
392
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
393
+ * <m:mo>+</m:mo>
394
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
395
+ * </m:math>
396
+ * </d4p_MathML>
397
+ * \endxmlonly
398
+ * ) returns NaN if
399
+ * \latexonly $x \times y$ \endlatexonly
400
+ * \xmlonly
401
+ * <d4p_MathML outputclass="xmlonly">
402
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
403
+ * <m:mi>x</m:mi>
404
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
405
+ * <m:mi>y</m:mi>
406
+ * </m:math>
407
+ * </d4p_MathML>
408
+ * \endxmlonly
409
+ * is an exact
410
+ * \latexonly $-\infty$ \endlatexonly
411
+ * \xmlonly
412
+ * <d4p_MathML outputclass="xmlonly">
413
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
414
+ * <m:mo>-</m:mo>
415
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
416
+ * </m:math>
417
+ * </d4p_MathML>
418
+ * \endxmlonly
419
+ * .
420
+ *
421
+ * \note_accuracy_double
422
+ */
423
+ extern __device__ __device_builtin__ double __fma_rz(double x, double y, double z);
424
+ /**
425
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
426
+ * \brief Compute
427
+ * \latexonly $x \times y + z$ \endlatexonly
428
+ * \xmlonly
429
+ * <d4p_MathML outputclass="xmlonly">
430
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
431
+ * <m:mi>x</m:mi>
432
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
433
+ * <m:mi>y</m:mi>
434
+ * <m:mo>+</m:mo>
435
+ * <m:mi>z</m:mi>
436
+ * </m:math>
437
+ * </d4p_MathML>
438
+ * \endxmlonly
439
+ * as a single operation in round-up mode.
440
+ *
441
+ * Computes the value of
442
+ * \latexonly $x \times y + z$ \endlatexonly
443
+ * \xmlonly
444
+ * <d4p_MathML outputclass="xmlonly">
445
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
446
+ * <m:mi>x</m:mi>
447
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
448
+ * <m:mi>y</m:mi>
449
+ * <m:mo>+</m:mo>
450
+ * <m:mi>z</m:mi>
451
+ * </m:math>
452
+ * </d4p_MathML>
453
+ * \endxmlonly
454
+ * as a single ternary operation, rounding the
455
+ * result once in round-up (to positive infinity) mode.
456
+ *
457
+ * \return Returns the rounded value of
458
+ * \latexonly $x \times y + z$ \endlatexonly
459
+ * \xmlonly
460
+ * <d4p_MathML outputclass="xmlonly">
461
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
462
+ * <m:mi>x</m:mi>
463
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
464
+ * <m:mi>y</m:mi>
465
+ * <m:mo>+</m:mo>
466
+ * <m:mi>z</m:mi>
467
+ * </m:math>
468
+ * </d4p_MathML>
469
+ * \endxmlonly
470
+ * as a single operation.
471
+ * - fmaf(
472
+ * \latexonly $\pm \infty$ \endlatexonly
473
+ * \xmlonly
474
+ * <d4p_MathML outputclass="xmlonly">
475
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
476
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
477
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
478
+ * </m:math>
479
+ * </d4p_MathML>
480
+ * \endxmlonly
481
+ * ,
482
+ * \latexonly $\pm 0$ \endlatexonly
483
+ * \xmlonly
484
+ * <d4p_MathML outputclass="xmlonly">
485
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
486
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
487
+ * <m:mn>0</m:mn>
488
+ * </m:math>
489
+ * </d4p_MathML>
490
+ * \endxmlonly
491
+ * , \p z) returns NaN.
492
+ * - fmaf(
493
+ * \latexonly $\pm 0$ \endlatexonly
494
+ * \xmlonly
495
+ * <d4p_MathML outputclass="xmlonly">
496
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
497
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
498
+ * <m:mn>0</m:mn>
499
+ * </m:math>
500
+ * </d4p_MathML>
501
+ * \endxmlonly
502
+ * ,
503
+ * \latexonly $\pm \infty$ \endlatexonly
504
+ * \xmlonly
505
+ * <d4p_MathML outputclass="xmlonly">
506
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
507
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
508
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
509
+ * </m:math>
510
+ * </d4p_MathML>
511
+ * \endxmlonly
512
+ * , \p z) returns NaN.
513
+ * - fmaf(\p x, \p y,
514
+ * \latexonly $-\infty$ \endlatexonly
515
+ * \xmlonly
516
+ * <d4p_MathML outputclass="xmlonly">
517
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
518
+ * <m:mo>-</m:mo>
519
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
520
+ * </m:math>
521
+ * </d4p_MathML>
522
+ * \endxmlonly
523
+ * ) returns NaN if
524
+ * \latexonly $x \times y$ \endlatexonly
525
+ * \xmlonly
526
+ * <d4p_MathML outputclass="xmlonly">
527
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
528
+ * <m:mi>x</m:mi>
529
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
530
+ * <m:mi>y</m:mi>
531
+ * </m:math>
532
+ * </d4p_MathML>
533
+ * \endxmlonly
534
+ * is an exact
535
+ * \latexonly $+\infty$ \endlatexonly
536
+ * \xmlonly
537
+ * <d4p_MathML outputclass="xmlonly">
538
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
539
+ * <m:mo>+</m:mo>
540
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
541
+ * </m:math>
542
+ * </d4p_MathML>
543
+ * \endxmlonly
544
+ * .
545
+ * - fmaf(\p x, \p y,
546
+ * \latexonly $+\infty$ \endlatexonly
547
+ * \xmlonly
548
+ * <d4p_MathML outputclass="xmlonly">
549
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
550
+ * <m:mo>+</m:mo>
551
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
552
+ * </m:math>
553
+ * </d4p_MathML>
554
+ * \endxmlonly
555
+ * ) returns NaN if
556
+ * \latexonly $x \times y$ \endlatexonly
557
+ * \xmlonly
558
+ * <d4p_MathML outputclass="xmlonly">
559
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
560
+ * <m:mi>x</m:mi>
561
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
562
+ * <m:mi>y</m:mi>
563
+ * </m:math>
564
+ * </d4p_MathML>
565
+ * \endxmlonly
566
+ * is an exact
567
+ * \latexonly $-\infty$ \endlatexonly
568
+ * \xmlonly
569
+ * <d4p_MathML outputclass="xmlonly">
570
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
571
+ * <m:mo>-</m:mo>
572
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
573
+ * </m:math>
574
+ * </d4p_MathML>
575
+ * \endxmlonly
576
+ * .
577
+ *
578
+ * \note_accuracy_double
579
+ */
580
+ extern __device__ __device_builtin__ double __fma_ru(double x, double y, double z);
581
+ /**
582
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
583
+ * \brief Compute
584
+ * \latexonly $x \times y + z$ \endlatexonly
585
+ * \xmlonly
586
+ * <d4p_MathML outputclass="xmlonly">
587
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
588
+ * <m:mi>x</m:mi>
589
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
590
+ * <m:mi>y</m:mi>
591
+ * <m:mo>+</m:mo>
592
+ * <m:mi>z</m:mi>
593
+ * </m:math>
594
+ * </d4p_MathML>
595
+ * \endxmlonly
596
+ * as a single operation in round-down mode.
597
+ *
598
+ * Computes the value of
599
+ * \latexonly $x \times y + z$ \endlatexonly
600
+ * \xmlonly
601
+ * <d4p_MathML outputclass="xmlonly">
602
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
603
+ * <m:mi>x</m:mi>
604
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
605
+ * <m:mi>y</m:mi>
606
+ * <m:mo>+</m:mo>
607
+ * <m:mi>z</m:mi>
608
+ * </m:math>
609
+ * </d4p_MathML>
610
+ * \endxmlonly
611
+ * as a single ternary operation, rounding the
612
+ * result once in round-down (to negative infinity) mode.
613
+ *
614
+ * \return Returns the rounded value of
615
+ * \latexonly $x \times y + z$ \endlatexonly
616
+ * \xmlonly
617
+ * <d4p_MathML outputclass="xmlonly">
618
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
619
+ * <m:mi>x</m:mi>
620
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
621
+ * <m:mi>y</m:mi>
622
+ * <m:mo>+</m:mo>
623
+ * <m:mi>z</m:mi>
624
+ * </m:math>
625
+ * </d4p_MathML>
626
+ * \endxmlonly
627
+ * as a single operation.
628
+ * - fmaf(
629
+ * \latexonly $\pm \infty$ \endlatexonly
630
+ * \xmlonly
631
+ * <d4p_MathML outputclass="xmlonly">
632
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
633
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
634
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
635
+ * </m:math>
636
+ * </d4p_MathML>
637
+ * \endxmlonly
638
+ * ,
639
+ * \latexonly $\pm 0$ \endlatexonly
640
+ * \xmlonly
641
+ * <d4p_MathML outputclass="xmlonly">
642
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
643
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
644
+ * <m:mn>0</m:mn>
645
+ * </m:math>
646
+ * </d4p_MathML>
647
+ * \endxmlonly
648
+ * , \p z) returns NaN.
649
+ * - fmaf(
650
+ * \latexonly $\pm 0$ \endlatexonly
651
+ * \xmlonly
652
+ * <d4p_MathML outputclass="xmlonly">
653
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
654
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
655
+ * <m:mn>0</m:mn>
656
+ * </m:math>
657
+ * </d4p_MathML>
658
+ * \endxmlonly
659
+ * ,
660
+ * \latexonly $\pm \infty$ \endlatexonly
661
+ * \xmlonly
662
+ * <d4p_MathML outputclass="xmlonly">
663
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
664
+ * <m:mo>&#x00B1;<!-- &PlusMinus; --></m:mo>
665
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
666
+ * </m:math>
667
+ * </d4p_MathML>
668
+ * \endxmlonly
669
+ * , \p z) returns NaN.
670
+ * - fmaf(\p x, \p y,
671
+ * \latexonly $-\infty$ \endlatexonly
672
+ * \xmlonly
673
+ * <d4p_MathML outputclass="xmlonly">
674
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
675
+ * <m:mo>-</m:mo>
676
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
677
+ * </m:math>
678
+ * </d4p_MathML>
679
+ * \endxmlonly
680
+ * ) returns NaN if
681
+ * \latexonly $x \times y$ \endlatexonly
682
+ * \xmlonly
683
+ * <d4p_MathML outputclass="xmlonly">
684
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
685
+ * <m:mi>x</m:mi>
686
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
687
+ * <m:mi>y</m:mi>
688
+ * </m:math>
689
+ * </d4p_MathML>
690
+ * \endxmlonly
691
+ * is an exact
692
+ * \latexonly $+\infty$ \endlatexonly
693
+ * \xmlonly
694
+ * <d4p_MathML outputclass="xmlonly">
695
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
696
+ * <m:mo>+</m:mo>
697
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
698
+ * </m:math>
699
+ * </d4p_MathML>
700
+ * \endxmlonly
701
+ * .
702
+ * - fmaf(\p x, \p y,
703
+ * \latexonly $+\infty$ \endlatexonly
704
+ * \xmlonly
705
+ * <d4p_MathML outputclass="xmlonly">
706
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
707
+ * <m:mo>+</m:mo>
708
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
709
+ * </m:math>
710
+ * </d4p_MathML>
711
+ * \endxmlonly
712
+ * ) returns NaN if
713
+ * \latexonly $x \times y$ \endlatexonly
714
+ * \xmlonly
715
+ * <d4p_MathML outputclass="xmlonly">
716
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
717
+ * <m:mi>x</m:mi>
718
+ * <m:mo>&#x00D7;<!-- &Multiply; --></m:mo>
719
+ * <m:mi>y</m:mi>
720
+ * </m:math>
721
+ * </d4p_MathML>
722
+ * \endxmlonly
723
+ * is an exact
724
+ * \latexonly $-\infty$ \endlatexonly
725
+ * \xmlonly
726
+ * <d4p_MathML outputclass="xmlonly">
727
+ * <m:math xmlns:m="http://www.w3.org/1998/Math/MathML">
728
+ * <m:mo>-</m:mo>
729
+ * <m:mn>&#x221E;<!-- &Infinity; --></m:mn>
730
+ * </m:math>
731
+ * </d4p_MathML>
732
+ * \endxmlonly
733
+ * .
734
+ *
735
+ * \note_accuracy_double
736
+ */
737
+ extern __device__ __device_builtin__ double __fma_rd(double x, double y, double z);
738
+ /**
739
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
740
+ * \brief Add two floating-point values in round-to-nearest-even mode.
741
+ *
742
+ * Adds two floating-point values \p x and \p y in round-to-nearest-even mode.
743
+ *
744
+ * \return Returns \p x + \p y.
745
+ *
746
+ * \note_accuracy_double
747
+ * \note_nofma
748
+ */
749
+ extern __device__ __device_builtin__ double __dadd_rn(double x, double y);
750
+ /**
751
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
752
+ * \brief Add two floating-point values in round-towards-zero mode.
753
+ *
754
+ * Adds two floating-point values \p x and \p y in round-towards-zero mode.
755
+ *
756
+ * \return Returns \p x + \p y.
757
+ *
758
+ * \note_accuracy_double
759
+ * \note_nofma
760
+ */
761
+ extern __device__ __device_builtin__ double __dadd_rz(double x, double y);
762
+ /**
763
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
764
+ * \brief Add two floating-point values in round-up mode.
765
+ *
766
+ * Adds two floating-point values \p x and \p y in round-up (to positive infinity) mode.
767
+ *
768
+ * \return Returns \p x + \p y.
769
+ *
770
+ * \note_accuracy_double
771
+ * \note_nofma
772
+ */
773
+ extern __device__ __device_builtin__ double __dadd_ru(double x, double y);
774
+ /**
775
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
776
+ * \brief Add two floating-point values in round-down mode.
777
+ *
778
+ * Adds two floating-point values \p x and \p y in round-down (to negative infinity) mode.
779
+ *
780
+ * \return Returns \p x + \p y.
781
+ *
782
+ * \note_accuracy_double
783
+ * \note_nofma
784
+ */
785
+ extern __device__ __device_builtin__ double __dadd_rd(double x, double y);
786
+ /**
787
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
788
+ * \brief Subtract two floating-point values in round-to-nearest-even mode.
789
+ *
790
+ * Subtracts two floating-point values \p x and \p y in round-to-nearest-even mode.
791
+ *
792
+ * \return Returns \p x - \p y.
793
+ *
794
+ * \note_accuracy_double
795
+ * \note_nofma
796
+ */
797
+ extern __device__ __device_builtin__ double __dsub_rn(double x, double y);
798
+ /**
799
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
800
+ * \brief Subtract two floating-point values in round-towards-zero mode.
801
+ *
802
+ * Subtracts two floating-point values \p x and \p y in round-towards-zero mode.
803
+ *
804
+ * \return Returns \p x - \p y.
805
+ *
806
+ * \note_accuracy_double
807
+ * \note_nofma
808
+ */
809
+ extern __device__ __device_builtin__ double __dsub_rz(double x, double y);
810
+ /**
811
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
812
+ * \brief Subtract two floating-point values in round-up mode.
813
+ *
814
+ * Subtracts two floating-point values \p x and \p y in round-up (to positive infinity) mode.
815
+ *
816
+ * \return Returns \p x - \p y.
817
+ *
818
+ * \note_accuracy_double
819
+ * \note_nofma
820
+ */
821
+ extern __device__ __device_builtin__ double __dsub_ru(double x, double y);
822
+ /**
823
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
824
+ * \brief Subtract two floating-point values in round-down mode.
825
+ *
826
+ * Subtracts two floating-point values \p x and \p y in round-down (to negative infinity) mode.
827
+ *
828
+ * \return Returns \p x - \p y.
829
+ *
830
+ * \note_accuracy_double
831
+ * \note_nofma
832
+ */
833
+ extern __device__ __device_builtin__ double __dsub_rd(double x, double y);
834
+ /**
835
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
836
+ * \brief Multiply two floating-point values in round-to-nearest-even mode.
837
+ *
838
+ * Multiplies two floating-point values \p x and \p y in round-to-nearest-even mode.
839
+ *
840
+ * \return Returns \p x * \p y.
841
+ *
842
+ * \note_accuracy_double
843
+ * \note_nofma
844
+ */
845
+ extern __device__ __device_builtin__ double __dmul_rn(double x, double y);
846
+ /**
847
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
848
+ * \brief Multiply two floating-point values in round-towards-zero mode.
849
+ *
850
+ * Multiplies two floating-point values \p x and \p y in round-towards-zero mode.
851
+ *
852
+ * \return Returns \p x * \p y.
853
+ *
854
+ * \note_accuracy_double
855
+ * \note_nofma
856
+ */
857
+ extern __device__ __device_builtin__ double __dmul_rz(double x, double y);
858
+ /**
859
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
860
+ * \brief Multiply two floating-point values in round-up mode.
861
+ *
862
+ * Multiplies two floating-point values \p x and \p y in round-up (to positive infinity) mode.
863
+ *
864
+ * \return Returns \p x * \p y.
865
+ *
866
+ * \note_accuracy_double
867
+ * \note_nofma
868
+ */
869
+ extern __device__ __device_builtin__ double __dmul_ru(double x, double y);
870
+ /**
871
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
872
+ * \brief Multiply two floating-point values in round-down mode.
873
+ *
874
+ * Multiplies two floating-point values \p x and \p y in round-down (to negative infinity) mode.
875
+ *
876
+ * \return Returns \p x * \p y.
877
+ *
878
+ * \note_accuracy_double
879
+ * \note_nofma
880
+ */
881
+ extern __device__ __device_builtin__ double __dmul_rd(double x, double y);
882
+ /**
883
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
884
+ * \brief Convert a double to a float in round-to-nearest-even mode.
885
+ *
886
+ * Convert the double-precision floating-point value \p x to a single-precision
887
+ * floating-point value in round-to-nearest-even mode.
888
+ * \return Returns converted value.
889
+ */
890
+ extern __device__ __device_builtin__ float __double2float_rn(double x);
891
+ /**
892
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
893
+ * \brief Convert a double to a float in round-towards-zero mode.
894
+ *
895
+ * Convert the double-precision floating-point value \p x to a single-precision
896
+ * floating-point value in round-towards-zero mode.
897
+ * \return Returns converted value.
898
+ */
899
+ extern __device__ __device_builtin__ float __double2float_rz(double x);
900
+ /**
901
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
902
+ * \brief Convert a double to a float in round-up mode.
903
+ *
904
+ * Convert the double-precision floating-point value \p x to a single-precision
905
+ * floating-point value in round-up (to positive infinity) mode.
906
+ * \return Returns converted value.
907
+ */
908
+ extern __device__ __device_builtin__ float __double2float_ru(double x);
909
+ /**
910
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
911
+ * \brief Convert a double to a float in round-down mode.
912
+ *
913
+ * Convert the double-precision floating-point value \p x to a single-precision
914
+ * floating-point value in round-down (to negative infinity) mode.
915
+ * \return Returns converted value.
916
+ */
917
+ extern __device__ __device_builtin__ float __double2float_rd(double x);
918
+ /**
919
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
920
+ * \brief Convert a double to a signed int in round-to-nearest-even mode.
921
+ *
922
+ * Convert the double-precision floating-point value \p x to a
923
+ * signed integer value in round-to-nearest-even mode.
924
+ * \return Returns converted value.
925
+ */
926
+ extern __device__ __device_builtin__ int __double2int_rn(double x);
927
+ /**
928
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
929
+ * \brief Convert a double to a signed int in round-up mode.
930
+ *
931
+ * Convert the double-precision floating-point value \p x to a
932
+ * signed integer value in round-up (to positive infinity) mode.
933
+ * \return Returns converted value.
934
+ */
935
+ extern __device__ __device_builtin__ int __double2int_ru(double x);
936
+ /**
937
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
938
+ * \brief Convert a double to a signed int in round-down mode.
939
+ *
940
+ * Convert the double-precision floating-point value \p x to a
941
+ * signed integer value in round-down (to negative infinity) mode.
942
+ * \return Returns converted value.
943
+ */
944
+ extern __device__ __device_builtin__ int __double2int_rd(double x);
945
+ /**
946
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
947
+ * \brief Convert a double to an unsigned int in round-to-nearest-even mode.
948
+ *
949
+ * Convert the double-precision floating-point value \p x to an
950
+ * unsigned integer value in round-to-nearest-even mode.
951
+ * \return Returns converted value.
952
+ */
953
+ extern __device__ __device_builtin__ unsigned int __double2uint_rn(double x);
954
+ /**
955
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
956
+ * \brief Convert a double to an unsigned int in round-up mode.
957
+ *
958
+ * Convert the double-precision floating-point value \p x to an
959
+ * unsigned integer value in round-up (to positive infinity) mode.
960
+ * \return Returns converted value.
961
+ */
962
+ extern __device__ __device_builtin__ unsigned int __double2uint_ru(double x);
963
+ /**
964
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
965
+ * \brief Convert a double to an unsigned int in round-down mode.
966
+ *
967
+ * Convert the double-precision floating-point value \p x to an
968
+ * unsigned integer value in round-down (to negative infinity) mode.
969
+ * \return Returns converted value.
970
+ */
971
+ extern __device__ __device_builtin__ unsigned int __double2uint_rd(double x);
972
+ /**
973
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
974
+ * \brief Convert a double to a signed 64-bit int in round-to-nearest-even mode.
975
+ *
976
+ * Convert the double-precision floating-point value \p x to a
977
+ * signed 64-bit integer value in round-to-nearest-even mode.
978
+ * \return Returns converted value.
979
+ */
980
+ extern __device__ __device_builtin__ long long int __double2ll_rn(double x);
981
+ /**
982
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
983
+ * \brief Convert a double to a signed 64-bit int in round-up mode.
984
+ *
985
+ * Convert the double-precision floating-point value \p x to a
986
+ * signed 64-bit integer value in round-up (to positive infinity) mode.
987
+ * \return Returns converted value.
988
+ */
989
+ extern __device__ __device_builtin__ long long int __double2ll_ru(double x);
990
+ /**
991
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
992
+ * \brief Convert a double to a signed 64-bit int in round-down mode.
993
+ *
994
+ * Convert the double-precision floating-point value \p x to a
995
+ * signed 64-bit integer value in round-down (to negative infinity) mode.
996
+ * \return Returns converted value.
997
+ */
998
+ extern __device__ __device_builtin__ long long int __double2ll_rd(double x);
999
+ /**
1000
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1001
+ * \brief Convert a double to an unsigned 64-bit int in round-to-nearest-even mode.
1002
+ *
1003
+ * Convert the double-precision floating-point value \p x to an
1004
+ * unsigned 64-bit integer value in round-to-nearest-even mode.
1005
+ * \return Returns converted value.
1006
+ */
1007
+ extern __device__ __device_builtin__ unsigned long long int __double2ull_rn(double x);
1008
+ /**
1009
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1010
+ * \brief Convert a double to an unsigned 64-bit int in round-up mode.
1011
+ *
1012
+ * Convert the double-precision floating-point value \p x to an
1013
+ * unsigned 64-bit integer value in round-up (to positive infinity) mode.
1014
+ * \return Returns converted value.
1015
+ */
1016
+ extern __device__ __device_builtin__ unsigned long long int __double2ull_ru(double x);
1017
+ /**
1018
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1019
+ * \brief Convert a double to an unsigned 64-bit int in round-down mode.
1020
+ *
1021
+ * Convert the double-precision floating-point value \p x to an
1022
+ * unsigned 64-bit integer value in round-down (to negative infinity) mode.
1023
+ * \return Returns converted value.
1024
+ */
1025
+ extern __device__ __device_builtin__ unsigned long long int __double2ull_rd(double x);
1026
+ /**
1027
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1028
+ * \brief Convert a signed int to a double.
1029
+ *
1030
+ * Convert the signed integer value \p x to a double-precision floating-point value.
1031
+ * \return Returns converted value.
1032
+ */
1033
+ extern __device__ __device_builtin__ double __int2double_rn(int x);
1034
+ /**
1035
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1036
+ * \brief Convert an unsigned int to a double.
1037
+ *
1038
+ * Convert the unsigned integer value \p x to a double-precision floating-point value.
1039
+ * \return Returns converted value.
1040
+ */
1041
+ extern __device__ __device_builtin__ double __uint2double_rn(unsigned int x);
1042
+ /**
1043
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1044
+ * \brief Convert a signed 64-bit int to a double in round-to-nearest-even mode.
1045
+ *
1046
+ * Convert the signed 64-bit integer value \p x to a double-precision floating-point
1047
+ * value in round-to-nearest-even mode.
1048
+ * \return Returns converted value.
1049
+ */
1050
+ extern __device__ __device_builtin__ double __ll2double_rn(long long int x);
1051
+ /**
1052
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1053
+ * \brief Convert a signed 64-bit int to a double in round-towards-zero mode.
1054
+ *
1055
+ * Convert the signed 64-bit integer value \p x to a double-precision floating-point
1056
+ * value in round-towards-zero mode.
1057
+ * \return Returns converted value.
1058
+ */
1059
+ extern __device__ __device_builtin__ double __ll2double_rz(long long int x);
1060
+ /**
1061
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1062
+ * \brief Convert a signed 64-bit int to a double in round-up mode.
1063
+ *
1064
+ * Convert the signed 64-bit integer value \p x to a double-precision floating-point
1065
+ * value in round-up (to positive infinity) mode.
1066
+ * \return Returns converted value.
1067
+ */
1068
+ extern __device__ __device_builtin__ double __ll2double_ru(long long int x);
1069
+ /**
1070
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1071
+ * \brief Convert a signed 64-bit int to a double in round-down mode.
1072
+ *
1073
+ * Convert the signed 64-bit integer value \p x to a double-precision floating-point
1074
+ * value in round-down (to negative infinity) mode.
1075
+ * \return Returns converted value.
1076
+ */
1077
+ extern __device__ __device_builtin__ double __ll2double_rd(long long int x);
1078
+ /**
1079
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1080
+ * \brief Convert an unsigned 64-bit int to a double in round-to-nearest-even mode.
1081
+ *
1082
+ * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
1083
+ * value in round-to-nearest-even mode.
1084
+ * \return Returns converted value.
1085
+ */
1086
+ extern __device__ __device_builtin__ double __ull2double_rn(unsigned long long int x);
1087
+ /**
1088
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1089
+ * \brief Convert an unsigned 64-bit int to a double in round-towards-zero mode.
1090
+ *
1091
+ * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
1092
+ * value in round-towards-zero mode.
1093
+ * \return Returns converted value.
1094
+ */
1095
+ extern __device__ __device_builtin__ double __ull2double_rz(unsigned long long int x);
1096
+ /**
1097
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1098
+ * \brief Convert an unsigned 64-bit int to a double in round-up mode.
1099
+ *
1100
+ * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
1101
+ * value in round-up (to positive infinity) mode.
1102
+ * \return Returns converted value.
1103
+ */
1104
+ extern __device__ __device_builtin__ double __ull2double_ru(unsigned long long int x);
1105
+ /**
1106
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1107
+ * \brief Convert an unsigned 64-bit int to a double in round-down mode.
1108
+ *
1109
+ * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
1110
+ * value in round-down (to negative infinity) mode.
1111
+ * \return Returns converted value.
1112
+ */
1113
+ extern __device__ __device_builtin__ double __ull2double_rd(unsigned long long int x);
1114
+ /**
1115
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1116
+ * \brief Reinterpret high 32 bits in a double as a signed integer.
1117
+ *
1118
+ * Reinterpret the high 32 bits in the double-precision floating-point value \p x
1119
+ * as a signed integer.
1120
+ * \return Returns reinterpreted value.
1121
+ */
1122
+ extern __device__ __device_builtin__ int __double2hiint(double x);
1123
+ /**
1124
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1125
+ * \brief Reinterpret low 32 bits in a double as a signed integer.
1126
+ *
1127
+ * Reinterpret the low 32 bits in the double-precision floating-point value \p x
1128
+ * as a signed integer.
1129
+ * \return Returns reinterpreted value.
1130
+ */
1131
+ extern __device__ __device_builtin__ int __double2loint(double x);
1132
+ /**
1133
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
1134
+ * \brief Reinterpret high and low 32-bit integer values as a double.
1135
+ *
1136
+ * Reinterpret the integer value of \p hi as the high 32 bits of a
1137
+ * double-precision floating-point value and the integer value of \p lo
1138
+ * as the low 32 bits of the same double-precision floating-point value.
1139
+ * \return Returns reinterpreted value.
1140
+ */
1141
+ extern __device__ __device_builtin__ double __hiloint2double(int hi, int lo);
1142
+ }
1143
+
1144
+ /*******************************************************************************
1145
+ * *
1146
+ * *
1147
+ * *
1148
+ *******************************************************************************/
1149
+
1150
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode);
1151
+
1152
+ #undef EXCLUDE_FROM_RTC
1153
+
1154
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode = cudaRoundNearest);
1155
+
1156
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode = cudaRoundNearest);
1157
+
1158
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode = cudaRoundNearest);
1159
+
1160
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode = cudaRoundZero);
1161
+
1162
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode = cudaRoundZero);
1163
+
1164
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode = cudaRoundZero);
1165
+
1166
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode = cudaRoundZero);
1167
+
1168
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode = cudaRoundNearest);
1169
+
1170
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode = cudaRoundNearest);
1171
+
1172
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode = cudaRoundNearest);
1173
+
1174
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode = cudaRoundNearest);
1175
+
1176
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode = cudaRoundNearest);
1177
+
1178
+ #undef __DEVICE_DOUBLE_FUNCTIONS_DECL__
1179
+
1180
+
1181
+ #endif /* __cplusplus && __CUDACC__ */
1182
+
1183
+ #if !defined(__CUDACC_RTC__)
1184
+ #include "device_double_functions.hpp"
1185
+ #endif /* !__CUDACC_RTC__ */
1186
+
1187
+ #endif /* !__DEVICE_DOUBLE_FUNCTIONS_H__ */
1188
+
1189
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__)
1190
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
1191
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__
1192
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_double_functions.hpp ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__
58
+ #endif
59
+
60
+ #if !defined(__DEVICE_DOUBLE_FUNCTIONS_HPP__)
61
+ #define __DEVICE_DOUBLE_FUNCTIONS_HPP__
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #if defined(__cplusplus) && defined(__CUDACC__)
70
+
71
+ /*******************************************************************************
72
+ * *
73
+ * *
74
+ * *
75
+ *******************************************************************************/
76
+
77
+ #if defined(__CUDACC_RTC__)
78
+ #define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__
79
+ #else
80
+ #define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__
81
+ #endif /* __CUDACC_RTC__ */
82
+
83
+ #include "builtin_types.h"
84
+ #include "device_types.h"
85
+ #include "host_defines.h"
86
+
87
+ /*******************************************************************************
88
+ * *
89
+ * *
90
+ * *
91
+ *******************************************************************************/
92
+
93
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode)
94
+ {
95
+ return mode == cudaRoundZero ? __fma_rz(a, b, c) :
96
+ mode == cudaRoundPosInf ? __fma_ru(a, b, c) :
97
+ mode == cudaRoundMinInf ? __fma_rd(a, b, c) :
98
+ __fma_rn(a, b, c);
99
+ }
100
+
101
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode)
102
+ {
103
+ return mode == cudaRoundZero ? __dmul_rz(a, b) :
104
+ mode == cudaRoundPosInf ? __dmul_ru(a, b) :
105
+ mode == cudaRoundMinInf ? __dmul_rd(a, b) :
106
+ __dmul_rn(a, b);
107
+ }
108
+
109
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode)
110
+ {
111
+ return mode == cudaRoundZero ? __dadd_rz(a, b) :
112
+ mode == cudaRoundPosInf ? __dadd_ru(a, b) :
113
+ mode == cudaRoundMinInf ? __dadd_rd(a, b) :
114
+ __dadd_rn(a, b);
115
+ }
116
+
117
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode)
118
+ {
119
+ return mode == cudaRoundZero ? __dsub_rz(a, b) :
120
+ mode == cudaRoundPosInf ? __dsub_ru(a, b) :
121
+ mode == cudaRoundMinInf ? __dsub_rd(a, b) :
122
+ __dsub_rn(a, b);
123
+ }
124
+
125
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode)
126
+ {
127
+ return mode == cudaRoundNearest ? __double2int_rn(a) :
128
+ mode == cudaRoundPosInf ? __double2int_ru(a) :
129
+ mode == cudaRoundMinInf ? __double2int_rd(a) :
130
+ __double2int_rz(a);
131
+ }
132
+
133
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode)
134
+ {
135
+ return mode == cudaRoundNearest ? __double2uint_rn(a) :
136
+ mode == cudaRoundPosInf ? __double2uint_ru(a) :
137
+ mode == cudaRoundMinInf ? __double2uint_rd(a) :
138
+ __double2uint_rz(a);
139
+ }
140
+
141
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode)
142
+ {
143
+ return mode == cudaRoundNearest ? __double2ll_rn(a) :
144
+ mode == cudaRoundPosInf ? __double2ll_ru(a) :
145
+ mode == cudaRoundMinInf ? __double2ll_rd(a) :
146
+ __double2ll_rz(a);
147
+ }
148
+
149
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode)
150
+ {
151
+ return mode == cudaRoundNearest ? __double2ull_rn(a) :
152
+ mode == cudaRoundPosInf ? __double2ull_ru(a) :
153
+ mode == cudaRoundMinInf ? __double2ull_rd(a) :
154
+ __double2ull_rz(a);
155
+ }
156
+
157
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode)
158
+ {
159
+ return mode == cudaRoundZero ? __ll2double_rz(a) :
160
+ mode == cudaRoundPosInf ? __ll2double_ru(a) :
161
+ mode == cudaRoundMinInf ? __ll2double_rd(a) :
162
+ __ll2double_rn(a);
163
+ }
164
+
165
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode)
166
+ {
167
+ return mode == cudaRoundZero ? __ull2double_rz(a) :
168
+ mode == cudaRoundPosInf ? __ull2double_ru(a) :
169
+ mode == cudaRoundMinInf ? __ull2double_rd(a) :
170
+ __ull2double_rn(a);
171
+ }
172
+
173
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode)
174
+ {
175
+ return (double)a;
176
+ }
177
+
178
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode)
179
+ {
180
+ return (double)a;
181
+ }
182
+
183
+ __DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode)
184
+ {
185
+ return (double)a;
186
+ }
187
+
188
+ #undef __DEVICE_DOUBLE_FUNCTIONS_DECL__
189
+
190
+ #endif /* __cplusplus && __CUDACC__ */
191
+
192
+ #endif /* !__DEVICE_DOUBLE_FUNCTIONS_HPP__ */
193
+
194
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__)
195
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
196
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__
197
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_config.h ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__
58
+ #endif
59
+
60
+ #if !defined(__HOST_CONFIG_H__)
61
+ #define __HOST_CONFIG_H__
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #if defined(__CUDACC__)
70
+
71
+ #if defined(__CUDACC_RTC__)
72
+
73
+ #define _CRTIMP
74
+ #define __THROW
75
+
76
+ #else /* __CUDACC_RTC__ */
77
+
78
+ /* check for host compilers that are compatible with nvcc */
79
+ #if !defined(__GNUC__) && !defined(_WIN32)
80
+
81
+ #error --- !!! UNSUPPORTED COMPILER !!! ---
82
+
83
+ #endif /* !__GNUC__ && !_WIN32 */
84
+
85
+ /* check invalid configurations */
86
+ #if defined(__PGIC__)
87
+ #if !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__)
88
+ #error -- unsupported pgc++ configuration! pgc++ is supported only on Linux x86_64!
89
+ #endif /* !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__) */
90
+ #endif /* defined(__PGIC__) */
91
+
92
+ #if defined(__powerpc__)
93
+ #if !defined(__powerpc64__) || !defined(__LITTLE_ENDIAN__)
94
+ #error -- unsupported PPC platform! Only 64-bit little endian PPC is supported!
95
+ #endif /* !__powerpc64__ || !__LITTLE_ENDIAN__ */
96
+ #endif /* __powerpc__ */
97
+
98
+ #if defined(__APPLE__) && defined(__MACH__) && !defined(__clang__)
99
+ #error -- clang and clang++ are the only supported host compilers on Mac OS X!
100
+ #endif /* __APPLE__ && __MACH__ && !__clang__ */
101
+
102
+
103
+ /* check host compiler version */
104
+ #if !__NV_NO_HOST_COMPILER_CHECK
105
+
106
+ #if defined(__ICC)
107
+
108
+ #if (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && !(__ICC >= 1900 && __ICC <= 2021)) || !defined(__GNUC__) || !defined(__LP64__)
109
+
110
+ #error -- unsupported ICC configuration! Only ICC 15.0, ICC 16.0, ICC 17.0, ICC 18.0, ICC 19.x and 20.x on Linux x86_64 are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
111
+
112
+ #endif /* (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && __ICC != 1900) || !__GNUC__ || !__LP64__ */
113
+
114
+ #endif /* __ICC */
115
+
116
+ #if defined(__GRCO_CLANG_COMPILER__)
117
+ #if (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17))
118
+ #error -- unsupported Grace clang version! The version must be 16.x to 17.x. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
119
+ #endif /* (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17)) */
120
+
121
+ #endif /* __GRCO_CLANG_COMPILER__ */
122
+
123
+ #if defined(__INTEL_CLANG_COMPILER)
124
+ #error -- unsupported Intel ICX compiler! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
125
+ #endif /* __INTEL_CLANG_COMPILER */
126
+
127
+ #if defined(__powerpc__)
128
+
129
+ #if defined(__ibmxl_vrm__) && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) && \
130
+ !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000)
131
+
132
+ #error -- unsupported xlC version! only xlC 13.1 and 16.1 are supported. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
133
+
134
+ #endif /* __ibmxl_vrm__ && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) &&
135
+ !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000) */
136
+
137
+ #endif /* __powerpc__ */
138
+
139
+ #if defined(__GNUC__)
140
+
141
+ #if __GNUC__ > 12
142
+
143
+ #error -- unsupported GNU version! gcc versions later than 12 are not supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
144
+
145
+ #endif /* __GNUC__ > 12 */
146
+
147
+
148
+ #if defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__)
149
+
150
+ #if (__clang_major__ >= 17) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3))
151
+ #error -- unsupported clang version! clang version must be less than 16 and greater than 3.2 . The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
152
+
153
+ #endif /* (__clang_major__ >= 17) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3)) */
154
+
155
+ #endif /* defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__) */
156
+
157
+
158
+ #endif /* __GNUC__ */
159
+
160
+ #if defined(_WIN32)
161
+
162
+ #if _MSC_VER < 1910 || _MSC_VER >= 1940
163
+
164
+ #error -- unsupported Microsoft Visual Studio version! Only the versions between 2017 and 2022 (inclusive) are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
165
+
166
+ #elif _MSC_VER >= 1910 && _MSC_VER < 1910
167
+
168
+ #pragma message("support for this version of Microsoft Visual Studio has been deprecated! Only the versions between 2017 and 2022 (inclusive) are supported!")
169
+
170
+ #endif /* (_MSC_VER < 1910 || _MSC_VER >= 1940) || (_MSC_VER >= 1910 && _MSC_VER < 1910) */
171
+
172
+ #endif /* _WIN32 */
173
+ #endif /* !__NV_NO_HOST_COMPILER_CHECK */
174
+
175
+
176
+ /* configure host compiler */
177
+ #if defined(__APPLE__)
178
+
179
+ #define _CRTIMP
180
+ #define _ACRTIMP
181
+ #define __THROW
182
+
183
+ #if defined(__BLOCKS__) /* nvcc does not support closures */
184
+
185
+ #undef __BLOCKS__
186
+
187
+ #endif /* __BLOCKS__ */
188
+
189
+ #elif defined(__ANDROID__)
190
+
191
+ #define _CRTIMP
192
+ #define _ACRTIMP
193
+ #define __THROW
194
+
195
+ #elif defined(__QNX__)
196
+
197
+ #define _CRTIMP
198
+ #define _ACRTIMP
199
+ #define __THROW
200
+
201
+ #elif defined(__HORIZON__)
202
+
203
+ #define _CRTIMP
204
+ #define _ACRTIMP
205
+ #define __THROW
206
+
207
+ #elif defined(__GNUC__)
208
+
209
+ #define _CRTIMP
210
+ #define _ACRTIMP
211
+
212
+ #include <features.h> /* for __THROW */
213
+
214
+ #elif defined(_WIN32)
215
+
216
+ #if _MSC_VER >= 1500
217
+
218
+ #undef _USE_DECLSPECS_FOR_SAL
219
+ #define _USE_DECLSPECS_FOR_SAL \
220
+ 1
221
+
222
+ #endif /* _MSC_VER >= 1500 */
223
+
224
+ #if !defined(_CRT_NONSTDC_NO_WARNINGS)
225
+
226
+ #define _CRT_NONSTDC_NO_WARNINGS /* to suppress warnings */
227
+
228
+ #endif /* !_CRT_NONSTDC_NO_WARNINGS */
229
+
230
+ #if !defined(_CRT_SECURE_NO_WARNINGS)
231
+
232
+ #define _CRT_SECURE_NO_WARNINGS /* to suppress warnings */
233
+
234
+ #endif /* !_CRT_SECURE_NO_WARNINGS */
235
+
236
+ #if !defined(NOMINMAX)
237
+
238
+ #define NOMINMAX /* min and max are part of cuda runtime */
239
+
240
+ #endif /* !NOMINMAX */
241
+
242
+ #include <crtdefs.h> /* for _CRTIMP */
243
+ #if _MSC_VER >= 1900
244
+ #include <corecrt.h> /* for _ACRTIMP */
245
+ #endif /* _MSC_VER >= 1900 */
246
+
247
+ #define __THROW
248
+
249
+ #endif /* __APPLE__ */
250
+
251
+ #endif /* __CUDACC_RTC__ */
252
+
253
+
254
+ #if defined(__cplusplus) && defined(__CUDA_ARCH__) && (defined(__PGIC__) || defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER)))
255
+
256
+ #if __CUDACC_RTC__
257
+ typedef char *va_list;
258
+ #else /* !__CUDACC_RTC__ */
259
+ #include <cstdarg>
260
+ #endif /* __CUDACC_RTC__ */
261
+
262
+
263
+ #undef va_start
264
+ #undef va_end
265
+ #undef va_arg
266
+
267
+ #ifdef __PGIC__
268
+
269
+ #undef __builtin_va_end
270
+
271
+ #define va_start(v,l) __builtin_alt_va_start(v,l)
272
+ #define va_end(v) __builtin_va_end(v)
273
+ #define va_arg(v,l) __builtin_alt_va_arg(v,l)
274
+
275
+ #if (__cplusplus >= 201103L)
276
+ #undef va_copy
277
+ #define va_copy(d,s) __builtin_va_copy(d,s)
278
+ #endif
279
+
280
+ #else /* !__PGIC__ */
281
+
282
+
283
+ #define va_start(ap, x) (__cu_va_start(&ap, x))
284
+ #define va_end(ap) (__cu_va_end(&ap))
285
+ #define va_arg(ap, t) (*((t *)__cu_va_arg(&ap, (t *)0)))
286
+
287
+ #if (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L))
288
+ #undef va_copy
289
+ #define va_copy(apd, aps) (__cu_va_copy(&(apd), &(aps)))
290
+ #endif /* (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L)) */
291
+ #endif /* __PGIC__ */
292
+
293
+ #endif /* defined(__cplusplus) && (defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER))) */
294
+
295
+
296
+
297
+ #endif /* __CUDACC__ */
298
+
299
+ #endif /* !__HOST_CONFIG_H__ */
300
+
301
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__)
302
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
303
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__
304
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_defines.h ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__
58
+ #endif
59
+
60
+ #if !defined(__HOST_DEFINES_H__)
61
+ #define __HOST_DEFINES_H__
62
+
63
+ #if defined(__CUDACC__) && !defined(__CUDACC_RTC__) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_ALLOW_UNSUPPORTED_LIBCPP)
64
+ #include <ctype.h>
65
+ #if ((defined(_MSC_VER ) && (defined(_M_X64) || defined(_M_AMD64))) ||\
66
+ (defined(__x86_64__) || defined(__amd64__))) && defined(_LIBCPP_VERSION) && !(defined(__HORIZON__) || defined(__ANDROID__) || defined(__QNX__))
67
+ #error "libc++ is not supported on x86 system"
68
+ #endif
69
+ #endif
70
+
71
+ /* CUDA JIT mode (__CUDACC_RTC__) also uses GNU style attributes */
72
+ #if defined(__GNUC__) || (defined(__PGIC__) && defined(__linux__)) || defined(__CUDA_LIBDEVICE__) || defined(__CUDACC_RTC__)
73
+
74
+ #if defined(__CUDACC_RTC__)
75
+ #define __volatile__ volatile
76
+ #endif /* __CUDACC_RTC__ */
77
+
78
+ #define __no_return__ \
79
+ __attribute__((noreturn))
80
+
81
+ #if defined(__CUDACC__) || defined(__CUDA_ARCH__) || defined(__CUDA_LIBDEVICE__)
82
+ /* gcc allows users to define attributes with underscores,
83
+ e.g., __attribute__((__noinline__)).
84
+ Consider a non-CUDA source file (e.g. .cpp) that has the
85
+ above attribute specification, and includes this header file. In that case,
86
+ defining __noinline__ as below would cause a gcc compilation error.
87
+ Hence, only define __noinline__ when the code is being processed
88
+ by a CUDA compiler component.
89
+ */
90
+ #define __noinline__ \
91
+ __attribute__((noinline))
92
+ #endif /* __CUDACC__ || __CUDA_ARCH__ || __CUDA_LIBDEVICE__ */
93
+
94
+ #undef __forceinline__
95
+ #define __forceinline__ \
96
+ __inline__ __attribute__((always_inline))
97
+ #define __inline_hint__ \
98
+ __attribute__((nv_inline_hint))
99
+ #define __align__(n) \
100
+ __attribute__((aligned(n)))
101
+ #define __thread__ \
102
+ __thread
103
+ #define __import__
104
+ #define __export__
105
+ #define __cdecl
106
+ #define __annotate__(a) \
107
+ __attribute__((a))
108
+ #define __location__(a) \
109
+ __annotate__(a)
110
+ #define CUDARTAPI
111
+ #define CUDARTAPI_CDECL
112
+
113
+ #elif defined(_MSC_VER)
114
+
115
+ #if _MSC_VER >= 1400
116
+
117
+ #define __restrict__ \
118
+ __restrict
119
+
120
+ #else /* _MSC_VER >= 1400 */
121
+
122
+ #define __restrict__
123
+
124
+ #endif /* _MSC_VER >= 1400 */
125
+
126
+ #define __inline__ \
127
+ __inline
128
+ #define __no_return__ \
129
+ __declspec(noreturn)
130
+ #define __noinline__ \
131
+ __declspec(noinline)
132
+ #define __forceinline__ \
133
+ __forceinline
134
+ #define __inline_hint__ \
135
+ __declspec(nv_inline_hint)
136
+ #define __align__(n) \
137
+ __declspec(align(n))
138
+ #define __thread__ \
139
+ __declspec(thread)
140
+ #define __import__ \
141
+ __declspec(dllimport)
142
+ #define __export__ \
143
+ __declspec(dllexport)
144
+ #define __annotate__(a) \
145
+ __declspec(a)
146
+ #define __location__(a) \
147
+ __annotate__(__##a##__)
148
+ #define CUDARTAPI \
149
+ __stdcall
150
+ #define CUDARTAPI_CDECL \
151
+ __cdecl
152
+
153
+ #else /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */
154
+
155
+ #define __inline__
156
+
157
+ #if !defined(__align__)
158
+
159
+ #error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for '__align__' !!! ---
160
+
161
+ #endif /* !__align__ */
162
+
163
+ #if !defined(CUDARTAPI)
164
+
165
+ #error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for 'CUDARTAPI' !!! ---
166
+
167
+ #endif /* !CUDARTAPI */
168
+
169
+ #endif /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */
170
+
171
+ #if (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !defined(__clang__)))) || \
172
+ (defined(_MSC_VER) && _MSC_VER < 1900) || \
173
+ (!defined(__GNUC__) && !defined(_MSC_VER))
174
+
175
+ #define __specialization_static \
176
+ static
177
+
178
+ #else /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) ||
179
+ (_MSC_VER && _MSC_VER < 1900) ||
180
+ (!__GNUC__ && !_MSC_VER) */
181
+
182
+ #define __specialization_static
183
+
184
+ #endif /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) ||
185
+ (_MSC_VER && _MSC_VER < 1900) ||
186
+ (!__GNUC__ && !_MSC_VER) */
187
+
188
+ #if !defined(__CUDACC__) && !defined(__CUDA_LIBDEVICE__)
189
+
190
+ #undef __annotate__
191
+ #define __annotate__(a)
192
+
193
+ #else /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */
194
+
195
+ #define __launch_bounds__(...) \
196
+ __annotate__(launch_bounds(__VA_ARGS__))
197
+
198
+ #endif /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */
199
+
200
+ #if defined(__CUDACC__) || defined(__CUDA_LIBDEVICE__) || \
201
+ defined(__GNUC__) || defined(_WIN64)
202
+
203
+ #define __builtin_align__(a) \
204
+ __align__(a)
205
+
206
+ #else /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */
207
+
208
+ #define __builtin_align__(a)
209
+
210
+ #endif /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */
211
+
212
+ #if defined(__CUDACC__) || !defined(__grid_constant__)
213
+ #define __grid_constant__ \
214
+ __location__(grid_constant)
215
+ #endif /* defined(__CUDACC__) || !defined(__grid_constant__) */
216
+
217
+ #if defined(__CUDACC__) || !defined(__host__)
218
+ #define __host__ \
219
+ __location__(host)
220
+ #endif /* defined(__CUDACC__) || !defined(__host__) */
221
+ #if defined(__CUDACC__) || !defined(__device__)
222
+ #define __device__ \
223
+ __location__(device)
224
+ #endif /* defined(__CUDACC__) || !defined(__device__) */
225
+ #if defined(__CUDACC__) || !defined(__global__)
226
+ #define __global__ \
227
+ __location__(global)
228
+ #endif /* defined(__CUDACC__) || !defined(__global__) */
229
+ #if defined(__CUDACC__) || !defined(__shared__)
230
+ #define __shared__ \
231
+ __location__(shared)
232
+ #endif /* defined(__CUDACC__) || !defined(__shared__) */
233
+ #if defined(__CUDACC__) || !defined(__constant__)
234
+ #define __constant__ \
235
+ __location__(constant)
236
+ #endif /* defined(__CUDACC__) || !defined(__constant__) */
237
+ #if defined(__CUDACC__) || !defined(__managed__)
238
+ #define __managed__ \
239
+ __location__(managed)
240
+ #endif /* defined(__CUDACC__) || !defined(__managed__) */
241
+
242
+ #if !defined(__CUDACC__)
243
+ #define __device_builtin__
244
+ #define __device_builtin_texture_type__
245
+ #define __device_builtin_surface_type__
246
+ #define __cudart_builtin__
247
+ #else /* defined(__CUDACC__) */
248
+ #define __device_builtin__ \
249
+ __location__(device_builtin)
250
+ #define __device_builtin_texture_type__ \
251
+ __location__(device_builtin_texture_type)
252
+ #define __device_builtin_surface_type__ \
253
+ __location__(device_builtin_surface_type)
254
+ #define __cudart_builtin__ \
255
+ __location__(cudart_builtin)
256
+ #endif /* !defined(__CUDACC__) */
257
+
258
+ #if defined(__CUDACC__) || !defined(__cluster_dims__)
259
+ #if defined(_MSC_VER)
260
+ #define __cluster_dims__(...) \
261
+ __declspec(__cluster_dims__(__VA_ARGS__))
262
+
263
+ #else /* !defined(_MSC_VER) */
264
+ #define __cluster_dims__(...) \
265
+ __attribute__((cluster_dims(__VA_ARGS__)))
266
+ #endif /* defined(_MSC_VER) */
267
+ #endif /* defined(__CUDACC__) || !defined(__cluster_dims__) */
268
+
269
+ #define __CUDA_ARCH_HAS_FEATURE__(_FEAT) __CUDA_ARCH_FEAT_##_FEAT
270
+
271
+ #endif /* !__HOST_DEFINES_H__ */
272
+
273
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__)
274
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
275
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__
276
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/host_runtime.h ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * NVIDIA_COPYRIGHT_BEGIN
3
+ *
4
+ * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
7
+ * and proprietary rights in and to this software, related documentation
8
+ * and any modifications thereto. Any use, reproduction, disclosure or
9
+ * distribution of this software and related documentation without an express
10
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
11
+ *
12
+ * NVIDIA_COPYRIGHT_END
13
+ */
14
+
15
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
16
+ #if defined(_MSC_VER)
17
+ #pragma message("crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
18
+ #else
19
+ #warning "crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
20
+ #endif
21
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
22
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
23
+ #endif
24
+
25
+ #if !defined(__CUDA_INTERNAL_COMPILATION__)
26
+
27
+ #define __CUDA_INTERNAL_COMPILATION__
28
+ #define __text__
29
+ #define __surf__
30
+ #define __name__shadow_var(c, cpp) \
31
+ #c
32
+ #define __name__text_var(c, cpp) \
33
+ #cpp
34
+ #define __host__shadow_var(c, cpp) \
35
+ cpp
36
+ #define __text_var(c, cpp) \
37
+ cpp
38
+ #define __device_fun(fun) \
39
+ #fun
40
+ #define __device_var(var) \
41
+ #var
42
+ #define __device__text_var(c, cpp) \
43
+ #c
44
+ #define __device__shadow_var(c, cpp) \
45
+ #c
46
+
47
+ #if defined(_WIN32) && !defined(_WIN64)
48
+
49
+ #define __pad__(f) \
50
+ f
51
+
52
+ #else /* _WIN32 && !_WIN64 */
53
+
54
+ #define __pad__(f)
55
+
56
+ #endif /* _WIN32 && !_WIN64 */
57
+
58
+ #include "builtin_types.h"
59
+ #include "storage_class.h"
60
+
61
+ #else /* !__CUDA_INTERNAL_COMPILATION__ */
62
+
63
+ template <typename T>
64
+ static inline T *__cudaAddressOf(T &val)
65
+ {
66
+ return (T *)((void *)(&(const_cast<char &>(reinterpret_cast<const volatile char &>(val)))));
67
+ }
68
+
69
+ #define __cudaRegisterBinary(X) \
70
+ __cudaFatCubinHandle = __cudaRegisterFatBinary((void*)&__fatDeviceText); \
71
+ { void (*callback_fp)(void **) = (void (*)(void **))(X); (*callback_fp)(__cudaFatCubinHandle); __cudaRegisterFatBinaryEnd(__cudaFatCubinHandle); }\
72
+ atexit(__cudaUnregisterBinaryUtil)
73
+
74
+ #define __cudaRegisterVariable(handle, var, ext, size, constant, global) \
75
+ __cudaRegisterVar(handle, (char*)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
76
+ #define __cudaRegisterManagedVariable(handle, var, ext, size, constant, global) \
77
+ __cudaRegisterManagedVar(handle, (void **)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
78
+
79
+ #define __cudaRegisterGlobalTexture(handle, tex, dim, norm, ext) \
80
+ __cudaRegisterTexture(handle, (const struct textureReference*)&tex, (const void**)(void*)__device##tex, __name##tex, dim, norm, ext)
81
+ #define __cudaRegisterGlobalSurface(handle, surf, dim, ext) \
82
+ __cudaRegisterSurface(handle, (const struct surfaceReference*)&surf, (const void**)(void*)__device##surf, __name##surf, dim, ext)
83
+ #define __cudaRegisterEntry(handle, funptr, fun, thread_limit) \
84
+ __cudaRegisterFunction(handle, (const char*)funptr, (char*)__device_fun(fun), #fun, -1, (uint3*)0, (uint3*)0, (dim3*)0, (dim3*)0, (int*)0)
85
+
86
+ extern "C" cudaError_t CUDARTAPI __cudaPopCallConfiguration(
87
+ dim3 *gridDim,
88
+ dim3 *blockDim,
89
+ size_t *sharedMem,
90
+ void *stream
91
+ );
92
+
93
+ #define __cudaLaunchPrologue(size) \
94
+ void * __args_arr[size]; \
95
+ int __args_idx = 0
96
+
97
+ #define __cudaSetupArg(arg, offset) \
98
+ __args_arr[__args_idx] = (void *)__cudaAddressOf(arg); ++__args_idx
99
+
100
+ #define __cudaSetupArgSimple(arg, offset) \
101
+ __args_arr[__args_idx] = (void *)(char *)&arg; ++__args_idx
102
+
103
+ #if defined(__GNUC__)
104
+ #define __NV_ATTR_UNUSED_FOR_LAUNCH __attribute__((unused))
105
+ #else /* !__GNUC__ */
106
+ #define __NV_ATTR_UNUSED_FOR_LAUNCH
107
+ #endif /* __GNUC__ */
108
+
109
+ /* the use of __args_idx in the expression below avoids host compiler warning about it being an
110
+ unused variable when the launch has no arguments */
111
+ #define __cudaLaunch(fun) \
112
+ { volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \
113
+ dim3 __gridDim, __blockDim;\
114
+ size_t __sharedMem; \
115
+ cudaStream_t __stream; \
116
+ if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \
117
+ return; \
118
+ if (__args_idx == 0) {\
119
+ (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\
120
+ } else { \
121
+ (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\
122
+ }\
123
+ }
124
+
125
+ #if defined(__GNUC__)
126
+ #define __nv_dummy_param_ref(param) \
127
+ { volatile static void **__ref __attribute__((unused)); __ref = (volatile void **)param; }
128
+ #else /* __GNUC__ */
129
+ #define __nv_dummy_param_ref(param) \
130
+ { volatile static void **__ref; __ref = (volatile void **)param; }
131
+ #endif /* __GNUC__ */
132
+
133
+ static void ____nv_dummy_param_ref(void *param) __nv_dummy_param_ref(param)
134
+
135
+ #define __REGISTERFUNCNAME_CORE(X) __cudaRegisterLinkedBinary##X
136
+ #define __REGISTERFUNCNAME(X) __REGISTERFUNCNAME_CORE(X)
137
+
138
+ extern "C" {
139
+ void __REGISTERFUNCNAME( __NV_MODULE_ID ) ( void (*)(void **), void *, void *, void (*)(void *));
140
+ }
141
+
142
+ #define __TO_STRING_CORE(X) #X
143
+ #define __TO_STRING(X) __TO_STRING_CORE(X)
144
+
145
+ extern "C" {
146
+ #if defined(_WIN32)
147
+ #pragma data_seg("__nv_module_id")
148
+ static const __declspec(allocate("__nv_module_id")) unsigned char __module_id_str[] = __TO_STRING(__NV_MODULE_ID);
149
+ #pragma data_seg()
150
+ #elif defined(__APPLE__)
151
+ static const unsigned char __module_id_str[] __attribute__((section ("__NV_CUDA,__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
152
+ #else
153
+ static const unsigned char __module_id_str[] __attribute__((section ("__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
154
+ #endif
155
+
156
+ #undef __FATIDNAME_CORE
157
+ #undef __FATIDNAME
158
+ #define __FATIDNAME_CORE(X) __fatbinwrap##X
159
+ #define __FATIDNAME(X) __FATIDNAME_CORE(X)
160
+
161
+ #define ____cudaRegisterLinkedBinary(X) \
162
+ { __REGISTERFUNCNAME(__NV_MODULE_ID) (( void (*)(void **))(X), (void *)&__FATIDNAME(__NV_MODULE_ID), (void *)&__module_id_str, (void (*)(void *))&____nv_dummy_param_ref); }
163
+
164
+ }
165
+
166
+ extern "C" {
167
+ extern void** CUDARTAPI __cudaRegisterFatBinary(
168
+ void *fatCubin
169
+ );
170
+
171
+ extern void CUDARTAPI __cudaRegisterFatBinaryEnd(
172
+ void **fatCubinHandle
173
+ );
174
+
175
+ extern void CUDARTAPI __cudaUnregisterFatBinary(
176
+ void **fatCubinHandle
177
+ );
178
+
179
+ extern void CUDARTAPI __cudaRegisterVar(
180
+ void **fatCubinHandle,
181
+ char *hostVar,
182
+ char *deviceAddress,
183
+ const char *deviceName,
184
+ int ext,
185
+ size_t size,
186
+ int constant,
187
+ int global
188
+ );
189
+
190
+ extern void CUDARTAPI __cudaRegisterManagedVar(
191
+ void **fatCubinHandle,
192
+ void **hostVarPtrAddress,
193
+ char *deviceAddress,
194
+ const char *deviceName,
195
+ int ext,
196
+ size_t size,
197
+ int constant,
198
+ int global
199
+ );
200
+
201
+ extern char CUDARTAPI __cudaInitModule(
202
+ void **fatCubinHandle
203
+ );
204
+
205
+ extern void CUDARTAPI __cudaRegisterTexture(
206
+ void **fatCubinHandle,
207
+ const struct textureReference *hostVar,
208
+ const void **deviceAddress,
209
+ const char *deviceName,
210
+ int dim,
211
+ int norm,
212
+ int ext
213
+ );
214
+
215
+ extern void CUDARTAPI __cudaRegisterSurface(
216
+ void **fatCubinHandle,
217
+ const struct surfaceReference *hostVar,
218
+ const void **deviceAddress,
219
+ const char *deviceName,
220
+ int dim,
221
+ int ext
222
+ );
223
+
224
+ extern void CUDARTAPI __cudaRegisterFunction(
225
+ void **fatCubinHandle,
226
+ const char *hostFun,
227
+ char *deviceFun,
228
+ const char *deviceName,
229
+ int thread_limit,
230
+ uint3 *tid,
231
+ uint3 *bid,
232
+ dim3 *bDim,
233
+ dim3 *gDim,
234
+ int *wSize
235
+ );
236
+
237
+ #if defined(__APPLE__)
238
+ extern "C" int atexit(void (*)(void));
239
+
240
+ #elif defined(__GNUC__) && !defined(__ANDROID__) && !defined(__HORIZON__)
241
+ extern int atexit(void(*)(void)) throw();
242
+
243
+ #elif defined(__HORIZON__)
244
+
245
+ // __TEMP_WAR__ 200132570 HOS : Disable atexit call until it works
246
+ #define atexit(p)
247
+
248
+ #else /* __GNUC__ && !__ANDROID__ */
249
+ extern int __cdecl atexit(void(__cdecl *)(void));
250
+ #endif
251
+
252
+ }
253
+
254
+ static void **__cudaFatCubinHandle;
255
+
256
+ static void __cdecl __cudaUnregisterBinaryUtil(void)
257
+ {
258
+ ____nv_dummy_param_ref((void *)&__cudaFatCubinHandle);
259
+ __cudaUnregisterFatBinary(__cudaFatCubinHandle);
260
+ }
261
+
262
+ static char __nv_init_managed_rt_with_module(void **handle)
263
+ {
264
+ return __cudaInitModule(handle);
265
+ }
266
+
267
+ #include "common_functions.h"
268
+
269
+ #pragma pack()
270
+
271
+ #if defined(_WIN32)
272
+
273
+ #pragma warning(disable: 4099)
274
+
275
+ #if !defined(_WIN64)
276
+
277
+ #pragma warning(disable: 4408)
278
+
279
+ #endif /* !_WIN64 */
280
+
281
+ #endif /* _WIN32 */
282
+
283
+ #endif /* !__CUDA_INTERNAL_COMPILATION__ */
284
+
285
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__)
286
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
287
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
288
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.h ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/math_functions.hpp ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.h ADDED
@@ -0,0 +1,754 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead.")
53
+ #else
54
+ #warning "crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__
58
+ #endif
59
+
60
+ #if !defined(__CUDA_MMA_H__)
61
+ #define __CUDA_MMA_H__
62
+
63
+ #include <cuda_fp16.h>
64
+ #include <cuda_bf16.h>
65
+
66
+ #define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__
67
+
68
+ #if defined(__cplusplus) && defined(__CUDACC__)
69
+
70
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
71
+
72
+
73
+ #ifndef __CUDA_ARCH__
74
+ #define __DEF_IF_HOST { }
75
+ #else /* !__CUDA_ARCH__ */
76
+ #define __DEF_IF_HOST ;
77
+ #endif /* __CUDA_ARCH__ */
78
+
79
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720
80
+ #define __CUDA_IMMA__ 1
81
+ #endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */
82
+
83
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730
84
+ #define __CUDA_SUBBYTE_IMMA__ 1
85
+ #endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */
86
+
87
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
88
+ #define __CUDA_AMPERE_MMA__ 1
89
+ #endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */
90
+
91
+ namespace nvcuda {
92
+ namespace wmma {
93
+
94
+ // utility functions
95
+ #ifdef __CUDA_AMPERE_MMA__
96
+ inline __device__ float __float_to_tf32(float in)
97
+ {
98
+ float ret;
99
+ asm("{\n .reg .b32 __$1;"
100
+ "\n cvt.rna.tf32.f32 __$1, %1;"
101
+ "\n mov.b32 %0, __$1;\n}\n" : "=f"(ret) : "f"(in) );
102
+ return ret;
103
+ }
104
+ #endif /* __CUDA_AMPERE_MMA__ */
105
+
106
+ //
107
+ // tags
108
+ //
109
+ struct row_major;
110
+ struct col_major;
111
+ struct matrix_a;
112
+ struct matrix_b;
113
+ struct accumulator;
114
+
115
+ #ifdef __CUDA_AMPERE_MMA__
116
+ namespace precision {
117
+ struct tf32;
118
+ }
119
+ #endif /* __CUDA_AMPERE_MMA__ */
120
+ #ifdef __CUDA_SUBBYTE_IMMA__
121
+ namespace experimental {
122
+ namespace precision {
123
+ struct u4; // 4-bit unsigned
124
+ struct s4; // 4-bit signed
125
+ struct b1; // 1-bit
126
+ }
127
+ enum bmmaBitOp { bmmaBitOpXOR = 1
128
+ #ifdef __CUDA_AMPERE_MMA__
129
+ , bmmaBitOpAND = 2
130
+ #endif /* __CUDA_AMPERE_MMA__ */
131
+ };
132
+ enum bmmaAccumulateOp { bmmaAccumulateOpPOPC = 1 };
133
+ }
134
+ #endif /* __CUDA_SUBBYTE_IMMA__ */
135
+
136
+ //
137
+ // layout
138
+ //
139
+ enum layout_t {
140
+ mem_row_major, mem_col_major
141
+ };
142
+
143
+ template <typename T>
144
+ struct helper_traits {
145
+ typedef T element_type;
146
+ typedef T storage_element_type;
147
+ typedef T fill_argument_type;
148
+ };
149
+
150
+ #ifdef __CUDA_SUBBYTE_IMMA__
151
+ template<> struct helper_traits<experimental::precision::u4> {
152
+ typedef experimental::precision::u4 element_type;
153
+ typedef unsigned int storage_element_type;
154
+ typedef unsigned int fill_argument_type;
155
+ };
156
+
157
+ template<> struct helper_traits<experimental::precision::s4> {
158
+ typedef experimental::precision::s4 element_type;
159
+ typedef int storage_element_type;
160
+ typedef int fill_argument_type;
161
+ };
162
+
163
+ template<> struct helper_traits<experimental::precision::b1> {
164
+ typedef experimental::precision::b1 element_type;
165
+ typedef unsigned int storage_element_type;
166
+ typedef unsigned int fill_argument_type;
167
+ };
168
+ #endif /* __CUDA_SUBBYTE_IMMA__ */
169
+
170
+ #ifdef __CUDA_AMPERE_MMA__
171
+ template<> struct helper_traits<precision::tf32> {
172
+ typedef precision::tf32 element_type;
173
+ typedef float storage_element_type;
174
+ typedef float fill_argument_type;
175
+ };
176
+ #endif /* __CUDA_AMPERE_MMA__ */
177
+
178
+ //
179
+ // The base fragment type
180
+ //
181
+ /* note: alignment required for compiler implementation */
182
+ template <typename T, int size, int packed_size = size>
183
+ struct __align__(8) __frag_base {
184
+
185
+ /* Number of elements in the fragment */
186
+ enum {num_elements = size};
187
+
188
+ /* Number of storage elements in the fragment.
189
+
190
+ The elements of the fragment are packed together when the
191
+ fragment element type is experimental::precision::u4,
192
+ experimental::precision::s4 or experimental::precision::b1.
193
+ When elements are packed, num_storage_elements
194
+ will be smaller than num_elements.
195
+ */
196
+ enum {num_storage_elements = packed_size};
197
+
198
+ /* element type of the fragment */
199
+ typedef T element_type;
200
+
201
+ /* element type of the storage representation.
202
+
203
+ The mapping from element_type to storage_element_type is as follows:
204
+ experimental::precision::u4 -> unsigned (8 elements in 1 storage element)
205
+ experimental::precision::s4 -> int (8 elements in 1 storage element)
206
+ experimental::precision::b1 -> unsigned (32 elements in 1 storage element)
207
+ precision::tf32 -> float (1 element in 1 storage element)
208
+ all other types T -> T
209
+ */
210
+ typedef typename helper_traits<T>::storage_element_type storage_element_type;
211
+
212
+ /* Storage for the (possibly packed) fragment elements. */
213
+ storage_element_type x[num_storage_elements];
214
+ };
215
+
216
+ template <typename FragEleType, typename StorageType, typename ArgType>
217
+ static inline __device__ StorageType __get_storage_value(ArgType in) { return in; }
218
+
219
+ #ifdef __CUDA_SUBBYTE_IMMA__
220
+ template<>
221
+ __device__ inline unsigned
222
+ __get_storage_value<experimental::precision::u4, unsigned, unsigned>(unsigned in)
223
+ {
224
+ /* For experimental::precision::u4 fragment element type, pack 8 elements into a single
225
+ 32-bit unsigned int storage element */
226
+ unsigned val = in & 0xf;
227
+ return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) |
228
+ (val << 20) | (val << 24) | (val << 28));
229
+ };
230
+
231
+ template<>
232
+ __device__ inline int
233
+ __get_storage_value<experimental::precision::s4, int, int>(int in)
234
+ {
235
+ /* For experimental::precision::s4 fragment element type, pack 8 elements into a single
236
+ 32-bit signed int storage element */
237
+ int val = in & 0xf;
238
+ return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) |
239
+ (val << 20) | (val << 24) | (val << 28));
240
+ };
241
+
242
+ template<>
243
+ __device__ inline unsigned
244
+ __get_storage_value<experimental::precision::b1, unsigned, unsigned>(unsigned in)
245
+ {
246
+ /* For experimental::precision::b1 fragment element type, pack 32 elements into a
247
+ single 32-bit unsigned int storage element */
248
+ return (in & 0x1) ? 0xFFFFFFFFU : 0;
249
+ }
250
+ #endif /* __CUDA_SUBBYTE_IMMA__ */
251
+
252
+ template <typename FragEleType, int size, int packed_size>
253
+ __CUDA_MMA_DEVICE_DECL__ void fill_fragment(__frag_base<FragEleType, size, packed_size>& f,
254
+ /* The mapping from fragment element type (FragEleType) to fill_argument_type is:
255
+ experimental::precision::u4 -> unsigned (only lower 4 bits taken)
256
+ experimental::precision::s4 -> int (only lower 4 bits taken)
257
+ experimental::precision::b1 -> unsigned (only lowest 1 bit taken)
258
+ precision::tf32 -> float
259
+ all other types T -> T
260
+ */
261
+ const typename helper_traits<FragEleType>::fill_argument_type & in) {
262
+
263
+ /* get the (possibly packed) storage element value. See the specializations above for fragment
264
+ element types where the storage representation is packed */
265
+ typedef typename helper_traits<FragEleType>::storage_element_type storage_type;
266
+ storage_type v = __get_storage_value<FragEleType, storage_type>(in);
267
+ #pragma unroll
268
+ for (int i=0; i< f.num_storage_elements; i++)
269
+ f.x[i] = v;
270
+ }
271
+
272
+ //
273
+ // Fragment template
274
+ //
275
+ template<typename Use, int m, int n, int k, typename T, typename Layout=void> class fragment;
276
+
277
+ //
278
+ // Fragments for 16x16x16
279
+ //
280
+ template<> class fragment<matrix_a, 16, 16, 16, __half, row_major> : public __frag_base<__half, 16> {};
281
+ template<> class fragment<matrix_a, 16, 16, 16, __half, col_major> : public __frag_base<__half, 16> {};
282
+ template<> class fragment<matrix_b, 16, 16, 16, __half, row_major> : public __frag_base<__half, 16> {};
283
+ template<> class fragment<matrix_b, 16, 16, 16, __half, col_major> : public __frag_base<__half, 16> {};
284
+ template<> class fragment<accumulator, 16, 16, 16, __half> : public __frag_base<__half, 8> {};
285
+ template<> class fragment<accumulator, 16, 16, 16, float> : public __frag_base<float, 8> {};
286
+
287
+ #ifdef __CUDA_IMMA__
288
+ template<> class fragment<matrix_a, 16, 16, 16, signed char, row_major> : public __frag_base<signed char, 8> {};
289
+ template<> class fragment<matrix_a, 16, 16, 16, signed char, col_major> : public __frag_base<signed char, 8> {};
290
+ template<> class fragment<matrix_a, 16, 16, 16, unsigned char, row_major> : public __frag_base<unsigned char, 8> {};
291
+ template<> class fragment<matrix_a, 16, 16, 16, unsigned char, col_major> : public __frag_base<unsigned char, 8> {};
292
+ template<> class fragment<matrix_b, 16, 16, 16, signed char, row_major> : public __frag_base<signed char, 8> {};
293
+ template<> class fragment<matrix_b, 16, 16, 16, signed char, col_major> : public __frag_base<signed char, 8> {};
294
+ template<> class fragment<matrix_b, 16, 16, 16, unsigned char, row_major> : public __frag_base<unsigned char, 8> {};
295
+ template<> class fragment<matrix_b, 16, 16, 16, unsigned char, col_major> : public __frag_base<unsigned char, 8> {};
296
+ template<> class fragment<accumulator, 16, 16, 16, int> : public __frag_base<int, 8> {};
297
+ #endif /* __CUDA_IMMA__ */
298
+
299
+ #ifdef __CUDA_AMPERE_MMA__
300
+ template<> class fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 8> {};
301
+ template<> class fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 8> {};
302
+ template<> class fragment<matrix_b, 16, 16, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 8> {};
303
+ template<> class fragment<matrix_b, 16, 16, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 8> {};
304
+ #endif /* __CUDA_AMPERE_MMA__ */
305
+
306
+ //
307
+ // Fragments for 32x8x16
308
+ //
309
+ template<> class fragment<matrix_a, 32, 8, 16, __half, row_major> : public __frag_base<__half, 16> {};
310
+ template<> class fragment<matrix_a, 32, 8, 16, __half, col_major> : public __frag_base<__half, 16> {};
311
+ template<> class fragment<matrix_b, 32, 8, 16, __half, row_major> : public __frag_base<__half, 16> {};
312
+ template<> class fragment<matrix_b, 32, 8, 16, __half, col_major> : public __frag_base<__half, 16> {};
313
+ template<> class fragment<accumulator, 32, 8, 16, __half> : public __frag_base<__half, 8> {};
314
+ template<> class fragment<accumulator, 32, 8, 16, float> : public __frag_base<float, 8> {};
315
+
316
+ #ifdef __CUDA_IMMA__
317
+ template<> class fragment<matrix_a, 32, 8, 16, signed char, row_major> : public __frag_base<signed char, 16> {};
318
+ template<> class fragment<matrix_a, 32, 8, 16, signed char, col_major> : public __frag_base<signed char, 16> {};
319
+ template<> class fragment<matrix_a, 32, 8, 16, unsigned char, row_major> : public __frag_base<unsigned char, 16> {};
320
+ template<> class fragment<matrix_a, 32, 8, 16, unsigned char, col_major> : public __frag_base<unsigned char, 16> {};
321
+ template<> class fragment<matrix_b, 32, 8, 16, signed char, row_major> : public __frag_base<signed char, 4> {};
322
+ template<> class fragment<matrix_b, 32, 8, 16, signed char, col_major> : public __frag_base<signed char, 4> {};
323
+ template<> class fragment<matrix_b, 32, 8, 16, unsigned char, row_major> : public __frag_base<unsigned char, 4> {};
324
+ template<> class fragment<matrix_b, 32, 8, 16, unsigned char, col_major> : public __frag_base<unsigned char, 4> {};
325
+ template<> class fragment<accumulator, 32, 8, 16, int> : public __frag_base<int, 8> {};
326
+ #endif /* __CUDA_IMMA__ */
327
+
328
+ #ifdef __CUDA_AMPERE_MMA__
329
+ template<> class fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 16> {};
330
+ template<> class fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 16> {};
331
+ template<> class fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 4> {};
332
+ template<> class fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 4> {};
333
+ #endif /* __CUDA_AMPERE_MMA__ */
334
+
335
+ //
336
+ // Fragments for 8x32x16
337
+ //
338
+ template<> class fragment<matrix_a, 8, 32, 16, __half, row_major> : public __frag_base<__half, 16> {};
339
+ template<> class fragment<matrix_a, 8, 32, 16, __half, col_major> : public __frag_base<__half, 16> {};
340
+ template<> class fragment<matrix_b, 8, 32, 16, __half, row_major> : public __frag_base<__half, 16> {};
341
+ template<> class fragment<matrix_b, 8, 32, 16, __half, col_major> : public __frag_base<__half, 16> {};
342
+ template<> class fragment<accumulator, 8, 32, 16, __half> : public __frag_base<__half, 8> {};
343
+ template<> class fragment<accumulator, 8, 32, 16, float> : public __frag_base<float, 8> {};
344
+
345
+ #ifdef __CUDA_IMMA__
346
+ template<> class fragment<matrix_a, 8, 32, 16, signed char, row_major> : public __frag_base<signed char, 4> {};
347
+ template<> class fragment<matrix_a, 8, 32, 16, signed char, col_major> : public __frag_base<signed char, 4> {};
348
+ template<> class fragment<matrix_a, 8, 32, 16, unsigned char, row_major> : public __frag_base<unsigned char, 4> {};
349
+ template<> class fragment<matrix_a, 8, 32, 16, unsigned char, col_major> : public __frag_base<unsigned char, 4> {};
350
+ template<> class fragment<matrix_b, 8, 32, 16, signed char, row_major> : public __frag_base<signed char, 16> {};
351
+ template<> class fragment<matrix_b, 8, 32, 16, signed char, col_major> : public __frag_base<signed char, 16> {};
352
+ template<> class fragment<matrix_b, 8, 32, 16, unsigned char, row_major> : public __frag_base<unsigned char, 16> {};
353
+ template<> class fragment<matrix_b, 8, 32, 16, unsigned char, col_major> : public __frag_base<unsigned char, 16> {};
354
+ template<> class fragment<accumulator, 8, 32, 16, int> : public __frag_base<int, 8> {};
355
+ #endif /* __CUDA_IMMA__ */
356
+
357
+ #ifdef __CUDA_AMPERE_MMA__
358
+ template<> class fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 4> {};
359
+ template<> class fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 4> {};
360
+ template<> class fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 16> {};
361
+ template<> class fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 16> {};
362
+ #endif /* __CUDA_AMPERE_MMA__ */
363
+
364
+ #ifdef __CUDA_SUBBYTE_IMMA__
365
+ //
366
+ // Fragments for 8x8x32
367
+ //
368
+ template<> class fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major> : public __frag_base<experimental::precision::u4, 8, 1> {};
369
+ template<> class fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major> : public __frag_base<experimental::precision::s4, 8, 1> {};
370
+ template<> class fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major> : public __frag_base<experimental::precision::u4, 8, 1> {};
371
+ template<> class fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major> : public __frag_base<experimental::precision::s4, 8, 1> {};
372
+ template<> class fragment<accumulator, 8, 8, 32, int> : public __frag_base<int, 2> {};
373
+
374
+ //
375
+ // Fragments for 8x8x128
376
+ //
377
+ template<> class fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major> : public __frag_base<experimental::precision::b1, 32, 1> {};
378
+ template<> class fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major> : public __frag_base<experimental::precision::b1, 32, 1> {};
379
+ template<> class fragment<accumulator, 8, 8, 128, int> : public __frag_base<int, 2> {};
380
+ #endif /* __CUDA_SUBBYTE_IMMA__ */
381
+
382
+ #ifdef __CUDA_AMPERE_MMA__
383
+ //
384
+ // Fragments for 16x16x8
385
+ //
386
+ template<> class fragment<matrix_a, 16, 16, 8, precision::tf32, row_major> : public __frag_base<precision::tf32, 4> {};
387
+ template<> class fragment<matrix_a, 16, 16, 8, precision::tf32, col_major> : public __frag_base<precision::tf32, 4> {};
388
+ template<> class fragment<matrix_b, 16, 16, 8, precision::tf32, row_major> : public __frag_base<precision::tf32, 4> {};
389
+ template<> class fragment<matrix_b, 16, 16, 8, precision::tf32, col_major> : public __frag_base<precision::tf32, 4> {};
390
+ template<> class fragment<accumulator, 16, 16, 8, float> : public __frag_base<float, 8> {};
391
+
392
+ //
393
+ // Fragments for 8x8x4
394
+ //
395
+ template<> class fragment<matrix_a, 8, 8, 4, double, row_major> : public __frag_base<double, 1> {};
396
+ template<> class fragment<matrix_a, 8, 8, 4, double, col_major> : public __frag_base<double, 1> {};
397
+ template<> class fragment<matrix_b, 8, 8, 4, double, row_major> : public __frag_base<double, 1> {};
398
+ template<> class fragment<matrix_b, 8, 8, 4, double, col_major> : public __frag_base<double, 1> {};
399
+ template<> class fragment<accumulator, 8, 8, 4, double> : public __frag_base<double, 2> {};
400
+ #endif /* __CUDA_AMPERE_MMA__ */
401
+
402
+
403
+ //
404
+ // Load functions for frags of shape m16n16k16
405
+ //
406
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
407
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
408
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
409
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
410
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
411
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
412
+
413
+ #ifdef __CUDA_IMMA__
414
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
415
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
416
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
417
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
418
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
419
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
420
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
421
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
422
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
423
+ #endif /* __CUDA_IMMA__ */
424
+
425
+ #ifdef __CUDA_AMPERE_MMA__
426
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
427
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
428
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
429
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
430
+ #endif /* __CUDA_AMPERE_MMA__ */
431
+
432
+ //
433
+ // Load functions for frags of shape m32n8k16
434
+ //
435
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
436
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
437
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
438
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
439
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
440
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
441
+
442
+ #ifdef __CUDA_IMMA__
443
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
444
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
445
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
446
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
447
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
448
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
449
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
450
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
451
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
452
+ #endif /* __CUDA_IMMA__ */
453
+
454
+ #ifdef __CUDA_AMPERE_MMA__
455
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
456
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
457
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
458
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
459
+ #endif /* __CUDA_AMPERE_MMA__ */
460
+
461
+ //
462
+ // Load functions for frags of shape m8n32k16
463
+ //
464
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
465
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
466
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
467
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
468
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
469
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
470
+
471
+ #ifdef __CUDA_IMMA__
472
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
473
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
474
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
475
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
476
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
477
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
478
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
479
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
480
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
481
+ #endif /* __CUDA_IMMA__ */
482
+
483
+ #ifdef __CUDA_AMPERE_MMA__
484
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
485
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
486
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
487
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
488
+ #endif /* __CUDA_AMPERE_MMA__ */
489
+
490
+ #ifdef __CUDA_SUBBYTE_IMMA__
491
+ //
492
+ // Load functions for frags of shape m8n8k32
493
+ //
494
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
495
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
496
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
497
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
498
+
499
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 32, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
500
+
501
+ //
502
+ // Load functions for frags of shape m8n8k128
503
+ //
504
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
505
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
506
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 128, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
507
+
508
+ #endif /* __CUDA_SUBBYTE_IMMA__ */
509
+
510
+
511
+ #ifdef __CUDA_AMPERE_MMA__
512
+ //
513
+ // Load functions for frags of shape m16n16k8
514
+ //
515
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
516
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
517
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
518
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
519
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 8, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
520
+
521
+ //
522
+ // Load functions for frags of shape m8n8k4
523
+ //
524
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
525
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
526
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
527
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
528
+ __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 4, double>& a, const double* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
529
+ #endif /* __CUDA_AMPERE_MMA__ */
530
+
531
+ //
532
+ // Store functions for frags of shape m16n16k16
533
+ //
534
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 16, 16, 16, __half>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
535
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 16, 16, 16, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
536
+ #ifdef __CUDA_IMMA__
537
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 16, 16, 16, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
538
+ #endif /* __CUDA_IMMA__ */
539
+
540
+ //
541
+ // Store functions for frags of shape m32n8k16
542
+ //
543
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 32, 8, 16, __half>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
544
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 32, 8, 16, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
545
+ #ifdef __CUDA_IMMA__
546
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 32, 8, 16, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
547
+ #endif /* __CUDA_IMMA__ */
548
+
549
+ //
550
+ // Store functions for frags of shape m8n32k16
551
+ //
552
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 8, 32, 16, __half>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
553
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 8, 32, 16, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
554
+ #ifdef __CUDA_IMMA__
555
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 32, 16, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
556
+ #endif /* __CUDA_IMMA__ */
557
+
558
+ #ifdef __CUDA_SUBBYTE_IMMA__
559
+ //
560
+ // Store functions for frags of shape m8n8k32
561
+ //
562
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 32, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
563
+
564
+ //
565
+ // Store functions for frags of shape m8n8k128
566
+ //
567
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 128, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
568
+
569
+ #endif /* __CUDA_SUBBYTE_IMMA__ */
570
+
571
+ #ifdef __CUDA_AMPERE_MMA__
572
+ //
573
+ // Store functions for frags of shape m16n16k8
574
+ //
575
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 16, 16, 8, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
576
+
577
+ //
578
+ // Store functions for frags of shape m8n8k4
579
+ //
580
+ __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment<accumulator, 8, 8, 4, double>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
581
+ #endif /* __CUDA_AMPERE_MMA__ */
582
+
583
+ //
584
+ // MMA functions for shape m16n16k16
585
+ //
586
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
587
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
588
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
589
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
590
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
591
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
592
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
593
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
594
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
595
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
596
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
597
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
598
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
599
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
600
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
601
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
602
+
603
+ #ifdef __CUDA_IMMA__
604
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
605
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
606
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
607
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
608
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
609
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
610
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
611
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
612
+ #endif /* __CUDA_IMMA__ */
613
+
614
+ #ifdef __CUDA_AMPERE_MMA__
615
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
616
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
617
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
618
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
619
+ #endif /* __CUDA_AMPERE_MMA__ */
620
+
621
+ //
622
+ // MMA functions for shape m32n8k16
623
+ //
624
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
625
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
626
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
627
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
628
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
629
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
630
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
631
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
632
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
633
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
634
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
635
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
636
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
637
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
638
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
639
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
640
+
641
+ #ifdef __CUDA_IMMA__
642
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
643
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
644
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
645
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
646
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
647
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
648
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
649
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
650
+ #endif /* __CUDA_IMMA__ */
651
+
652
+ #ifdef __CUDA_AMPERE_MMA__
653
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
654
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
655
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
656
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
657
+ #endif /* __CUDA_AMPERE_MMA__ */
658
+
659
+ //
660
+ // MMA functions for shape m8n32k16
661
+ //
662
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
663
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
664
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
665
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
666
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
667
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
668
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
669
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
670
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
671
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
672
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
673
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
674
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
675
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
676
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
677
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
678
+
679
+ #ifdef __CUDA_IMMA__
680
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
681
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
682
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
683
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
684
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
685
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
686
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
687
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
688
+ #endif /* __CUDA_IMMA__ */
689
+
690
+ #ifdef __CUDA_AMPERE_MMA__
691
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
692
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
693
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
694
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
695
+ #endif /* __CUDA_AMPERE_MMA__ */
696
+
697
+ #ifdef __CUDA_SUBBYTE_IMMA__
698
+ //
699
+ // MMA functions for shape m8n8k32
700
+ //
701
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf=false) __DEF_IF_HOST
702
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf=false) __DEF_IF_HOST
703
+
704
+
705
+ //
706
+ // MMA functions for shape m8n8k128
707
+ //
708
+ __CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment<accumulator, 8, 8, 128, int>& d, const fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& b, const fragment<accumulator, 8, 8, 128, int>& c,
709
+ experimental::bmmaBitOp = experimental::bmmaBitOpXOR,
710
+ experimental::bmmaAccumulateOp = experimental::bmmaAccumulateOpPOPC) __DEF_IF_HOST
711
+
712
+ #endif /* __CUDA_SUBBYTE_IMMA__ */
713
+
714
+ #ifdef __CUDA_AMPERE_MMA__
715
+ //
716
+ // MMA functions for shape m16n16k8
717
+ //
718
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
719
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
720
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
721
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
722
+
723
+ //
724
+ // MMA functions for shape m8n8k4
725
+ //
726
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
727
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
728
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
729
+ __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
730
+ #endif /* __CUDA_AMPERE_MMA__ */
731
+ };
732
+ };
733
+
734
+ #undef __DEF_IF_HOST
735
+ #undef __CUDA_IMMA__
736
+ #undef __CUDA_SUBBYTE_IMMA__
737
+ #undef __CUDA_AMPERE_MMA__
738
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
739
+
740
+ #endif /* __cplusplus && __CUDACC__ */
741
+
742
+ #undef __CUDA_MMA_DEVICE_DECL__
743
+
744
+ #if defined(__CUDA_ARCH__)
745
+ #include "mma.hpp"
746
+ #endif /* defined(__CUDA_ARCH__) */
747
+
748
+
749
+ #endif /* !__CUDA_MMA_H__ */
750
+
751
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__)
752
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
753
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__
754
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ //NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
51
+ #define EXCLUDE_FROM_RTC
52
+
53
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
54
+ #if defined(_MSC_VER)
55
+ #pragma message("crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
56
+ #else
57
+ #warning "crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
58
+ #endif
59
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
60
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__
61
+ #endif
62
+
63
+ #if !defined(__SM_70_RT_H__)
64
+ #define __SM_70_RT_H__
65
+
66
+ #if defined(__CUDACC_RTC__)
67
+ #define __SM_70_RT_DECL__ __host__ __device__
68
+ #else /* !__CUDACC_RTC__ */
69
+ #define __SM_70_RT_DECL__ static __device__ __inline__
70
+ #endif /* __CUDACC_RTC__ */
71
+
72
+ #if defined(__cplusplus) && defined(__CUDACC__)
73
+
74
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
75
+
76
+ /*******************************************************************************
77
+ * *
78
+ * *
79
+ * *
80
+ *******************************************************************************/
81
+
82
+ #include "builtin_types.h"
83
+ #include "device_types.h"
84
+ #include "host_defines.h"
85
+
86
+ #ifndef __CUDA_ARCH__
87
+ #define __DEF_IF_HOST { }
88
+ #else /* !__CUDA_ARCH__ */
89
+ #define __DEF_IF_HOST ;
90
+ #endif /* __CUDA_ARCH__ */
91
+
92
+
93
+ /******************************************************************************
94
+ * match *
95
+ ******************************************************************************/
96
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) __DEF_IF_HOST
97
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) __DEF_IF_HOST
98
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) __DEF_IF_HOST
99
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) __DEF_IF_HOST
100
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) __DEF_IF_HOST
101
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) __DEF_IF_HOST
102
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) __DEF_IF_HOST
103
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) __DEF_IF_HOST
104
+
105
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) __DEF_IF_HOST
106
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) __DEF_IF_HOST
107
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) __DEF_IF_HOST
108
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) __DEF_IF_HOST
109
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) __DEF_IF_HOST
110
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) __DEF_IF_HOST
111
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) __DEF_IF_HOST
112
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) __DEF_IF_HOST
113
+
114
+ __SM_70_RT_DECL__ void __nanosleep(unsigned int ns) __DEF_IF_HOST
115
+
116
+ __SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) __DEF_IF_HOST
117
+
118
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
119
+
120
+ #endif /* __cplusplus && __CUDACC__ */
121
+
122
+ #undef __DEF_IF_HOST
123
+ #undef __SM_70_RT_DECL__
124
+
125
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
126
+ #include "sm_70_rt.hpp"
127
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
128
+
129
+ #endif /* !__SM_70_RT_H__ */
130
+
131
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__)
132
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
133
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__
134
+ #endif
135
+
136
+
137
+ #undef EXCLUDE_FROM_RTC
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_70_rt.hpp ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__
58
+ #endif
59
+
60
+ #if !defined(__SM_70_RT_HPP__)
61
+ #define __SM_70_RT_HPP__
62
+
63
+ #if defined(__CUDACC_RTC__)
64
+ #define __SM_70_RT_DECL__ __host__ __device__
65
+ #else /* !__CUDACC_RTC__ */
66
+ #define __SM_70_RT_DECL__ static __device__ __inline__
67
+ #endif /* __CUDACC_RTC__ */
68
+
69
+ #if defined(__cplusplus) && defined(__CUDACC__)
70
+
71
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
72
+
73
+ /*******************************************************************************
74
+ * *
75
+ * *
76
+ * *
77
+ *******************************************************************************/
78
+
79
+ #include "builtin_types.h"
80
+ #include "device_types.h"
81
+ #include "host_defines.h"
82
+
83
+ /*******************************************************************************
84
+ * *
85
+ * Below are implementations of SM-7.0 builtin functions which are included as *
86
+ * source (instead of being built in to the compiler) *
87
+ * *
88
+ *******************************************************************************/
89
+
90
+ //
91
+ // __match_any_sync
92
+ //
93
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) {
94
+ return __match32_any_sync(mask, value);
95
+ }
96
+
97
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) {
98
+ return __match32_any_sync(mask, value);
99
+ }
100
+
101
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) {
102
+ return (sizeof(long) == sizeof(long long)) ?
103
+ __match64_any_sync(mask, (unsigned long long)value):
104
+ __match32_any_sync(mask, (unsigned)value);
105
+ }
106
+
107
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) {
108
+ return (sizeof(long) == sizeof(long long)) ?
109
+ __match64_any_sync(mask, (unsigned long long)value):
110
+ __match32_any_sync(mask, (unsigned)value);
111
+ }
112
+
113
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) {
114
+ return __match64_any_sync(mask, value);
115
+ }
116
+
117
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) {
118
+ return __match64_any_sync(mask, value);
119
+ }
120
+
121
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) {
122
+ return __match32_any_sync(mask, __float_as_uint(value));
123
+ }
124
+
125
+ __SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) {
126
+ return __match64_any_sync(mask, __double_as_longlong(value));
127
+ }
128
+
129
+ //
130
+ // __match_all_sync
131
+ //
132
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) {
133
+ return __match32_all_sync(mask, value, pred);
134
+ }
135
+
136
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) {
137
+ return __match32_all_sync(mask, value, pred);
138
+ }
139
+
140
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) {
141
+ return (sizeof(long) == sizeof(long long)) ?
142
+ __match64_all_sync(mask, (unsigned long long)value, pred):
143
+ __match32_all_sync(mask, (unsigned)value, pred);
144
+ }
145
+
146
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) {
147
+ return (sizeof(long) == sizeof(long long)) ?
148
+ __match64_all_sync(mask, (unsigned long long)value, pred):
149
+ __match32_all_sync(mask, (unsigned)value, pred);
150
+ }
151
+
152
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) {
153
+ return __match64_all_sync(mask, value, pred);
154
+ }
155
+
156
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) {
157
+ return __match64_all_sync(mask, value, pred);
158
+ }
159
+
160
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) {
161
+ return __match32_all_sync(mask, __float_as_uint(value), pred);
162
+ }
163
+
164
+ __SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) {
165
+ return __match64_all_sync(mask, __double_as_longlong(value), pred);
166
+ }
167
+
168
+ __SM_70_RT_DECL__ void __nanosleep(unsigned int ns) {
169
+ asm volatile("nanosleep.u32 %0;" :: "r"(ns));
170
+ }
171
+
172
+
173
+ extern "C" __device__ __device_builtin__
174
+ unsigned short __usAtomicCAS(unsigned short *, unsigned short, unsigned short);
175
+
176
+ __SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) {
177
+ return __usAtomicCAS(address, compare, val);
178
+ }
179
+
180
+
181
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
182
+
183
+ #endif /* __cplusplus && __CUDACC__ */
184
+
185
+ #undef __SM_70_RT_DECL__
186
+
187
+ #endif /* !__SM_70_RT_HPP__ */
188
+
189
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__)
190
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
191
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__
192
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.h ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__
58
+ #endif
59
+
60
+ #if !defined(__SM_80_RT_H__)
61
+ #define __SM_80_RT_H__
62
+
63
+ #if defined(__CUDACC_RTC__)
64
+ #define __SM_80_RT_DECL__ __host__ __device__
65
+ #else /* !__CUDACC_RTC__ */
66
+ #define __SM_80_RT_DECL__ static __device__ __inline__
67
+ #endif /* __CUDACC_RTC__ */
68
+
69
+ #if defined(__cplusplus) && defined(__CUDACC__)
70
+
71
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
72
+
73
+ /*******************************************************************************
74
+ * *
75
+ * *
76
+ * *
77
+ *******************************************************************************/
78
+
79
+ #include "builtin_types.h"
80
+ #include "device_types.h"
81
+ #include "host_defines.h"
82
+
83
+ #ifndef __CUDA_ARCH__
84
+ #define __DEF_IF_HOST { }
85
+ #else /* !__CUDA_ARCH__ */
86
+ #define __DEF_IF_HOST ;
87
+ #endif /* __CUDA_ARCH__ */
88
+
89
+
90
+ //NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
91
+ #define EXCLUDE_FROM_RTC
92
+ /******************************************************************************
93
+ * reduce *
94
+ ******************************************************************************/
95
+ __SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) __DEF_IF_HOST
96
+ __SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) __DEF_IF_HOST
97
+ __SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) __DEF_IF_HOST
98
+
99
+ __SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) __DEF_IF_HOST
100
+ __SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) __DEF_IF_HOST
101
+ __SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) __DEF_IF_HOST
102
+
103
+ __SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) __DEF_IF_HOST
104
+ __SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) __DEF_IF_HOST
105
+ __SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) __DEF_IF_HOST
106
+
107
+ #undef EXCLUDE_FROM_RTC
108
+
109
+
110
+ extern "C" {
111
+ inline __device__ void *__nv_associate_access_property(const void *ptr,
112
+ unsigned long long property) {
113
+ extern __device__ void *__nv_associate_access_property_impl(const void *,
114
+ unsigned long long);
115
+ return __nv_associate_access_property_impl(ptr, property);
116
+ }
117
+
118
+ inline __device__ void __nv_memcpy_async_shared_global_4(void *dst,
119
+ const void *src,
120
+ unsigned src_size) {
121
+ extern __device__ void __nv_memcpy_async_shared_global_4_impl(void *,
122
+ const void *,
123
+ unsigned);
124
+ __nv_memcpy_async_shared_global_4_impl(dst, src, src_size);
125
+ }
126
+
127
+ inline __device__ void __nv_memcpy_async_shared_global_8(void *dst,
128
+ const void *src,
129
+ unsigned src_size) {
130
+ extern __device__ void __nv_memcpy_async_shared_global_8_impl(void *,
131
+ const void *,
132
+ unsigned);
133
+ __nv_memcpy_async_shared_global_8_impl(dst, src, src_size);
134
+ }
135
+
136
+ inline __device__ void __nv_memcpy_async_shared_global_16(void *dst,
137
+ const void *src,
138
+ unsigned src_size) {
139
+ extern __device__ void __nv_memcpy_async_shared_global_16_impl(void *,
140
+ const void *,
141
+ unsigned);
142
+ __nv_memcpy_async_shared_global_16_impl(dst, src, src_size);
143
+ }
144
+
145
+ }
146
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */
147
+
148
+ #endif /* __cplusplus && __CUDACC__ */
149
+
150
+ #undef __DEF_IF_HOST
151
+ #undef __SM_80_RT_DECL__
152
+
153
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
154
+ #include "sm_80_rt.hpp"
155
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
156
+
157
+ #endif /* !__SM_80_RT_H__ */
158
+
159
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__)
160
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
161
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__
162
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_80_rt.hpp ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__
58
+ #endif
59
+
60
+ #if !defined(__SM_80_RT_HPP__)
61
+ #define __SM_80_RT_HPP__
62
+
63
+ #if defined(__CUDACC_RTC__)
64
+ #define __SM_80_RT_DECL__ __host__ __device__
65
+ #else /* !__CUDACC_RTC__ */
66
+ #define __SM_80_RT_DECL__ static __device__ __inline__
67
+ #endif /* __CUDACC_RTC__ */
68
+
69
+ #if defined(__cplusplus) && defined(__CUDACC__)
70
+
71
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
72
+
73
+ /*******************************************************************************
74
+ * *
75
+ * *
76
+ * *
77
+ *******************************************************************************/
78
+
79
+ #include "builtin_types.h"
80
+ #include "device_types.h"
81
+ #include "host_defines.h"
82
+
83
+ /*******************************************************************************
84
+ * *
85
+ * Below are implementations of SM-8.0 builtin functions which are included as *
86
+ * source (instead of being built in to the compiler) *
87
+ * *
88
+ *******************************************************************************/
89
+
90
+ extern "C" {
91
+ __device_builtin__ __device__ unsigned __reduce_add_sync_unsigned_impl(unsigned, unsigned);
92
+ __device_builtin__ __device__ unsigned __reduce_min_sync_unsigned_impl(unsigned, unsigned);
93
+ __device_builtin__ __device__ unsigned __reduce_max_sync_unsigned_impl(unsigned, unsigned);
94
+ __device_builtin__ __device__ int __reduce_add_sync_signed_impl(unsigned, int);
95
+ __device_builtin__ __device__ int __reduce_min_sync_signed_impl(unsigned, int);
96
+ __device_builtin__ __device__ int __reduce_max_sync_signed_impl(unsigned, int);
97
+ __device_builtin__ __device__ unsigned __reduce_or_sync_unsigned_impl(unsigned, unsigned);
98
+ __device_builtin__ __device__ unsigned __reduce_and_sync_unsigned_impl(unsigned, unsigned);
99
+ __device_builtin__ __device__ unsigned __reduce_xor_sync_unsigned_impl(unsigned, unsigned);
100
+ }
101
+
102
+ __SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) {
103
+ return __reduce_add_sync_unsigned_impl(mask, value);
104
+ }
105
+
106
+ __SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) {
107
+ return __reduce_min_sync_unsigned_impl(mask, value);
108
+ }
109
+
110
+ __SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) {
111
+ return __reduce_max_sync_unsigned_impl(mask, value);
112
+ }
113
+
114
+ __SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) {
115
+ return __reduce_add_sync_signed_impl(mask, value);
116
+ }
117
+
118
+ __SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) {
119
+ return __reduce_min_sync_signed_impl(mask, value);
120
+ }
121
+
122
+ __SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) {
123
+ return __reduce_max_sync_signed_impl(mask, value);
124
+ }
125
+
126
+ __SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) {
127
+ return __reduce_and_sync_unsigned_impl(mask, value);
128
+ }
129
+
130
+ __SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) {
131
+ return __reduce_or_sync_unsigned_impl(mask, value);
132
+ }
133
+
134
+ __SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) {
135
+ return __reduce_xor_sync_unsigned_impl(mask, value);
136
+ }
137
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */
138
+
139
+ #endif /* __cplusplus && __CUDACC__ */
140
+
141
+ #undef __SM_80_RT_DECL__
142
+
143
+ #endif /* !__SM_80_RT_HPP__ */
144
+
145
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__)
146
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
147
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__
148
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.h ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2022-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__
58
+ #endif
59
+
60
+ #if !defined(__SM_90_RT_H__)
61
+ #define __SM_90_RT_H__
62
+
63
+ #if defined(__CUDACC_RTC__)
64
+ #define __SM_90_RT_DECL__ __host__ __device__
65
+ #else /* !__CUDACC_RTC__ */
66
+ #define __SM_90_RT_DECL__ static __device__ __inline__
67
+ #endif /* __CUDACC_RTC__ */
68
+
69
+ #if defined(__cplusplus) && defined(__CUDACC__)
70
+
71
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
72
+
73
+ /*******************************************************************************
74
+ * *
75
+ * *
76
+ * *
77
+ *******************************************************************************/
78
+
79
+ #include "builtin_types.h"
80
+ #include "device_types.h"
81
+ #include "host_defines.h"
82
+
83
+ #ifndef __CUDA_ARCH__
84
+ #define __DEF_IF_HOST { }
85
+ #else /* !__CUDA_ARCH__ */
86
+ #define __DEF_IF_HOST ;
87
+ #endif /* __CUDA_ARCH__ */
88
+
89
+ //NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
90
+ #define EXCLUDE_FROM_RTC
91
+
92
+ __SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr) __DEF_IF_HOST
93
+ __SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr) __DEF_IF_HOST
94
+ __SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr, unsigned target_block_rank) __DEF_IF_HOST
95
+ __SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr) __DEF_IF_HOST
96
+ __SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr, unsigned cluster_cta_mask) __DEF_IF_HOST
97
+ __SM_90_RT_DECL__ unsigned __clusterDimIsSpecified() __DEF_IF_HOST
98
+ __SM_90_RT_DECL__ dim3 __clusterDim() __DEF_IF_HOST
99
+ __SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx() __DEF_IF_HOST
100
+ __SM_90_RT_DECL__ dim3 __clusterGridDimInClusters() __DEF_IF_HOST
101
+ __SM_90_RT_DECL__ dim3 __clusterIdx() __DEF_IF_HOST
102
+ __SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank() __DEF_IF_HOST
103
+ __SM_90_RT_DECL__ unsigned __clusterSizeInBlocks() __DEF_IF_HOST
104
+ __SM_90_RT_DECL__ void __cluster_barrier_arrive() __DEF_IF_HOST
105
+ __SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed() __DEF_IF_HOST
106
+ __SM_90_RT_DECL__ void __cluster_barrier_wait() __DEF_IF_HOST
107
+ __SM_90_RT_DECL__ void __threadfence_cluster() __DEF_IF_HOST
108
+
109
+ __SM_90_RT_DECL__ float2 atomicAdd(float2 *__address, float2 val) __DEF_IF_HOST
110
+ __SM_90_RT_DECL__ float2 atomicAdd_block(float2 *__address, float2 val) __DEF_IF_HOST
111
+ __SM_90_RT_DECL__ float2 atomicAdd_system(float2 *__address, float2 val) __DEF_IF_HOST
112
+ __SM_90_RT_DECL__ float4 atomicAdd(float4 *__address, float4 val) __DEF_IF_HOST
113
+ __SM_90_RT_DECL__ float4 atomicAdd_block(float4 *__address, float4 val) __DEF_IF_HOST
114
+ __SM_90_RT_DECL__ float4 atomicAdd_system(float4 *__address, float4 val) __DEF_IF_HOST
115
+
116
+ #undef EXCLUDE_FROM_RTC
117
+
118
+ //Note: below atomic functions are templates, so cannot be represented in NVRTC
119
+ //builtins representation, so they have to be parsed on every NVRTC compilation.
120
+ //(notice 'EXCLUDE_FROM_RTC' ends above)
121
+
122
+
123
+ #ifndef __NV_DISABLE_128_ATOMICS
124
+ // lgen definitions for 128b atomics
125
+ extern "C" {
126
+ __device__ __device_builtin__ void __u128AtomicCAS(void *, void *, void *, void *);
127
+ __device__ __device_builtin__ void __u128AtomicCAS_block(void *, void *, void *, void *);
128
+ __device__ __device_builtin__ void __u128AtomicExch(void *, void *, void *);
129
+ __device__ __device_builtin__ void __u128AtomicExch_block(void *, void *, void *);
130
+ }
131
+
132
+ // macro to get address of object, to workaround situations where the type overloads the "&" operator
133
+ #define __NV_ATOMIC_ADDRESSOF(__val) \
134
+ (void *)(&(const_cast<char &>(reinterpret_cast<const volatile char &>(__val))))
135
+
136
+ // enable_if
137
+ template<bool __b, typename _T>
138
+ struct __nv_atomic_enable_if { };
139
+
140
+ template<typename _T>
141
+ struct __nv_atomic_enable_if<true, _T> { typedef _T __type; };
142
+
143
+ // alignof
144
+ #if defined(__CUDACC_RTC__)
145
+ #define __NV_ATOMIC_ALIGNOF __alignof__
146
+ #else
147
+ #define __NV_ATOMIC_ALIGNOF __alignof
148
+ #endif
149
+
150
+ // trivially copyable
151
+ template <typename _T>
152
+ struct __nv_atomic_triv_cp_helper {
153
+ #if defined(__GNUC__)
154
+ #if (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
155
+ static const bool __val = true;
156
+ #elif (__GNUC__ < 5)
157
+ static const bool __val = __has_trivial_copy(_T);
158
+ #else
159
+ static const bool __val = __is_trivially_copyable(_T);
160
+ #endif
161
+ #else
162
+ static const bool __val = __is_trivially_copyable(_T);
163
+ #endif
164
+ };
165
+ #define __NV_ATOMIC_TRIVIALLY_COPYABLE(_T) \
166
+ __nv_atomic_triv_cp_helper<_T>::__val
167
+
168
+ // return type
169
+ #if __cplusplus >= 202002L // C++20 or greater
170
+ #define __NV_ATOMIC_RET_TYPE(_T) _T
171
+ #else
172
+ #define __NV_ATOMIC_RET_TYPE(_T) typename \
173
+ __nv_atomic_enable_if<sizeof(_T) == 16 && \
174
+ __NV_ATOMIC_ALIGNOF(_T) >= 16 && \
175
+ __NV_ATOMIC_TRIVIALLY_COPYABLE(_T), _T>::__type
176
+ #endif
177
+
178
+ // requires
179
+ #if __cplusplus >= 202002L // C++20 or greater
180
+ #define __NV_ATOMIC_REQUIRES(_T) \
181
+ requires(sizeof(_T) == 16 && \
182
+ __NV_ATOMIC_ALIGNOF(_T) >= 16 && \
183
+ __NV_ATOMIC_TRIVIALLY_COPYABLE(_T))
184
+ #else
185
+ #define __NV_ATOMIC_REQUIRES(_T)
186
+ #endif
187
+
188
+ // temp value and return value
189
+ #if __cplusplus >= 201103L || defined(_MSC_VER) // C++11 or greater, or MSC
190
+ #define __NV_ATOMIC_TEMP(_T) union _U \
191
+ {_T __ret; __device__ __inline__ _U() {}}; _U __u
192
+ #define __NV_ATOMIC_RET(_T) __u.__ret
193
+ #else
194
+ #define __NV_ATOMIC_TEMP(_T) _T __ret
195
+ #define __NV_ATOMIC_RET(_T) __ret
196
+ #endif
197
+
198
+ // templated 128-bit atomics
199
+ template <typename _T>
200
+ __SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
201
+ atomicCAS(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
202
+ __NV_ATOMIC_TEMP(_T);
203
+ __u128AtomicCAS((void *)(__address),
204
+ __NV_ATOMIC_ADDRESSOF(__compare),
205
+ __NV_ATOMIC_ADDRESSOF(__val),
206
+ __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
207
+ return __NV_ATOMIC_RET(_T);
208
+ }
209
+
210
+ template <typename _T>
211
+ __SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
212
+ atomicCAS_block(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
213
+ __NV_ATOMIC_TEMP(_T);
214
+ __u128AtomicCAS_block((void *)(__address),
215
+ __NV_ATOMIC_ADDRESSOF(__compare),
216
+ __NV_ATOMIC_ADDRESSOF(__val),
217
+ __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
218
+ return __NV_ATOMIC_RET(_T);
219
+ }
220
+
221
+ template <typename _T>
222
+ __SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
223
+ atomicExch(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
224
+ __NV_ATOMIC_TEMP(_T);
225
+ __u128AtomicExch((void *)(__address),
226
+ __NV_ATOMIC_ADDRESSOF(__val),
227
+ __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
228
+ return __NV_ATOMIC_RET(_T);
229
+ }
230
+
231
+ template <typename _T>
232
+ __SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
233
+ atomicExch_block(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
234
+ __NV_ATOMIC_TEMP(_T);
235
+ __u128AtomicExch_block((void *)(__address),
236
+ __NV_ATOMIC_ADDRESSOF(__val),
237
+ __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
238
+ return __NV_ATOMIC_RET(_T);
239
+ }
240
+ #endif /* !__NV_DISABLE_128_ATOMICS */
241
+
242
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */
243
+
244
+ #endif /* __cplusplus && __CUDACC__ */
245
+
246
+ #undef __DEF_IF_HOST
247
+ #undef __SM_90_RT_DECL__
248
+
249
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
250
+ #include "sm_90_rt.hpp"
251
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
252
+
253
+ #endif /* !__SM_90_RT_H__ */
254
+
255
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__)
256
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
257
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__
258
+ #endif
259
+
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/storage_class.h ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * NVIDIA_COPYRIGHT_BEGIN
3
+ *
4
+ * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
7
+ * and proprietary rights in and to this software, related documentation
8
+ * and any modifications thereto. Any use, reproduction, disclosure or
9
+ * distribution of this software and related documentation without an express
10
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
11
+ *
12
+ * NVIDIA_COPYRIGHT_END
13
+ */
14
+
15
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
16
+ #if defined(_MSC_VER)
17
+ #pragma message("crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
18
+ #else
19
+ #warning "crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
20
+ #endif
21
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
22
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__
23
+ #endif
24
+
25
+ #if !defined(__STORAGE_CLASS_H__)
26
+ #define __STORAGE_CLASS_H__
27
+
28
+ #if !defined(__var_used__)
29
+
30
+ #define __var_used__
31
+
32
+ #endif /* __var_used__ */
33
+
34
+ #if !defined(__loc_sc__)
35
+
36
+ #define __loc_sc__(loc, size, sc) \
37
+ __storage##_##sc##size##loc loc
38
+
39
+ #endif /* !__loc_sc__ */
40
+
41
+ #if !defined(__storage___device__)
42
+ #define __storage___device__ static __var_used__
43
+ #endif /* __storage___device__ */
44
+
45
+ #if !defined(__storage_extern__device__)
46
+ #define __storage_extern__device__ static __var_used__
47
+ #endif /* __storage_extern__device__ */
48
+
49
+ #if !defined(__storage_auto__device__)
50
+ #define __storage_auto__device__ @@@ COMPILER @@@ ERROR @@@
51
+ #endif /* __storage_auto__device__ */
52
+
53
+ #if !defined(__storage_static__device__)
54
+ #define __storage_static__device__ static __var_used__
55
+ #endif /* __storage_static__device__ */
56
+
57
+ #if !defined(__storage___constant__)
58
+ #define __storage___constant__ static __var_used__
59
+ #endif /* __storage___constant__ */
60
+
61
+ #if !defined(__storage_extern__constant__)
62
+ #define __storage_extern__constant__ static __var_used__
63
+ #endif /* __storage_extern__constant__ */
64
+
65
+ #if !defined(__storage_auto__constant__)
66
+ #define __storage_auto__constant__ @@@ COMPILER @@@ ERROR @@@
67
+ #endif /* __storage_auto__constant__ */
68
+
69
+ #if !defined(__storage_static__constant__)
70
+ #define __storage_static__constant__ static __var_used__
71
+ #endif /* __storage_static__constant__ */
72
+
73
+ #if !defined(__storage___shared__)
74
+ #define __storage___shared__ static __var_used__
75
+ #endif /* __storage___shared__ */
76
+
77
+ #if !defined(__storage_extern__shared__)
78
+ #define __storage_extern__shared__ static __var_used__
79
+ #endif /* __storage_extern__shared__ */
80
+
81
+ #if !defined(__storage_auto__shared__)
82
+ #define __storage_auto__shared__ static
83
+ #endif /* __storage_auto__shared__ */
84
+
85
+ #if !defined(__storage_static__shared__)
86
+ #define __storage_static__shared__ static __var_used__
87
+ #endif /* __storage_static__shared__ */
88
+
89
+ #if !defined(__storage__unsized__shared__)
90
+ #define __storage__unsized__shared__ @@@ COMPILER @@@ ERROR @@@
91
+ #endif /* __storage__unsized__shared__ */
92
+
93
+ #if !defined(__storage_extern_unsized__shared__)
94
+ #define __storage_extern_unsized__shared__ static __var_used__
95
+ #endif /* __storage_extern_unsized__shared__ */
96
+
97
+ #if !defined(__storage_auto_unsized__shared__)
98
+ #define __storage_auto_unsized__shared__ @@@ COMPILER @@@ ERROR @@@
99
+ #endif /* __storage_auto_unsized__shared__ */
100
+
101
+ #if !defined(__storage_static_unsized__shared__)
102
+ #define __storage_static_unsized__shared__ @@@ COMPILER @@@ ERROR @@@
103
+ #endif /* __storage_static_unsized__shared__ */
104
+
105
+ #if !defined(__storage___text__)
106
+ #define __storage___text__ static __var_used__
107
+ #endif /* __storage___text__ */
108
+
109
+ #if !defined(__storage_extern__text__)
110
+ #define __storage_extern__text__ static __var_used__
111
+ #endif /* __storage_extern__text__ */
112
+
113
+ #if !defined(__storage_auto__text__)
114
+ #define __storage_auto__text__ @@@ COMPILER @@@ ERROR @@@
115
+ #endif /* __storage_auto__text__ */
116
+
117
+ #if !defined(__storage_static__text__)
118
+ #define __storage_static__text__ static __var_used__
119
+ #endif /* __storage_static__text__ */
120
+
121
+ #if !defined(__storage___surf__)
122
+ #define __storage___surf__ static __var_used__
123
+ #endif /* __storage___surf__ */
124
+
125
+ #if !defined(__storage_extern__surf__)
126
+ #define __storage_extern__surf__ static __var_used__
127
+ #endif /* __storage_extern__surf__ */
128
+
129
+ #if !defined(__storage_auto__surf__)
130
+ #define __storage_auto__surf__ @@@ COMPILER @@@ ERROR @@@
131
+ #endif /* __storage_auto__surf__ */
132
+
133
+ #if !defined(__storage_static__surf__)
134
+ #define __storage_static__surf__ static __var_used__
135
+ #endif /* __storage_static__surf__ */
136
+
137
+ #endif /* !__STORAGE_CLASS_H__ */
138
+
139
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__)
140
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
141
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__
142
+ #endif
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/addressof.h ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___MEMORY_ADDRESSOF_H
12
+ #define _LIBCUDACXX___MEMORY_ADDRESSOF_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif //__cuda_std__
17
+
18
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
19
+ #pragma GCC system_header
20
+ #endif
21
+
22
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
23
+
24
+ // addressof
25
+ // NVCXX has the builtin defined but did not mark it as supported
26
+ #if defined(_LIBCUDACXX_ADDRESSOF)
27
+
28
+ template <class _Tp>
29
+ inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
30
+ _LIBCUDACXX_NO_CFI _LIBCUDACXX_INLINE_VISIBILITY
31
+ _Tp*
32
+ addressof(_Tp& __x) _NOEXCEPT
33
+ {
34
+ return __builtin_addressof(__x);
35
+ }
36
+
37
+ #else
38
+
39
+ template <class _Tp>
40
+ inline _LIBCUDACXX_NO_CFI _LIBCUDACXX_INLINE_VISIBILITY
41
+ _Tp*
42
+ addressof(_Tp& __x) _NOEXCEPT
43
+ {
44
+ return reinterpret_cast<_Tp *>(
45
+ const_cast<char *>(&reinterpret_cast<const volatile char &>(__x)));
46
+ }
47
+
48
+ #endif // defined(_LIBCUDACXX_ADDRESSOF)
49
+
50
+ #if defined(_LIBCUDACXX_HAS_OBJC_ARC) && !defined(_LIBCUDACXX_PREDEFINED_OBJC_ARC_ADDRESSOF)
51
+ // Objective-C++ Automatic Reference Counting uses qualified pointers
52
+ // that require special addressof() signatures. When
53
+ // _LIBCUDACXX_PREDEFINED_OBJC_ARC_ADDRESSOF is defined, the compiler
54
+ // itself is providing these definitions. Otherwise, we provide them.
55
+ template <class _Tp>
56
+ inline _LIBCUDACXX_INLINE_VISIBILITY
57
+ __strong _Tp*
58
+ addressof(__strong _Tp& __x) _NOEXCEPT
59
+ {
60
+ return &__x;
61
+ }
62
+
63
+ #ifdef _LIBCUDACXX_HAS_OBJC_ARC_WEAK
64
+ template <class _Tp>
65
+ inline _LIBCUDACXX_INLINE_VISIBILITY
66
+ __weak _Tp*
67
+ addressof(__weak _Tp& __x) _NOEXCEPT
68
+ {
69
+ return &__x;
70
+ }
71
+ #endif
72
+
73
+ template <class _Tp>
74
+ inline _LIBCUDACXX_INLINE_VISIBILITY
75
+ __autoreleasing _Tp*
76
+ addressof(__autoreleasing _Tp& __x) _NOEXCEPT
77
+ {
78
+ return &__x;
79
+ }
80
+
81
+ template <class _Tp>
82
+ inline _LIBCUDACXX_INLINE_VISIBILITY
83
+ __unsafe_unretained _Tp*
84
+ addressof(__unsafe_unretained _Tp& __x) _NOEXCEPT
85
+ {
86
+ return &__x;
87
+ }
88
+ #endif
89
+
90
+ #if !defined(_LIBCUDACXX_CXX03_LANG)
91
+ template <class _Tp> _Tp* addressof(const _Tp&&) noexcept = delete;
92
+ #endif
93
+
94
+ _LIBCUDACXX_END_NAMESPACE_STD
95
+
96
+ #endif // _LIBCUDACXX___MEMORY_ADDRESSOF_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/construct_at.h ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H
12
+ #define _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif //__cuda_std__
17
+
18
+ #include "../__assert"
19
+ #include "../__iterator/access.h"
20
+ #include "../__memory/addressof.h"
21
+ #include "../__memory/voidify.h"
22
+ #include "../__type_traits/enable_if.h"
23
+ #include "../__type_traits/is_array.h"
24
+ #include "../__type_traits/is_constant_evaluated.h"
25
+ #include "../__type_traits/is_trivially_move_assignable.h"
26
+ #include "../__type_traits/is_trivially_constructible.h"
27
+ #include "../__utility/forward.h"
28
+ #include "../__utility/move.h"
29
+
30
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
31
+ #pragma GCC system_header
32
+ #endif
33
+
34
+ #if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17 // need to backfill ::std::construct_at
35
+ #ifndef _LIBCUDACXX_COMPILER_NVRTC
36
+ #include <memory>
37
+ #endif // _LIBCUDACXX_COMPILER_NVRTC
38
+
39
+ #ifndef __cpp_lib_constexpr_dynamic_alloc
40
+ namespace std {
41
+ template <class _Tp, class... _Args, class = decltype(::new(_CUDA_VSTD::declval<void*>()) _Tp(_CUDA_VSTD::declval<_Args>()...))>
42
+ _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp* construct_at(_Tp* __location, _Args&&... __args) {
43
+ #if defined(_LIBCUDACXX_ADDRESSOF)
44
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
45
+ #else
46
+ return ::new (const_cast<void*>(static_cast<const volatile void*>(__location))) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
47
+ #endif
48
+ }
49
+ } // namespace std
50
+ #endif // __cpp_lib_constexpr_dynamic_alloc
51
+ #endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17
52
+
53
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
54
+
55
+ // There is a performance issue with placement new, where EDG based compiler insert a nullptr check that is superfluous
56
+ // Because this is a noticable performance regression, we specialize for trivially constructible types
57
+ // This is possible because we are calling ::new ignoring any user defined overloads of operator placement new
58
+
59
+ // construct_at
60
+ #if _LIBCUDACXX_STD_VER > 17
61
+
62
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
63
+ template <class _Tp, class... _Args, class = decltype(::new(_CUDA_VSTD::declval<void*>()) _Tp(_CUDA_VSTD::declval<_Args>()...))>
64
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
65
+ __enable_if_t<!is_trivially_constructible_v<_Tp, _Args...> ||
66
+ !is_trivially_move_assignable_v<_Tp>, _Tp*>
67
+ construct_at(_Tp* __location, _Args&&... __args) {
68
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
69
+ #if defined(__cuda_std__)
70
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
71
+ if (__libcpp_is_constant_evaluated()) {
72
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
73
+ }
74
+ #endif // __cuda_std__
75
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
76
+ }
77
+
78
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
79
+ template <class _Tp, class... _Args, class = decltype(::new(_CUDA_VSTD::declval<void*>()) _Tp(_CUDA_VSTD::declval<_Args>()...))>
80
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
81
+ __enable_if_t<is_trivially_constructible_v<_Tp, _Args...> &&
82
+ is_trivially_move_assignable_v<_Tp>, _Tp*>
83
+ construct_at(_Tp* __location, _Args&&... __args) {
84
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
85
+ #if defined(__cuda_std__)
86
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
87
+ if (__libcpp_is_constant_evaluated()) {
88
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
89
+ }
90
+ *__location = _Tp{_CUDA_VSTD::forward<_Args>(__args)...};
91
+ return __location;
92
+ #else // ^^^ __cuda_std__ ^^^ / vvv !__cuda_std__ vvv
93
+ // NVCC always considers construction + move assignment, other compilers are smarter using copy construction
94
+ // So rather than adding all kinds of workarounds simply fall back to the correct implementation for libcxx mode
95
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
96
+ #endif // !__cuda_std__
97
+ }
98
+
99
+ #endif // _LIBCUDACXX_STD_VER > 17
100
+
101
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
102
+ template <class _Tp, class... _Args>
103
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
104
+ __enable_if_t<!_LIBCUDACXX_TRAIT(is_trivially_constructible, _Tp, _Args...) || !_LIBCUDACXX_TRAIT(is_trivially_move_assignable, _Tp), _Tp*>
105
+ __construct_at(_Tp* __location, _Args&&... __args) {
106
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
107
+ #if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17
108
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
109
+ if (__libcpp_is_constant_evaluated()) {
110
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
111
+ }
112
+ #endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17
113
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
114
+ }
115
+
116
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
117
+ template <class _Tp, class... _Args>
118
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
119
+ __enable_if_t<_LIBCUDACXX_TRAIT(is_trivially_constructible, _Tp, _Args...) && _LIBCUDACXX_TRAIT(is_trivially_move_assignable, _Tp), _Tp*>
120
+ __construct_at(_Tp* __location, _Args&&... __args) {
121
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
122
+ #if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17
123
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
124
+ if (__libcpp_is_constant_evaluated()) {
125
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
126
+ }
127
+ #endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17
128
+ *__location = _Tp{_CUDA_VSTD::forward<_Args>(__args)...};
129
+ return __location;
130
+ }
131
+
132
+ // destroy_at
133
+
134
+ // The internal functions are available regardless of the language version (with the exception of the `__destroy_at`
135
+ // taking an array).
136
+ template <class _ForwardIterator>
137
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
138
+ _ForwardIterator __destroy(_ForwardIterator, _ForwardIterator);
139
+
140
+ template <class _Tp, __enable_if_t<!is_array<_Tp>::value, int> = 0>
141
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
142
+ void __destroy_at(_Tp* __loc) {
143
+ _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at");
144
+ __loc->~_Tp();
145
+ }
146
+
147
+ #if _LIBCUDACXX_STD_VER > 17
148
+ template <class _Tp, __enable_if_t<is_array<_Tp>::value, int> = 0>
149
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
150
+ void __destroy_at(_Tp* __loc) {
151
+ _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at");
152
+ _CUDA_VSTD::__destroy(_CUDA_VSTD::begin(*__loc), _CUDA_VSTD::end(*__loc));
153
+ }
154
+ #endif
155
+
156
+ template <class _ForwardIterator>
157
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
158
+ _ForwardIterator __destroy(_ForwardIterator __first, _ForwardIterator __last) {
159
+ for (; __first != __last; ++__first)
160
+ _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__first));
161
+ return __first;
162
+ }
163
+
164
+ template <class _BidirectionalIterator>
165
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
166
+ _BidirectionalIterator __reverse_destroy(_BidirectionalIterator __first, _BidirectionalIterator __last) {
167
+ while (__last != __first) {
168
+ --__last;
169
+ _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__last));
170
+ }
171
+ return __last;
172
+ }
173
+
174
+ #if _LIBCUDACXX_STD_VER > 14
175
+
176
+ template <class _Tp, enable_if_t<!is_array_v<_Tp>, int> = 0>
177
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
178
+ void destroy_at(_Tp* __loc) {
179
+ _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at");
180
+ __loc->~_Tp();
181
+ }
182
+
183
+ #if _LIBCUDACXX_STD_VER > 17
184
+ template <class _Tp, enable_if_t<is_array_v<_Tp>, int> = 0>
185
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
186
+ void destroy_at(_Tp* __loc) {
187
+ _CUDA_VSTD::__destroy_at(__loc);
188
+ }
189
+ #endif // _LIBCUDACXX_STD_VER > 17
190
+
191
+ template <class _ForwardIterator>
192
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
193
+ void destroy(_ForwardIterator __first, _ForwardIterator __last) {
194
+ (void)_CUDA_VSTD::__destroy(_CUDA_VSTD::move(__first), _CUDA_VSTD::move(__last));
195
+ }
196
+
197
+ template <class _ForwardIterator, class _Size>
198
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
199
+ _ForwardIterator destroy_n(_ForwardIterator __first, _Size __n) {
200
+ for (; __n > 0; (void)++__first, --__n)
201
+ _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__first));
202
+ return __first;
203
+ }
204
+
205
+ #endif // _LIBCUDACXX_STD_VER > 14
206
+
207
+ _LIBCUDACXX_END_NAMESPACE_STD
208
+
209
+ #endif // _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___MEMORY_POINTER_TRAITS_H
11
+ #define _LIBCUDACXX___MEMORY_POINTER_TRAITS_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif //__cuda_std__
16
+
17
+ #include "../__memory/addressof.h"
18
+ #include "../__type_traits/conjunction.h"
19
+ #include "../__type_traits/conditional.h"
20
+ #include "../__type_traits/decay.h"
21
+ #include "../__type_traits/enable_if.h"
22
+ #include "../__type_traits/integral_constant.h"
23
+ #include "../__type_traits/is_class.h"
24
+ #include "../__type_traits/is_function.h"
25
+ #include "../__type_traits/is_void.h"
26
+ #include "../__type_traits/void_t.h"
27
+ #include "../__utility/declval.h"
28
+ #include "../cstddef"
29
+
30
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
31
+ #pragma GCC system_header
32
+ #endif
33
+
34
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
35
+
36
+ template <class _Tp, class = void>
37
+ struct __has_element_type : false_type {};
38
+
39
+ template <class _Tp>
40
+ struct __has_element_type<_Tp,
41
+ __void_t<typename _Tp::element_type>> : true_type {};
42
+
43
+ template <class _Ptr, bool = __has_element_type<_Ptr>::value>
44
+ struct __pointer_traits_element_type;
45
+
46
+ template <class _Ptr>
47
+ struct __pointer_traits_element_type<_Ptr, true>
48
+ {
49
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Ptr::element_type type;
50
+ };
51
+
52
+ #ifndef _LIBCUDACXX_HAS_NO_VARIADICS
53
+
54
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args>
55
+ struct __pointer_traits_element_type<_Sp<_Tp, _Args...>, true>
56
+ {
57
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Sp<_Tp, _Args...>::element_type type;
58
+ };
59
+
60
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args>
61
+ struct __pointer_traits_element_type<_Sp<_Tp, _Args...>, false>
62
+ {
63
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type;
64
+ };
65
+
66
+ #else // _LIBCUDACXX_HAS_NO_VARIADICS
67
+
68
+ template <template <class> class _Sp, class _Tp>
69
+ struct __pointer_traits_element_type<_Sp<_Tp>, true>
70
+ {
71
+ typedef typename _Sp<_Tp>::element_type type;
72
+ };
73
+
74
+ template <template <class> class _Sp, class _Tp>
75
+ struct __pointer_traits_element_type<_Sp<_Tp>, false>
76
+ {
77
+ typedef _Tp type;
78
+ };
79
+
80
+ template <template <class, class> class _Sp, class _Tp, class _A0>
81
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0>, true>
82
+ {
83
+ typedef typename _Sp<_Tp, _A0>::element_type type;
84
+ };
85
+
86
+ template <template <class, class> class _Sp, class _Tp, class _A0>
87
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0>, false>
88
+ {
89
+ typedef _Tp type;
90
+ };
91
+
92
+ template <template <class, class, class> class _Sp, class _Tp, class _A0, class _A1>
93
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1>, true>
94
+ {
95
+ typedef typename _Sp<_Tp, _A0, _A1>::element_type type;
96
+ };
97
+
98
+ template <template <class, class, class> class _Sp, class _Tp, class _A0, class _A1>
99
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1>, false>
100
+ {
101
+ typedef _Tp type;
102
+ };
103
+
104
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
105
+ class _A1, class _A2>
106
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1, _A2>, true>
107
+ {
108
+ typedef typename _Sp<_Tp, _A0, _A1, _A2>::element_type type;
109
+ };
110
+
111
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
112
+ class _A1, class _A2>
113
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1, _A2>, false>
114
+ {
115
+ typedef _Tp type;
116
+ };
117
+
118
+ #endif // _LIBCUDACXX_HAS_NO_VARIADICS
119
+
120
+ template <class _Tp, class = void>
121
+ struct __has_difference_type : false_type {};
122
+
123
+ template <class _Tp>
124
+ struct __has_difference_type<_Tp,
125
+ __void_t<typename _Tp::difference_type>> : true_type {};
126
+
127
+ template <class _Ptr, bool = __has_difference_type<_Ptr>::value>
128
+ struct __pointer_traits_difference_type
129
+ {
130
+ typedef _LIBCUDACXX_NODEBUG_TYPE ptrdiff_t type;
131
+ };
132
+
133
+ template <class _Ptr>
134
+ struct __pointer_traits_difference_type<_Ptr, true>
135
+ {
136
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Ptr::difference_type type;
137
+ };
138
+
139
+ template <class _Tp, class _Up>
140
+ struct __has_rebind
141
+ {
142
+ private:
143
+ template <class _Xp> _LIBCUDACXX_INLINE_VISIBILITY static false_type __test(...);
144
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH
145
+ template <class _Xp> _LIBCUDACXX_INLINE_VISIBILITY static true_type __test(typename _Xp::template rebind<_Up>* = 0);
146
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_POP
147
+ public:
148
+ static const bool value = decltype(__test<_Tp>(0))::value;
149
+ };
150
+
151
+ template <class _Tp, class _Up, bool = __has_rebind<_Tp, _Up>::value>
152
+ struct __pointer_traits_rebind
153
+ {
154
+ #ifndef _LIBCUDACXX_CXX03_LANG
155
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Tp::template rebind<_Up> type;
156
+ #else
157
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Tp::template rebind<_Up>::other type;
158
+ #endif
159
+ };
160
+
161
+ #ifndef _LIBCUDACXX_HAS_NO_VARIADICS
162
+
163
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args, class _Up>
164
+ struct __pointer_traits_rebind<_Sp<_Tp, _Args...>, _Up, true>
165
+ {
166
+ #ifndef _LIBCUDACXX_CXX03_LANG
167
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Sp<_Tp, _Args...>::template rebind<_Up> type;
168
+ #else
169
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Sp<_Tp, _Args...>::template rebind<_Up>::other type;
170
+ #endif
171
+ };
172
+
173
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args, class _Up>
174
+ struct __pointer_traits_rebind<_Sp<_Tp, _Args...>, _Up, false>
175
+ {
176
+ typedef _Sp<_Up, _Args...> type;
177
+ };
178
+
179
+ #else // _LIBCUDACXX_HAS_NO_VARIADICS
180
+
181
+ template <template <class> class _Sp, class _Tp, class _Up>
182
+ struct __pointer_traits_rebind<_Sp<_Tp>, _Up, true>
183
+ {
184
+ #ifndef _LIBCUDACXX_CXX03_LANG
185
+ typedef typename _Sp<_Tp>::template rebind<_Up> type;
186
+ #else
187
+ typedef typename _Sp<_Tp>::template rebind<_Up>::other type;
188
+ #endif
189
+ };
190
+
191
+ template <template <class> class _Sp, class _Tp, class _Up>
192
+ struct __pointer_traits_rebind<_Sp<_Tp>, _Up, false>
193
+ {
194
+ typedef _Sp<_Up> type;
195
+ };
196
+
197
+ template <template <class, class> class _Sp, class _Tp, class _A0, class _Up>
198
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0>, _Up, true>
199
+ {
200
+ #ifndef _LIBCUDACXX_CXX03_LANG
201
+ typedef typename _Sp<_Tp, _A0>::template rebind<_Up> type;
202
+ #else
203
+ typedef typename _Sp<_Tp, _A0>::template rebind<_Up>::other type;
204
+ #endif
205
+ };
206
+
207
+ template <template <class, class> class _Sp, class _Tp, class _A0, class _Up>
208
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0>, _Up, false>
209
+ {
210
+ typedef _Sp<_Up, _A0> type;
211
+ };
212
+
213
+ template <template <class, class, class> class _Sp, class _Tp, class _A0,
214
+ class _A1, class _Up>
215
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1>, _Up, true>
216
+ {
217
+ #ifndef _LIBCUDACXX_CXX03_LANG
218
+ typedef typename _Sp<_Tp, _A0, _A1>::template rebind<_Up> type;
219
+ #else
220
+ typedef typename _Sp<_Tp, _A0, _A1>::template rebind<_Up>::other type;
221
+ #endif
222
+ };
223
+
224
+ template <template <class, class, class> class _Sp, class _Tp, class _A0,
225
+ class _A1, class _Up>
226
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1>, _Up, false>
227
+ {
228
+ typedef _Sp<_Up, _A0, _A1> type;
229
+ };
230
+
231
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
232
+ class _A1, class _A2, class _Up>
233
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1, _A2>, _Up, true>
234
+ {
235
+ #ifndef _LIBCUDACXX_CXX03_LANG
236
+ typedef typename _Sp<_Tp, _A0, _A1, _A2>::template rebind<_Up> type;
237
+ #else
238
+ typedef typename _Sp<_Tp, _A0, _A1, _A2>::template rebind<_Up>::other type;
239
+ #endif
240
+ };
241
+
242
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
243
+ class _A1, class _A2, class _Up>
244
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1, _A2>, _Up, false>
245
+ {
246
+ typedef _Sp<_Up, _A0, _A1, _A2> type;
247
+ };
248
+
249
+ #endif // _LIBCUDACXX_HAS_NO_VARIADICS
250
+
251
+ template <class _Ptr>
252
+ struct _LIBCUDACXX_TEMPLATE_VIS pointer_traits
253
+ {
254
+ typedef _Ptr pointer;
255
+ typedef typename __pointer_traits_element_type<pointer>::type element_type;
256
+ typedef typename __pointer_traits_difference_type<pointer>::type difference_type;
257
+
258
+ #ifndef _LIBCUDACXX_CXX03_LANG
259
+ template <class _Up> using rebind = typename __pointer_traits_rebind<pointer, _Up>::type;
260
+ #else
261
+ template <class _Up> struct rebind
262
+ {typedef typename __pointer_traits_rebind<pointer, _Up>::type other;};
263
+ #endif // _LIBCUDACXX_CXX03_LANG
264
+
265
+ private:
266
+ struct __nat {};
267
+ public:
268
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
269
+ static pointer pointer_to(__conditional_t<is_void<element_type>::value, __nat, element_type>& __r)
270
+ {return pointer::pointer_to(__r);}
271
+ };
272
+
273
+ template <class _Tp>
274
+ struct _LIBCUDACXX_TEMPLATE_VIS pointer_traits<_Tp*>
275
+ {
276
+ typedef _Tp* pointer;
277
+ typedef _Tp element_type;
278
+ typedef ptrdiff_t difference_type;
279
+
280
+ #ifndef _LIBCUDACXX_CXX03_LANG
281
+ template <class _Up> using rebind = _Up*;
282
+ #else
283
+ template <class _Up> struct rebind {typedef _Up* other;};
284
+ #endif
285
+
286
+ private:
287
+ struct __nat {};
288
+ public:
289
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
290
+ static pointer pointer_to(__conditional_t<is_void<element_type>::value, __nat, element_type>& __r) _NOEXCEPT
291
+ {return _CUDA_VSTD::addressof(__r);}
292
+ };
293
+
294
+ template <class _From, class _To>
295
+ struct __rebind_pointer {
296
+ #ifndef _LIBCUDACXX_CXX03_LANG
297
+ typedef typename pointer_traits<_From>::template rebind<_To> type;
298
+ #else
299
+ typedef typename pointer_traits<_From>::template rebind<_To>::other type;
300
+ #endif
301
+ };
302
+
303
+ // to_address
304
+
305
+ template <class _Pointer, class = void>
306
+ struct __to_address_helper;
307
+
308
+ template <class _Tp>
309
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
310
+ _Tp* __to_address(_Tp* __p) _NOEXCEPT {
311
+ static_assert(!is_function<_Tp>::value, "_Tp is a function type");
312
+ return __p;
313
+ }
314
+
315
+ template <class _Pointer, class = void>
316
+ struct _HasToAddress : false_type {};
317
+
318
+ template <class _Pointer>
319
+ struct _HasToAddress<_Pointer,
320
+ decltype((void)pointer_traits<_Pointer>::to_address(declval<const _Pointer&>()))
321
+ > : true_type {};
322
+
323
+ template <class _Pointer, class = void>
324
+ struct _HasArrow : false_type {};
325
+
326
+ template <class _Pointer>
327
+ struct _HasArrow<_Pointer,
328
+ decltype((void)declval<const _Pointer&>().operator->())
329
+ > : true_type {};
330
+
331
+ template <class _Pointer>
332
+ struct _IsFancyPointer {
333
+ static const bool value = _HasArrow<_Pointer>::value || _HasToAddress<_Pointer>::value;
334
+ };
335
+
336
+ // enable_if is needed here to avoid instantiating checks for fancy pointers on raw pointers
337
+ template <class _Pointer, class = __enable_if_t<
338
+ _And<is_class<_Pointer>, _IsFancyPointer<_Pointer> >::value
339
+ > >
340
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
341
+ __decay_t<decltype(__to_address_helper<_Pointer>::__call(declval<const _Pointer&>()))>
342
+ __to_address(const _Pointer& __p) _NOEXCEPT {
343
+ return __to_address_helper<_Pointer>::__call(__p);
344
+ }
345
+
346
+ template <class _Pointer, class>
347
+ struct __to_address_helper {
348
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
349
+ static decltype(_CUDA_VSTD::__to_address(declval<const _Pointer&>().operator->()))
350
+ __call(const _Pointer& __p) _NOEXCEPT {
351
+ return _CUDA_VSTD::__to_address(__p.operator->());
352
+ }
353
+ };
354
+
355
+ template <class _Pointer>
356
+ struct __to_address_helper<_Pointer, decltype((void)pointer_traits<_Pointer>::to_address(declval<const _Pointer&>()))> {
357
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR
358
+ static decltype(pointer_traits<_Pointer>::to_address(declval<const _Pointer&>()))
359
+ __call(const _Pointer& __p) _NOEXCEPT {
360
+ return pointer_traits<_Pointer>::to_address(__p);
361
+ }
362
+ };
363
+
364
+ #if _LIBCUDACXX_STD_VER > 11
365
+ template <class _Tp>
366
+ inline _LIBCUDACXX_INLINE_VISIBILITY constexpr
367
+ auto to_address(_Tp *__p) noexcept {
368
+ return _CUDA_VSTD::__to_address(__p);
369
+ }
370
+
371
+ template <class _Pointer>
372
+ inline _LIBCUDACXX_INLINE_VISIBILITY constexpr
373
+ auto to_address(const _Pointer& __p) noexcept -> decltype(_CUDA_VSTD::__to_address(__p)) {
374
+ return _CUDA_VSTD::__to_address(__p);
375
+ }
376
+ #endif
377
+
378
+ _LIBCUDACXX_END_NAMESPACE_STD
379
+
380
+ #endif // _LIBCUDACXX___MEMORY_POINTER_TRAITS_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__memory/voidify.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___MEMORY_VOIDIFY_H
12
+ #define _LIBCUDACXX___MEMORY_VOIDIFY_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif //__cuda_std__
17
+
18
+ #include "../__memory/addressof.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ template <typename _Tp>
27
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 void* __voidify(_Tp& __from) {
28
+ // Cast away cv-qualifiers to allow modifying elements of a range through const iterators.
29
+ return const_cast<void*>(static_cast<const volatile void*>(_CUDA_VSTD::addressof(__from)));
30
+ }
31
+
32
+ _LIBCUDACXX_END_NAMESPACE_STD
33
+
34
+ #endif // _LIBCUDACXX___MEMORY_VOIDIFY_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_const.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_CONST_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ADD_CONST_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS add_const {
24
+ typedef _LIBCUDACXX_NODEBUG_TYPE const _Tp type;
25
+ };
26
+
27
+ #if _LIBCUDACXX_STD_VER > 11
28
+ template <class _Tp> using add_const_t = typename add_const<_Tp>::type;
29
+ #endif
30
+
31
+ _LIBCUDACXX_END_NAMESPACE_STD
32
+
33
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ADD_CONST_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_cv.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_CV_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ADD_CV_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS add_cv {
24
+ typedef _LIBCUDACXX_NODEBUG_TYPE const volatile _Tp type;
25
+ };
26
+
27
+ #if _LIBCUDACXX_STD_VER > 11
28
+ template <class _Tp> using add_cv_t = typename add_cv<_Tp>::type;
29
+ #endif
30
+
31
+ _LIBCUDACXX_END_NAMESPACE_STD
32
+
33
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ADD_CV_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_lvalue_reference.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_LVALUE_REFERENCE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ADD_LVALUE_REFERENCE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/is_referenceable.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ #if defined(_LIBCUDACXX_ADD_LVALUE_REFERENCE) && !defined(_LIBCUDACXX_USE_ADD_LVALUE_REFERENCE_FALLBACK)
26
+
27
+ template <class _Tp>
28
+ using __add_lvalue_reference_t = _LIBCUDACXX_ADD_LVALUE_REFERENCE(_Tp);
29
+
30
+ #else
31
+
32
+ template <class _Tp, bool = __libcpp_is_referenceable<_Tp>::value>
33
+ struct __add_lvalue_reference_impl {
34
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type;
35
+ };
36
+ template <class _Tp >
37
+ struct __add_lvalue_reference_impl<_Tp, true> {
38
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Tp& type;
39
+ };
40
+
41
+ template <class _Tp>
42
+ using __add_lvalue_reference_t = typename __add_lvalue_reference_impl<_Tp>::type;
43
+
44
+ #endif // defined(_LIBCUDACXX_ADD_LVALUE_REFERENCE) && !defined(_LIBCUDACXX_USE_ADD_LVALUE_REFERENCE_FALLBACK)
45
+
46
+ template <class _Tp>
47
+ struct add_lvalue_reference {
48
+ using type _LIBCUDACXX_NODEBUG_TYPE = __add_lvalue_reference_t<_Tp>;
49
+ };
50
+
51
+ #if _LIBCUDACXX_STD_VER > 11
52
+ template <class _Tp> using add_lvalue_reference_t = __add_lvalue_reference_t<_Tp>;
53
+ #endif
54
+
55
+ _LIBCUDACXX_END_NAMESPACE_STD
56
+
57
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ADD_LVALUE_REFERENCE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_pointer.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_POINTER_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ADD_POINTER_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/is_referenceable.h"
18
+ #include "../__type_traits/is_same.h"
19
+ #include "../__type_traits/is_void.h"
20
+ #include "../__type_traits/remove_cv.h"
21
+ #include "../__type_traits/remove_reference.h"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ #if defined(_LIBCUDACXX_ADD_POINTER) && !defined(_LIBCUDACXX_USE_ADD_POINTER_FALLBACK)
30
+
31
+ template <class _Tp>
32
+ using __add_pointer_t = _LIBCUDACXX_ADD_POINTER(_Tp);
33
+
34
+ #else
35
+ template <class _Tp,
36
+ bool = __libcpp_is_referenceable<_Tp>::value || is_void<_Tp>::value>
37
+ struct __add_pointer_impl {
38
+ typedef _LIBCUDACXX_NODEBUG_TYPE __libcpp_remove_reference_t<_Tp>* type;
39
+ };
40
+ template <class _Tp> struct __add_pointer_impl<_Tp, false>
41
+ {typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type;};
42
+
43
+ template <class _Tp>
44
+ using __add_pointer_t = typename __add_pointer_impl<_Tp>::type;
45
+
46
+ #endif // defined(_LIBCUDACXX_ADD_POINTER) && !defined(_LIBCUDACXX_USE_ADD_POINTER_FALLBACK)
47
+
48
+ template <class _Tp>
49
+ struct add_pointer {
50
+ using type _LIBCUDACXX_NODEBUG_TYPE = __add_pointer_t<_Tp>;
51
+ };
52
+
53
+ #if _LIBCUDACXX_STD_VER > 11
54
+ template <class _Tp> using add_pointer_t = __add_pointer_t<_Tp>;
55
+ #endif
56
+
57
+ _LIBCUDACXX_END_NAMESPACE_STD
58
+
59
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ADD_POINTER_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/add_volatile.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_VOLATILE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ADD_VOLATILE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS add_volatile {
24
+ typedef _LIBCUDACXX_NODEBUG_TYPE volatile _Tp type;
25
+ };
26
+
27
+ #if _LIBCUDACXX_STD_VER > 11
28
+ template <class _Tp> using add_volatile_t = typename add_volatile<_Tp>::type;
29
+ #endif
30
+
31
+ _LIBCUDACXX_END_NAMESPACE_STD
32
+
33
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ADD_VOLATILE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/aligned_storage.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ //
7
+ //===----------------------------------------------------------------------===//
8
+
9
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ALIGNED_STORAGE_H
10
+ #define _LIBCUDACXX___TYPE_TRAITS_ALIGNED_STORAGE_H
11
+
12
+ #ifndef __cuda_std__
13
+ #include <__config>
14
+ #endif // __cuda_std__
15
+
16
+ #include "../__type_traits/conditional.h"
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/nat.h"
19
+ #include "../__type_traits/type_list.h"
20
+ #include "../cstddef"
21
+
22
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
23
+ #pragma GCC system_header
24
+ #endif
25
+
26
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
27
+
28
+ template <class _Tp>
29
+ struct __align_type
30
+ {
31
+ static const size_t value = _LIBCUDACXX_PREFERRED_ALIGNOF(_Tp);
32
+ typedef _Tp type;
33
+ };
34
+
35
+ struct __struct_double {long double __lx;};
36
+ struct __struct_double4 {double __lx[4];};
37
+
38
+ typedef
39
+ __type_list<__align_type<unsigned char>,
40
+ __type_list<__align_type<unsigned short>,
41
+ __type_list<__align_type<unsigned int>,
42
+ __type_list<__align_type<unsigned long>,
43
+ __type_list<__align_type<unsigned long long>,
44
+ __type_list<__align_type<double>,
45
+ __type_list<__align_type<long double>,
46
+ __type_list<__align_type<__struct_double>,
47
+ __type_list<__align_type<__struct_double4>,
48
+ __type_list<__align_type<int*>,
49
+ __nat
50
+ > > > > > > > > > > __all_types;
51
+
52
+ template <size_t _Align>
53
+ struct _ALIGNAS(_Align) __fallback_overaligned {};
54
+
55
+ template <class _TL, size_t _Align> struct __find_pod;
56
+
57
+ template <class _Hp, size_t _Align>
58
+ struct __find_pod<__type_list<_Hp, __nat>, _Align>
59
+ {
60
+ typedef __conditional_t<_Align == _Hp::value, typename _Hp::type, __fallback_overaligned<_Align> > type;
61
+ };
62
+
63
+ template <class _Hp, class _Tp, size_t _Align>
64
+ struct __find_pod<__type_list<_Hp, _Tp>, _Align>
65
+ {
66
+ typedef __conditional_t<_Align == _Hp::value, typename _Hp::type, typename __find_pod<_Tp, _Align>::type> type;
67
+ };
68
+
69
+ template <class _TL, size_t _Len> struct __find_max_align;
70
+
71
+ template <class _Hp, size_t _Len>
72
+ struct __find_max_align<__type_list<_Hp, __nat>, _Len> : public integral_constant<size_t, _Hp::value> {};
73
+
74
+ template <size_t _Len, size_t _A1, size_t _A2>
75
+ struct __select_align
76
+ {
77
+ private:
78
+ static const size_t __min = _A2 < _A1 ? _A2 : _A1;
79
+ static const size_t __max = _A1 < _A2 ? _A2 : _A1;
80
+ public:
81
+ static const size_t value = _Len < __max ? __min : __max;
82
+ };
83
+
84
+ template <class _Hp, class _Tp, size_t _Len>
85
+ struct __find_max_align<__type_list<_Hp, _Tp>, _Len>
86
+ : public integral_constant<size_t, __select_align<_Len, _Hp::value, __find_max_align<_Tp, _Len>::value>::value> {};
87
+
88
+ template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
89
+ struct _LIBCUDACXX_TEMPLATE_VIS aligned_storage
90
+ {
91
+ typedef typename __find_pod<__all_types, _Align>::type _Aligner;
92
+ union type
93
+ {
94
+ _Aligner __align;
95
+ unsigned char __data[(_Len + _Align - 1)/_Align * _Align];
96
+ };
97
+ };
98
+
99
+ #if _LIBCUDACXX_STD_VER > 11
100
+ template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
101
+ using aligned_storage_t = typename aligned_storage<_Len, _Align>::type;
102
+ #endif
103
+
104
+ #define _CREATE_ALIGNED_STORAGE_SPECIALIZATION(n) \
105
+ template <size_t _Len>\
106
+ struct _LIBCUDACXX_TEMPLATE_VIS aligned_storage<_Len, n>\
107
+ {\
108
+ struct _ALIGNAS(n) type\
109
+ {\
110
+ unsigned char __lx[(_Len + n - 1)/n * n];\
111
+ };\
112
+ }
113
+
114
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1);
115
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2);
116
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4);
117
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x8);
118
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x10);
119
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x20);
120
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x40);
121
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x80);
122
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x100);
123
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x200);
124
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x400);
125
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x800);
126
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1000);
127
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2000);
128
+ // PE/COFF does not support alignment beyond 8192 (=0x2000)
129
+ #if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF)
130
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4000);
131
+ #endif // !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF)
132
+
133
+ #undef _CREATE_ALIGNED_STORAGE_SPECIALIZATION
134
+
135
+ _LIBCUDACXX_END_NAMESPACE_STD
136
+
137
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ALIGNED_STORAGE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/alignment_of.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ALIGNMENT_OF_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ALIGNMENT_OF_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../cstddef"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS alignment_of
27
+ : public integral_constant<size_t, _LIBCUDACXX_ALIGNOF(_Tp)> {};
28
+
29
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
30
+ template <class _Tp>
31
+ _LIBCUDACXX_INLINE_VAR constexpr size_t alignment_of_v = _LIBCUDACXX_ALIGNOF(_Tp);
32
+ #endif
33
+
34
+ _LIBCUDACXX_END_NAMESPACE_STD
35
+
36
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ALIGNMENT_OF_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/can_extract_key.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_CAN_EXTRACT_KEY_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_CAN_EXTRACT_KEY_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__fwd/pair.h"
18
+ #include "../__type_traits/conditional.h"
19
+ #include "../__type_traits/integral_constant.h"
20
+ #include "../__type_traits/is_same.h"
21
+ #include "../__type_traits/remove_const.h"
22
+ #include "../__type_traits/remove_const_ref.h"
23
+
24
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
25
+ #pragma GCC system_header
26
+ #endif
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ // These traits are used in __tree and __hash_table
31
+ struct __extract_key_fail_tag {};
32
+ struct __extract_key_self_tag {};
33
+ struct __extract_key_first_tag {};
34
+
35
+ template <class _ValTy, class _Key, class _RawValTy = __remove_const_ref_t<_ValTy> >
36
+ struct __can_extract_key
37
+ : __conditional_t<_IsSame<_RawValTy, _Key>::value, __extract_key_self_tag, __extract_key_fail_tag> {};
38
+
39
+ template <class _Pair, class _Key, class _First, class _Second>
40
+ struct __can_extract_key<_Pair, _Key, pair<_First, _Second> >
41
+ : __conditional_t<_IsSame<__remove_const_t<_First>, _Key>::value, __extract_key_first_tag, __extract_key_fail_tag> {
42
+ };
43
+
44
+ // __can_extract_map_key uses true_type/false_type instead of the tags.
45
+ // It returns true if _Key != _ContainerValueTy (the container is a map not a set)
46
+ // and _ValTy == _Key.
47
+ template <class _ValTy, class _Key, class _ContainerValueTy,
48
+ class _RawValTy = __remove_const_ref_t<_ValTy> >
49
+ struct __can_extract_map_key
50
+ : integral_constant<bool, _IsSame<_RawValTy, _Key>::value> {};
51
+
52
+ // This specialization returns __extract_key_fail_tag for non-map containers
53
+ // because _Key == _ContainerValueTy
54
+ template <class _ValTy, class _Key, class _RawValTy>
55
+ struct __can_extract_map_key<_ValTy, _Key, _Key, _RawValTy>
56
+ : false_type {};
57
+
58
+ _LIBCUDACXX_END_NAMESPACE_STD
59
+
60
+ #endif // _LIBCUDACXX___TYPE_TRAITS_CAN_EXTRACT_KEY_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/common_type.h ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_COMMON_TYPE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_COMMON_TYPE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/decay.h"
18
+ #include "../__type_traits/is_same.h"
19
+ #include "../__type_traits/remove_cvref.h"
20
+ #include "../__type_traits/void_t.h"
21
+ #include "../__utility/declval.h"
22
+
23
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
24
+ #pragma GCC system_header
25
+ #endif
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ template <class... _Tp>
30
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type;
31
+
32
+ template <class ..._Tp>
33
+ using __common_type_t = typename common_type<_Tp...>::type;
34
+
35
+ // Let COND_RES(X, Y) be:
36
+ template <class _Tp, class _Up>
37
+ using __cond_type = decltype(false ? declval<_Tp>() : declval<_Up>());
38
+
39
+ #if _LIBCUDACXX_STD_VER > 17
40
+ template <class _Tp, class _Up, class = void>
41
+ struct __common_type3 {};
42
+
43
+ // sub-bullet 4 - "if COND_RES(CREF(D1), CREF(D2)) denotes a type..."
44
+ template <class _Tp, class _Up>
45
+ struct __common_type3<_Tp, _Up, void_t<__cond_type<const _Tp&, const _Up&>>>
46
+ {
47
+ using type = remove_cvref_t<__cond_type<const _Tp&, const _Up&>>;
48
+ };
49
+
50
+ template <class _Tp, class _Up, class = void>
51
+ struct __common_type2_imp : __common_type3<_Tp, _Up> {};
52
+ #else
53
+ template <class _Tp, class _Up, class = void>
54
+ struct __common_type2_imp {};
55
+ #endif
56
+
57
+ // sub-bullet 3 - "if decay_t<decltype(false ? declval<D1>() : declval<D2>())> ..."
58
+ template <class _Tp, class _Up>
59
+ struct __common_type2_imp<_Tp, _Up, __void_t<__cond_type<_Tp, _Up>>>
60
+ {
61
+ typedef _LIBCUDACXX_NODEBUG_TYPE __decay_t<__cond_type<_Tp, _Up>> type;
62
+ };
63
+
64
+ template <class, class = void>
65
+ struct __common_type_impl {};
66
+
67
+ template <class... _Tp>
68
+ struct __common_types;
69
+
70
+ template <class _Tp, class _Up>
71
+ struct __common_type_impl<
72
+ __common_types<_Tp, _Up>, __void_t<__common_type_t<_Tp, _Up>> >
73
+ {
74
+ typedef __common_type_t<_Tp, _Up> type;
75
+ };
76
+
77
+ template <class _Tp, class _Up, class _Vp, class... _Rest>
78
+ struct __common_type_impl<__common_types<_Tp, _Up, _Vp, _Rest...>, __void_t<__common_type_t<_Tp, _Up>> >
79
+ : __common_type_impl<__common_types<__common_type_t<_Tp, _Up>, _Vp, _Rest...>> {};
80
+
81
+ // bullet 1 - sizeof...(Tp) == 0
82
+
83
+ template <>
84
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<> {};
85
+
86
+ // bullet 2 - sizeof...(Tp) == 1
87
+
88
+ template <class _Tp>
89
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<_Tp>
90
+ : public common_type<_Tp, _Tp> {};
91
+
92
+ // bullet 3 - sizeof...(Tp) == 2
93
+
94
+ // sub-bullet 1 - "If is_same_v<T1, D1> is false or ..."
95
+ template <class _Tp, class _Up, class _D1 = __decay_t<_Tp>, class _D2 = __decay_t<_Up>>
96
+ struct __common_type2 : common_type<_D1, _D2> {};
97
+
98
+ template <class _Tp, class _Up>
99
+ struct __common_type2<_Tp, _Up, _Tp, _Up> : __common_type2_imp<_Tp, _Up> {};
100
+
101
+ template <class _Tp, class _Up>
102
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<_Tp, _Up>
103
+ : __common_type2<_Tp, _Up> {};
104
+
105
+ // bullet 4 - sizeof...(Tp) > 2
106
+
107
+ template <class _Tp, class _Up, class _Vp, class... _Rest>
108
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<_Tp, _Up, _Vp, _Rest...>
109
+ : __common_type_impl<__common_types<_Tp, _Up, _Vp, _Rest...> > {};
110
+
111
+ #if _LIBCUDACXX_STD_VER > 11
112
+ template <class ..._Tp> using common_type_t = typename common_type<_Tp...>::type;
113
+
114
+ template<class, class, class = void>
115
+ _LIBCUDACXX_INLINE_VAR constexpr bool __has_common_type = false;
116
+
117
+ template<class _Tp, class _Up>
118
+ _LIBCUDACXX_INLINE_VAR constexpr bool __has_common_type<_Tp, _Up, void_t<common_type_t<_Tp, _Up>>> = true;
119
+ #endif
120
+
121
+ _LIBCUDACXX_END_NAMESPACE_STD
122
+
123
+ #endif // _LIBCUDACXX___TYPE_TRAITS_COMMON_TYPE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/conditional.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_CONDITIONAL_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_CONDITIONAL_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <bool>
24
+ struct _IfImpl;
25
+
26
+ template <>
27
+ struct _IfImpl<true> {
28
+ template <class _IfRes, class _ElseRes>
29
+ using _Select _LIBCUDACXX_NODEBUG_TYPE = _IfRes;
30
+ };
31
+
32
+ template <>
33
+ struct _IfImpl<false> {
34
+ template <class _IfRes, class _ElseRes>
35
+ using _Select _LIBCUDACXX_NODEBUG_TYPE = _ElseRes;
36
+ };
37
+
38
+ template <bool _Cond, class _IfRes, class _ElseRes>
39
+ using _If _LIBCUDACXX_NODEBUG_TYPE = typename _IfImpl<_Cond>::template _Select<_IfRes, _ElseRes>;
40
+
41
+ template <bool _Bp, class _If, class _Then>
42
+ struct _LIBCUDACXX_TEMPLATE_VIS conditional {typedef _If type;};
43
+ template <class _If, class _Then>
44
+ struct _LIBCUDACXX_TEMPLATE_VIS conditional<false, _If, _Then> {typedef _Then type;};
45
+
46
+ #if _LIBCUDACXX_STD_VER > 11
47
+ template <bool _Bp, class _IfRes, class _ElseRes>
48
+ using conditional_t = typename conditional<_Bp, _IfRes, _ElseRes>::type;
49
+ #endif
50
+
51
+ // Helper so we can use "conditional_t" in all language versions.
52
+ template <bool _Bp, class _If, class _Then> using __conditional_t = typename conditional<_Bp, _If, _Then>::type;
53
+
54
+ _LIBCUDACXX_END_NAMESPACE_STD
55
+
56
+ #endif // _LIBCUDACXX___TYPE_TRAITS_CONDITIONAL_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/enable_if.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ENABLE_IF_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ENABLE_IF_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <bool, class _Tp = void> struct _LIBCUDACXX_TEMPLATE_VIS enable_if {};
24
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS enable_if<true, _Tp> {typedef _Tp type;};
25
+
26
+ template <bool _Bp, class _Tp = void> using __enable_if_t _LIBCUDACXX_NODEBUG_TYPE = typename enable_if<_Bp, _Tp>::type;
27
+
28
+ #if _LIBCUDACXX_STD_VER > 11
29
+ template <bool _Bp, class _Tp = void> using enable_if_t = typename enable_if<_Bp, _Tp>::type;
30
+ #endif
31
+
32
+ _LIBCUDACXX_END_NAMESPACE_STD
33
+
34
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ENABLE_IF_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/has_virtual_destructor.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_HAS_VIRTUAL_DESTRUCTOR_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_HAS_VIRTUAL_DESTRUCTOR_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ #if defined(_LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR) && !defined(_LIBCUDACXX_USE_HAS_VIRTUAL_DESTRUCTOR_FALLBACK)
26
+
27
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS has_virtual_destructor
28
+ : public integral_constant<bool, _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(_Tp)> {};
29
+
30
+ #else
31
+
32
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS has_virtual_destructor
33
+ : public false_type {};
34
+
35
+ #endif // defined(_LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR) && !defined(_LIBCUDACXX_USE_HAS_VIRTUAL_DESTRUCTOR_FALLBACK)
36
+
37
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
38
+ template <class _Tp>
39
+ _LIBCUDACXX_INLINE_VAR constexpr bool has_virtual_destructor_v
40
+ = has_virtual_destructor<_Tp>::value;
41
+ #endif
42
+
43
+ _LIBCUDACXX_END_NAMESPACE_STD
44
+
45
+ #endif // _LIBCUDACXX___TYPE_TRAITS_HAS_VIRTUAL_DESTRUCTOR_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/integral_constant.h ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_INTEGRAL_CONSTANT_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_INTEGRAL_CONSTANT_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
18
+ #pragma GCC system_header
19
+ #endif
20
+
21
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
22
+
23
+ template <class _Tp, _Tp __v>
24
+ struct _LIBCUDACXX_TEMPLATE_VIS integral_constant
25
+ {
26
+ static _LIBCUDACXX_CONSTEXPR const _Tp value = __v;
27
+ typedef _Tp value_type;
28
+ typedef integral_constant type;
29
+ _LIBCUDACXX_INLINE_VISIBILITY
30
+ _LIBCUDACXX_CONSTEXPR operator value_type() const _NOEXCEPT {return value;}
31
+ #if _LIBCUDACXX_STD_VER > 11
32
+ _LIBCUDACXX_INLINE_VISIBILITY
33
+ constexpr value_type operator ()() const _NOEXCEPT {return value;}
34
+ #endif
35
+ };
36
+
37
+ template <class _Tp, _Tp __v>
38
+ _LIBCUDACXX_CONSTEXPR const _Tp integral_constant<_Tp, __v>::value;
39
+
40
+ typedef integral_constant<bool, true> true_type;
41
+ typedef integral_constant<bool, false> false_type;
42
+
43
+ template <bool _Val>
44
+ using _BoolConstant _LIBCUDACXX_NODEBUG_TYPE = integral_constant<bool, _Val>;
45
+
46
+ #if _LIBCUDACXX_STD_VER > 11
47
+ template <bool __b>
48
+ using bool_constant = integral_constant<bool, __b>;
49
+ #endif
50
+
51
+ #if _LIBCUDACXX_STD_VER > 11
52
+ #define _LIBCUDACXX_BOOL_CONSTANT(__b) bool_constant<(__b)>
53
+ #else
54
+ #define _LIBCUDACXX_BOOL_CONSTANT(__b) integral_constant<bool,(__b)>
55
+ #endif
56
+
57
+ _LIBCUDACXX_END_NAMESPACE_STD
58
+
59
+ #endif // _LIBCUDACXX___TYPE_TRAITS_INTEGRAL_CONSTANT_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_abstract.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ABSTRACT_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ABSTRACT_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_abstract
26
+ : public integral_constant<bool, __is_abstract(_Tp)> {};
27
+
28
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
29
+ template <class _Tp>
30
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_abstract_v = __is_abstract(_Tp);
31
+ #endif
32
+
33
+ _LIBCUDACXX_END_NAMESPACE_STD
34
+
35
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ABSTRACT_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_array.h ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ARRAY_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ARRAY_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../cstddef"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ // TODO: Clang incorrectly reports that __is_array is true for T[0].
27
+ // Re-enable the branch once https://llvm.org/PR54705 is fixed.
28
+ #if defined(_LIBCUDACXX_IS_ARRAY) && !defined(_LIBCUDACXX_USE_IS_ARRAY_FALLBACK)
29
+
30
+ template <class _Tp>
31
+ struct _LIBCUDACXX_TEMPLATE_VIS is_array
32
+ : public integral_constant<bool, _LIBCUDACXX_IS_ARRAY(_Tp)>
33
+ {};
34
+
35
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
36
+ template <class _Tp>
37
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_array_v = _LIBCUDACXX_IS_ARRAY(_Tp);
38
+ #endif
39
+
40
+ #else
41
+
42
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_array
43
+ : public false_type {};
44
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_array<_Tp[]>
45
+ : public true_type {};
46
+ template <class _Tp, size_t _Np> struct _LIBCUDACXX_TEMPLATE_VIS is_array<_Tp[_Np]>
47
+ : public true_type {};
48
+
49
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
50
+ template <class _Tp>
51
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_array_v = is_array<_Tp>::value;
52
+ #endif
53
+
54
+ #endif // defined(_LIBCUDACXX_IS_ARRAY) && !defined(_LIBCUDACXX_USE_IS_ARRAY_FALLBACK)
55
+
56
+ _LIBCUDACXX_END_NAMESPACE_STD
57
+
58
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ARRAY_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_assignable.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ASSIGNABLE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ASSIGNABLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_void.h"
19
+ #include "../__utility/declval.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template<typename, typename _Tp> struct __select_2nd { typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type; };
28
+
29
+ #if defined(_LIBCUDACXX_IS_ASSIGNABLE) && !defined(_LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK)
30
+
31
+ template <class _T1, class _T2> struct _LIBCUDACXX_TEMPLATE_VIS is_assignable
32
+ : public integral_constant<bool, _LIBCUDACXX_IS_ASSIGNABLE(_T1, _T2)>
33
+ {};
34
+
35
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
36
+ template <class _T1, class _T2>
37
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_assignable_v = _LIBCUDACXX_IS_ASSIGNABLE(_T1, _T2);
38
+ #endif
39
+
40
+ #else
41
+
42
+ template <class _Tp, class _Arg>
43
+ _LIBCUDACXX_INLINE_VISIBILITY
44
+ typename __select_2nd<decltype((_CUDA_VSTD::declval<_Tp>() = _CUDA_VSTD::declval<_Arg>())), true_type>::type
45
+ __is_assignable_test(int);
46
+
47
+ template <class, class>
48
+ _LIBCUDACXX_INLINE_VISIBILITY
49
+ false_type __is_assignable_test(...);
50
+
51
+ template <class _Tp, class _Arg, bool = is_void<_Tp>::value || is_void<_Arg>::value>
52
+ struct __is_assignable_imp
53
+ : public decltype((_CUDA_VSTD::__is_assignable_test<_Tp, _Arg>(0))) {};
54
+
55
+ template <class _Tp, class _Arg>
56
+ struct __is_assignable_imp<_Tp, _Arg, true>
57
+ : public false_type
58
+ {
59
+ };
60
+
61
+ template <class _Tp, class _Arg>
62
+ struct is_assignable
63
+ : public __is_assignable_imp<_Tp, _Arg> {};
64
+
65
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
66
+ template <class _Tp, class _Arg>
67
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_assignable_v = is_assignable<_Tp, _Arg>::value;
68
+ #endif
69
+
70
+ #endif // defined(_LIBCUDACXX_IS_ASSIGNABLE) && !defined(_LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK)
71
+
72
+ _LIBCUDACXX_END_NAMESPACE_STD
73
+
74
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ASSIGNABLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_base_of.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_BASE_OF_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_BASE_OF_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ #if defined(_LIBCUDACXX_IS_BASE_OF) && !defined(_LIBCUDACXX_USE_IS_BASE_OF_FALLBACK)
26
+
27
+ template <class _Bp, class _Dp>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS is_base_of
29
+ : public integral_constant<bool, _LIBCUDACXX_IS_BASE_OF(_Bp, _Dp)> {};
30
+
31
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
32
+ template <class _Bp, class _Dp>
33
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_base_of_v = _LIBCUDACXX_IS_BASE_OF(_Bp, _Dp);
34
+ #endif
35
+
36
+ #else // defined(_LIBCUDACXX_IS_BASE_OF) && !defined(_LIBCUDACXX_USE_IS_BASE_OF_FALLBACK)
37
+
38
+ namespace __is_base_of_imp
39
+ {
40
+ template <class _Tp>
41
+ struct _Dst
42
+ {
43
+ _Dst(const volatile _Tp &);
44
+ };
45
+ template <class _Tp>
46
+ struct _Src
47
+ {
48
+ operator const volatile _Tp &();
49
+ template <class _Up> operator const _Dst<_Up> &();
50
+ };
51
+ template <size_t> struct __one { typedef char type; };
52
+ template <class _Bp, class _Dp> typename __one<sizeof(_Dst<_Bp>(declval<_Src<_Dp> >()))>::type __test(int);
53
+ template <class _Bp, class _Dp> __two __test(...);
54
+ }
55
+
56
+ template <class _Bp, class _Dp>
57
+ struct _LIBCUDACXX_TEMPLATE_VIS is_base_of
58
+ : public integral_constant<bool, is_class<_Bp>::value &&
59
+ sizeof(__is_base_of_imp::__test<_Bp, _Dp>(0)) == 2> {};
60
+
61
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
62
+ template <class _Bp, class _Dp>
63
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_base_of_v = is_base_of<_Bp, _Dp>::value;
64
+ #endif
65
+
66
+ #endif // defined(_LIBCUDACXX_IS_BASE_OF) && !defined(_LIBCUDACXX_USE_IS_BASE_OF_FALLBACK)
67
+
68
+ _LIBCUDACXX_END_NAMESPACE_STD
69
+
70
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_BASE_OF_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_callable.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_CALLABLE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_CALLABLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__utility/declval.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ template<class _Func, class... _Args, class = decltype(std::declval<_Func>()(std::declval<_Args>()...))>
27
+ _LIBCUDACXX_INLINE_VISIBILITY true_type __is_callable_helper(int);
28
+ template<class...>
29
+ _LIBCUDACXX_INLINE_VISIBILITY false_type __is_callable_helper(...);
30
+
31
+ template<class _Func, class... _Args>
32
+ struct __is_callable : decltype(__is_callable_helper<_Func, _Args...>(0)) {};
33
+
34
+ _LIBCUDACXX_END_NAMESPACE_STD
35
+
36
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_CALLABLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_compound.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_COMPOUND_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_COMPOUND_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_fundamental.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ #if defined(_LIBCUDACXX_IS_COMPOUND) && !defined(_LIBCUDACXX_USE_IS_COMPOUND_FALLBACK)
27
+
28
+ template<class _Tp>
29
+ struct _LIBCUDACXX_TEMPLATE_VIS is_compound
30
+ : public integral_constant<bool, _LIBCUDACXX_IS_COMPOUND(_Tp)>
31
+ {};
32
+
33
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
34
+ template <class _Tp>
35
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_compound_v = _LIBCUDACXX_IS_COMPOUND(_Tp);
36
+ #endif
37
+
38
+ #else
39
+
40
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_compound
41
+ : public integral_constant<bool, !is_fundamental<_Tp>::value> {};
42
+
43
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
44
+ template <class _Tp>
45
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_compound_v = is_compound<_Tp>::value;
46
+ #endif
47
+
48
+ #endif // defined(_LIBCUDACXX_IS_COMPOUND) && !defined(_LIBCUDACXX_USE_IS_COMPOUND_FALLBACK)
49
+
50
+ _LIBCUDACXX_END_NAMESPACE_STD
51
+
52
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_COMPOUND_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_const.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_CONST_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_CONST_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ #if defined(_LIBCUDACXX_IS_CONST) && !defined(_LIBCUDACXX_USE_IS_CONST_FALLBACK)
26
+
27
+ template <class _Tp>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS is_const
29
+ : public integral_constant<bool, _LIBCUDACXX_IS_CONST(_Tp)>
30
+ {};
31
+
32
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
33
+ template <class _Tp>
34
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_const_v = _LIBCUDACXX_IS_CONST(_Tp);
35
+ #endif
36
+
37
+ #else
38
+
39
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_const : public false_type {};
40
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_const<_Tp const> : public true_type {};
41
+
42
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
43
+ template <class _Tp>
44
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_const_v = is_const<_Tp>::value;
45
+ #endif
46
+
47
+ #endif // defined(_LIBCUDACXX_IS_CONST) && !defined(_LIBCUDACXX_USE_IS_CONST_FALLBACK)
48
+
49
+ _LIBCUDACXX_END_NAMESPACE_STD
50
+
51
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_CONST_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_convertible.h ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ // SPDX-FileCopyrightText: Copyright (c) Microsoft Corporation.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_CONVERTIBLE_H
12
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_CONVERTIBLE_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__type_traits/integral_constant.h"
19
+ #include "../__type_traits/is_array.h"
20
+ #include "../__type_traits/is_function.h"
21
+ #include "../__type_traits/is_void.h"
22
+ #include "../__type_traits/remove_reference.h"
23
+ #include "../__utility/declval.h"
24
+ #include "../cstddef"
25
+
26
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
27
+ #pragma GCC system_header
28
+ #endif
29
+
30
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
31
+
32
+ #if defined(_LIBCUDACXX_IS_CONVERTIBLE_TO) && !defined(_LIBCUDACXX_USE_IS_CONVERTIBLE_FALLBACK)
33
+
34
+ template <class _T1, class _T2> struct _LIBCUDACXX_TEMPLATE_VIS is_convertible
35
+ : public integral_constant<bool, _LIBCUDACXX_IS_CONVERTIBLE_TO(_T1, _T2)> {};
36
+
37
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
38
+ template <class _T1, class _T2>
39
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_convertible_v = _LIBCUDACXX_IS_CONVERTIBLE_TO(_T1, _T2);
40
+ #endif
41
+
42
+ #ifdef _LIBCUDACXX_COMPILER_MSVC // Workaround for DevCom-1627396
43
+ template <class _Ty>
44
+ struct is_convertible<_Ty&, volatile _Ty&> : true_type {};
45
+
46
+ template <class _Ty>
47
+ struct is_convertible<volatile _Ty&, volatile _Ty&> : true_type {};
48
+
49
+ template <class _Ty>
50
+ struct is_convertible<_Ty&, const volatile _Ty&> : true_type {};
51
+
52
+ template <class _Ty>
53
+ struct is_convertible<volatile _Ty&, const volatile _Ty&> : true_type {};
54
+
55
+ template <class _Ty>
56
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_convertible_v<_Ty&, volatile _Ty&> = true;
57
+
58
+ template <class _Ty>
59
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_convertible_v<volatile _Ty&, volatile _Ty&> = true;
60
+
61
+ template <class _Ty>
62
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_convertible_v<_Ty&, const volatile _Ty&> = true;
63
+
64
+ template <class _Ty>
65
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_convertible_v<volatile _Ty&, const volatile _Ty&> = true;
66
+ #endif // _LIBCUDACXX_COMPILER_MSVC
67
+
68
+ #else // __has_builtin(__is_convertible_to) && !defined(_LIBCUDACXX_USE_IS_CONVERTIBLE_FALLBACK)
69
+
70
+ namespace __is_convertible_imp
71
+ {
72
+
73
+ _LIBCUDACXX_NV_DIAG_SUPPRESS(3013) // a volatile function parameter is deprecated
74
+ template <class _Tp> _LIBCUDACXX_INLINE_VISIBILITY void __test_convert(_Tp);
75
+ _LIBCUDACXX_NV_DIAG_DEFAULT(3013) // a volatile function parameter is deprecated
76
+
77
+ template <class _From, class _To, class = void>
78
+ struct __is_convertible_test : public false_type {};
79
+
80
+ template <class _From, class _To>
81
+ struct __is_convertible_test<_From, _To,
82
+ decltype(_CUDA_VSTD::__is_convertible_imp::__test_convert<_To>(_CUDA_VSTD::declval<_From>()))> : public true_type
83
+ {};
84
+
85
+ template <class _Tp, bool _IsArray = is_array<_Tp>::value,
86
+ bool _IsFunction = is_function<_Tp>::value,
87
+ bool _IsVoid = is_void<_Tp>::value>
88
+ struct __is_array_function_or_void {enum {value = 0};};
89
+ template <class _Tp> struct __is_array_function_or_void<_Tp, true, false, false> {enum {value = 1};};
90
+ template <class _Tp> struct __is_array_function_or_void<_Tp, false, true, false> {enum {value = 2};};
91
+ template <class _Tp> struct __is_array_function_or_void<_Tp, false, false, true> {enum {value = 3};};
92
+ }
93
+
94
+ template <class _Tp,
95
+ unsigned = __is_convertible_imp::__is_array_function_or_void<__libcpp_remove_reference_t<_Tp>>::value>
96
+ struct __is_convertible_check
97
+ {
98
+ static const size_t __v = 0;
99
+ };
100
+
101
+ template <class _Tp>
102
+ struct __is_convertible_check<_Tp, 0>
103
+ {
104
+ static const size_t __v = sizeof(_Tp);
105
+ };
106
+
107
+ template <class _T1, class _T2,
108
+ unsigned _T1_is_array_function_or_void = __is_convertible_imp::__is_array_function_or_void<_T1>::value,
109
+ unsigned _T2_is_array_function_or_void = __is_convertible_imp::__is_array_function_or_void<_T2>::value>
110
+ struct __is_convertible_fallback
111
+ : public integral_constant<bool,
112
+ __is_convertible_imp::__is_convertible_test<_T1, _T2>::value
113
+ >
114
+ {};
115
+
116
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 0, 1> : public false_type {};
117
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 1, 1> : public false_type {};
118
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 2, 1> : public false_type {};
119
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 3, 1> : public false_type {};
120
+
121
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 0, 2> : public false_type {};
122
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 1, 2> : public false_type {};
123
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 2, 2> : public false_type {};
124
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 3, 2> : public false_type {};
125
+
126
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 0, 3> : public false_type {};
127
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 1, 3> : public false_type {};
128
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 2, 3> : public false_type {};
129
+ template <class _T1, class _T2> struct __is_convertible_fallback<_T1, _T2, 3, 3> : public true_type {};
130
+
131
+ template <class _T1, class _T2> struct _LIBCUDACXX_TEMPLATE_VIS is_convertible
132
+ : public __is_convertible_fallback<_T1, _T2>
133
+ {
134
+ static const size_t __complete_check1 = __is_convertible_check<_T1>::__v;
135
+ static const size_t __complete_check2 = __is_convertible_check<_T2>::__v;
136
+ };
137
+
138
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
139
+ template <class _From, class _To>
140
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_convertible_v = is_convertible<_From, _To>::value;
141
+ #endif
142
+
143
+ #endif // __has_builtin(__is_convertible_to) && !defined(_LIBCUDACXX_USE_IS_CONVERTIBLE_FALLBACK)
144
+
145
+ _LIBCUDACXX_END_NAMESPACE_STD
146
+
147
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_CONVERTIBLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_copy_constructible.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_COPY_CONSTRUCTIBLE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_COPY_CONSTRUCTIBLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/add_const.h"
18
+ #include "../__type_traits/add_lvalue_reference.h"
19
+ #include "../__type_traits/is_constructible.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <class _Tp>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS is_copy_constructible
29
+ : public is_constructible<_Tp, __add_lvalue_reference_t<typename add_const<_Tp>::type>> {};
30
+
31
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
32
+ template <class _Tp>
33
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_copy_constructible_v = is_copy_constructible<_Tp>::value;
34
+ #endif
35
+
36
+ _LIBCUDACXX_END_NAMESPACE_STD
37
+
38
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_COPY_CONSTRUCTIBLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_empty.h ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_EMPTY_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_EMPTY_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
20
+ #pragma GCC system_header
21
+ #endif
22
+
23
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
24
+
25
+ #if defined(_LIBCUDACXX_IS_EMPTY) && !defined(_LIBCUDACXX_USE_IS_EMPTY_FALLBACK)
26
+
27
+ template <class _Tp>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS is_empty
29
+ : public integral_constant<bool, _LIBCUDACXX_IS_EMPTY(_Tp)> {};
30
+
31
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
32
+ template <class _Tp>
33
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_empty_v = _LIBCUDACXX_IS_EMPTY(_Tp);
34
+ #endif
35
+
36
+ #else
37
+
38
+ template <class _Tp>
39
+ struct __is_empty1
40
+ : public _Tp
41
+ {
42
+ double __lx;
43
+ };
44
+
45
+ struct __is_empty2
46
+ {
47
+ double __lx;
48
+ };
49
+
50
+ template <class _Tp, bool = is_class<_Tp>::value>
51
+ struct __libcpp_empty : public integral_constant<bool, sizeof(__is_empty1<_Tp>) == sizeof(__is_empty2)> {};
52
+
53
+ template <class _Tp> struct __libcpp_empty<_Tp, false> : public false_type {};
54
+
55
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_empty : public __libcpp_empty<_Tp> {};
56
+
57
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
58
+ template <class _Tp>
59
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_empty_v = is_empty<_Tp>::value;
60
+ #endif
61
+
62
+ #endif // defined(_LIBCUDACXX_IS_EMPTY) && !defined(_LIBCUDACXX_USE_IS_EMPTY_FALLBACK)
63
+
64
+ _LIBCUDACXX_END_NAMESPACE_STD
65
+
66
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_EMPTY_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_enum.h ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ENUM_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ENUM_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_array.h"
19
+ #include "../__type_traits/is_class.h"
20
+ #include "../__type_traits/is_floating_point.h"
21
+ #include "../__type_traits/is_function.h"
22
+ #include "../__type_traits/is_integral.h"
23
+ #include "../__type_traits/is_member_pointer.h"
24
+ #include "../__type_traits/is_pointer.h"
25
+ #include "../__type_traits/is_reference.h"
26
+ #include "../__type_traits/is_union.h"
27
+ #include "../__type_traits/is_void.h"
28
+
29
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
30
+ #pragma GCC system_header
31
+ #endif
32
+
33
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
34
+
35
+ #if defined(_LIBCUDACXX_IS_ENUM) && !defined(_LIBCUDACXX_USE_IS_ENUM_FALLBACK)
36
+
37
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_enum
38
+ : public integral_constant<bool, _LIBCUDACXX_IS_ENUM(_Tp)> {};
39
+
40
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
41
+ template <class _Tp>
42
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_enum_v = _LIBCUDACXX_IS_ENUM(_Tp);
43
+ #endif
44
+
45
+ #else
46
+
47
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_enum
48
+ : public integral_constant<bool, !is_void<_Tp>::value &&
49
+ !is_integral<_Tp>::value &&
50
+ !is_floating_point<_Tp>::value &&
51
+ !is_array<_Tp>::value &&
52
+ !is_pointer<_Tp>::value &&
53
+ !is_reference<_Tp>::value &&
54
+ !is_member_pointer<_Tp>::value &&
55
+ !is_union<_Tp>::value &&
56
+ !is_class<_Tp>::value &&
57
+ !is_function<_Tp>::value > {};
58
+
59
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
60
+ template <class _Tp>
61
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_enum_v
62
+ = is_enum<_Tp>::value;
63
+ #endif
64
+
65
+ #endif // defined(_LIBCUDACXX_IS_ENUM) && !defined(_LIBCUDACXX_USE_IS_ENUM_FALLBACK)
66
+
67
+ _LIBCUDACXX_END_NAMESPACE_STD
68
+
69
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ENUM_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_fundamental.h ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_FUNDAMENTAL_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_FUNDAMENTAL_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_null_pointer.h"
19
+ #include "../__type_traits/is_void.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ #if defined(_LIBCUDACXX_IS_FUNDAMENTAL) && !defined(_LIBCUDACXX_USE_IS_FUNDAMENTAL_FALLBACK)
28
+
29
+ template<class _Tp>
30
+ struct _LIBCUDACXX_TEMPLATE_VIS is_fundamental
31
+ : public integral_constant<bool, _LIBCUDACXX_IS_FUNDAMENTAL(_Tp)>
32
+ {};
33
+
34
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
35
+ template <class _Tp>
36
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_fundamental_v = _LIBCUDACXX_IS_FUNDAMENTAL(_Tp);
37
+ #endif
38
+
39
+ #else
40
+
41
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_fundamental
42
+ : public integral_constant<bool, is_void<_Tp>::value ||
43
+ __is_nullptr_t<_Tp>::value ||
44
+ is_arithmetic<_Tp>::value> {};
45
+
46
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
47
+ template <class _Tp>
48
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_fundamental_v = is_fundamental<_Tp>::value;
49
+ #endif
50
+
51
+ #endif // defined(_LIBCUDACXX_IS_FUNDAMENTAL) && !defined(_LIBCUDACXX_USE_IS_FUNDAMENTAL_FALLBACK)
52
+
53
+ _LIBCUDACXX_END_NAMESPACE_STD
54
+
55
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_FUNDAMENTAL_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_integral.h ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_INTEGRAL_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_INTEGRAL_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/remove_cv.h"
19
+
20
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
21
+ #pragma GCC system_header
22
+ #endif
23
+
24
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
25
+
26
+ #if defined(_LIBCUDACXX_IS_INTEGRAL) && !defined(_LIBCUDACXX_USE_IS_INTEGRAL_FALLBACK)
27
+
28
+ template <class _Tp>
29
+ struct _LIBCUDACXX_TEMPLATE_VIS is_integral
30
+ : public integral_constant<bool, _LIBCUDACXX_IS_INTEGRAL(_Tp)>
31
+ {};
32
+
33
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
34
+ template <class _Tp>
35
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_integral_v = _LIBCUDACXX_IS_INTEGRAL(_Tp);
36
+ #endif
37
+
38
+ #else
39
+
40
+ template <class _Tp> struct __libcpp_is_integral : public false_type {};
41
+ template <> struct __libcpp_is_integral<bool> : public true_type {};
42
+ template <> struct __libcpp_is_integral<char> : public true_type {};
43
+ template <> struct __libcpp_is_integral<signed char> : public true_type {};
44
+ template <> struct __libcpp_is_integral<unsigned char> : public true_type {};
45
+ template <> struct __libcpp_is_integral<wchar_t> : public true_type {};
46
+ #ifndef _LIBCUDACXX_NO_HAS_CHAR8_T
47
+ template <> struct __libcpp_is_integral<char8_t> : public true_type {};
48
+ #endif
49
+ #ifndef _LIBCUDACXX_HAS_NO_UNICODE_CHARS
50
+ template <> struct __libcpp_is_integral<char16_t> : public true_type {};
51
+ template <> struct __libcpp_is_integral<char32_t> : public true_type {};
52
+ #endif // _LIBCUDACXX_HAS_NO_UNICODE_CHARS
53
+ template <> struct __libcpp_is_integral<short> : public true_type {};
54
+ template <> struct __libcpp_is_integral<unsigned short> : public true_type {};
55
+ template <> struct __libcpp_is_integral<int> : public true_type {};
56
+ template <> struct __libcpp_is_integral<unsigned int> : public true_type {};
57
+ template <> struct __libcpp_is_integral<long> : public true_type {};
58
+ template <> struct __libcpp_is_integral<unsigned long> : public true_type {};
59
+ template <> struct __libcpp_is_integral<long long> : public true_type {};
60
+ template <> struct __libcpp_is_integral<unsigned long long> : public true_type {};
61
+ #ifndef _LIBCUDACXX_HAS_NO_INT128
62
+ template <> struct __libcpp_is_integral<__int128_t> : public true_type {};
63
+ template <> struct __libcpp_is_integral<__uint128_t> : public true_type {};
64
+ #endif
65
+
66
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_integral
67
+ : public integral_constant<bool, __libcpp_is_integral<__remove_cv_t<_Tp> >::value>
68
+ {};
69
+
70
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
71
+ template <class _Tp>
72
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_integral_v = is_integral<_Tp>::value;
73
+ #endif
74
+
75
+ #endif // defined(_LIBCUDACXX_IS_INTEGRAL) && !defined(_LIBCUDACXX_USE_IS_INTEGRAL_FALLBACK)
76
+
77
+ _LIBCUDACXX_END_NAMESPACE_STD
78
+
79
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_INTEGRAL_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_member_function_pointer.h ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_MEMBER_FUNCTION_POINTER_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_MEMBER_FUNCTION_POINTER_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_function.h"
19
+ #include "../__type_traits/remove_cv.h"
20
+ #include "../cstddef"
21
+
22
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
23
+ #pragma GCC system_header
24
+ #endif
25
+
26
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
27
+
28
+ #if defined(_LIBCUDACXX_IS_MEMBER_FUNCTION_POINTER) && !defined(_LIBCUDACXX_USE_IS_MEMBER_FUNCTION_POINTER_FALLBACK)
29
+
30
+ template<class _Tp>
31
+ struct _LIBCUDACXX_TEMPLATE_VIS is_member_function_pointer
32
+ : public integral_constant<bool, _LIBCUDACXX_IS_MEMBER_FUNCTION_POINTER(_Tp)>
33
+ {};
34
+
35
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
36
+ template <class _Tp>
37
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_member_function_pointer_v = _LIBCUDACXX_IS_MEMBER_FUNCTION_POINTER(_Tp);
38
+ #endif
39
+
40
+ #else
41
+
42
+ template <class _Tp> struct __libcpp_is_member_pointer {
43
+ enum {
44
+ __is_member = false,
45
+ __is_func = false,
46
+ __is_obj = false
47
+ };
48
+ };
49
+ template <class _Tp, class _Up> struct __libcpp_is_member_pointer<_Tp _Up::*> {
50
+ enum {
51
+ __is_member = true,
52
+ __is_func = is_function<_Tp>::value,
53
+ __is_obj = !__is_func,
54
+ };
55
+ };
56
+
57
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_member_function_pointer
58
+ : public integral_constant<bool, __libcpp_is_member_pointer<__remove_cv_t<_Tp> >::__is_func >
59
+ {};
60
+
61
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
62
+ template <class _Tp>
63
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_member_function_pointer_v = is_member_function_pointer<_Tp>::value;
64
+ #endif
65
+
66
+ #endif // defined(_LIBCUDACXX_IS_MEMBER_FUNCTION_POINTER) && !defined(_LIBCUDACXX_USE_IS_MEMBER_FUNCTION_POINTER_FALLBACK)
67
+
68
+ _LIBCUDACXX_END_NAMESPACE_STD
69
+
70
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_MEMBER_FUNCTION_POINTER_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_move_assignable.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_MOVE_ASSIGNABLE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_MOVE_ASSIGNABLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/add_lvalue_reference.h"
18
+ #include "../__type_traits/add_rvalue_reference.h"
19
+ #include "../__type_traits/is_assignable.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <class _Tp>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS is_move_assignable
29
+ : public is_assignable<__add_lvalue_reference_t<_Tp>, __add_rvalue_reference_t<_Tp>> {};
30
+
31
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
32
+ template <class _Tp>
33
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_move_assignable_v = is_move_assignable<_Tp>::value;
34
+ #endif
35
+
36
+ _LIBCUDACXX_END_NAMESPACE_STD
37
+
38
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_MOVE_ASSIGNABLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_move_constructible.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_MOVE_CONSTRUCTIBLE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_MOVE_CONSTRUCTIBLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/add_lvalue_reference.h"
18
+ #include "../__type_traits/add_rvalue_reference.h"
19
+ #include "../__type_traits/is_constructible.h"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <class _Tp>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS is_move_constructible
29
+ : public is_constructible<_Tp, __add_rvalue_reference_t<_Tp>>
30
+ { };
31
+
32
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
33
+ template <class _Tp>
34
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_move_constructible_v = is_move_constructible<_Tp>::value;
35
+ #endif
36
+
37
+ _LIBCUDACXX_END_NAMESPACE_STD
38
+
39
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_MOVE_CONSTRUCTIBLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_nothrow_assignable.h ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_NOTHROW_ASSIGNABLE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_NOTHROW_ASSIGNABLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_assignable.h"
19
+ #include "../__type_traits/is_scalar.h"
20
+ #include "../__utility/declval.h"
21
+
22
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
23
+ #pragma GCC system_header
24
+ #endif
25
+
26
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
27
+
28
+ #if defined(_LIBCUDACXX_IS_NOTHROW_ASSIGNABLE) && !defined(_LIBCUDACXX_USE_IS_NOTHROW_ASSIGNABLE_FALLBACK)
29
+
30
+ template <class _Tp, class _Arg>
31
+ struct _LIBCUDACXX_TEMPLATE_VIS is_nothrow_assignable
32
+ : public integral_constant<bool, _LIBCUDACXX_IS_NOTHROW_ASSIGNABLE(_Tp, _Arg)> {};
33
+
34
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
35
+ template <class _Tp, class _Arg>
36
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_nothrow_assignable_v = _LIBCUDACXX_IS_NOTHROW_ASSIGNABLE(_Tp, _Arg);
37
+ #endif
38
+
39
+ #elif !defined(_LIBCUDACXX_HAS_NO_NOEXCEPT) && !defined(_LIBCUDACXX_HAS_NO_NOEXCEPT_SFINAE)
40
+
41
+ template <bool, class _Tp, class _Arg> struct __libcpp_is_nothrow_assignable;
42
+
43
+ template <class _Tp, class _Arg>
44
+ struct __libcpp_is_nothrow_assignable<false, _Tp, _Arg>
45
+ : public false_type
46
+ { };
47
+
48
+ template <class _Tp, class _Arg>
49
+ struct __libcpp_is_nothrow_assignable<true, _Tp, _Arg>
50
+ : public integral_constant<bool, noexcept(_CUDA_VSTD::declval<_Tp>() = _CUDA_VSTD::declval<_Arg>()) >
51
+ { };
52
+
53
+ template <class _Tp, class _Arg>
54
+ struct _LIBCUDACXX_TEMPLATE_VIS is_nothrow_assignable
55
+ : public __libcpp_is_nothrow_assignable<is_assignable<_Tp, _Arg>::value, _Tp, _Arg>
56
+ { };
57
+
58
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
59
+ template <class _Tp, class _Arg>
60
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_nothrow_assignable_v = is_nothrow_assignable<_Tp, _Arg>::value;
61
+ #endif
62
+
63
+ #else
64
+
65
+ template <class _Tp, class _Arg>
66
+ struct _LIBCUDACXX_TEMPLATE_VIS is_nothrow_assignable
67
+ : public false_type {};
68
+
69
+ template <class _Tp>
70
+ struct _LIBCUDACXX_TEMPLATE_VIS is_nothrow_assignable<_Tp&, _Tp>
71
+ #if defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
72
+ : integral_constant<bool, _LIBCUDACXX_HAS_NOTHROW_ASSIGN(_Tp)> {};
73
+ #else
74
+ : integral_constant<bool, is_scalar<_Tp>::value> {};
75
+ #endif // defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
76
+
77
+ template <class _Tp>
78
+ struct _LIBCUDACXX_TEMPLATE_VIS is_nothrow_assignable<_Tp&, _Tp&>
79
+ #if defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
80
+ : integral_constant<bool, _LIBCUDACXX_HAS_NOTHROW_ASSIGN(_Tp)> {};
81
+ #else
82
+ : integral_constant<bool, is_scalar<_Tp>::value> {};
83
+ #endif // defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
84
+
85
+ template <class _Tp>
86
+ struct _LIBCUDACXX_TEMPLATE_VIS is_nothrow_assignable<_Tp&, const _Tp&>
87
+ #if defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
88
+ : integral_constant<bool, _LIBCUDACXX_HAS_NOTHROW_ASSIGN(_Tp)> {};
89
+ #else
90
+ : integral_constant<bool, is_scalar<_Tp>::value> {};
91
+ #endif // defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
92
+
93
+ #ifndef _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES
94
+
95
+ template <class _Tp>
96
+ struct is_nothrow_assignable<_Tp&, _Tp&&>
97
+ #if defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
98
+ : integral_constant<bool, _LIBCUDACXX_HAS_NOTHROW_ASSIGN(_Tp)> {};
99
+ #else
100
+ : integral_constant<bool, is_scalar<_Tp>::value> {};
101
+ #endif // defined(_LIBCUDACXX_HAS_NOTHROW_ASSIGN) && !defined(_LIBCUDACXX_USE_HAS_NOTHROW_ASSIGN_FALLBACK)
102
+
103
+ #endif // _LIBCUDACXX_HAS_NO_RVALUE_REFERENCES
104
+
105
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
106
+ template <class _Tp, class _Arg>
107
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_nothrow_assignable_v = is_nothrow_assignable<_Tp, _Arg>::value;
108
+ #endif
109
+
110
+ #endif // !defined(_LIBCUDACXX_HAS_NO_NOEXCEPT)
111
+
112
+ _LIBCUDACXX_END_NAMESPACE_STD
113
+
114
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_NOTHROW_ASSIGNABLE_H
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/cuda/std/detail/libcxx/include/__type_traits/is_null_pointer.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_NULL_POINTER_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_NULL_POINTER_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/remove_cv.h"
19
+ #include "../cstddef"
20
+
21
+ #if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
22
+ #pragma GCC system_header
23
+ #endif
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <class _Tp> struct __is_nullptr_t_impl : public false_type {};
28
+ template <> struct __is_nullptr_t_impl<nullptr_t> : public true_type {};
29
+
30
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS __is_nullptr_t
31
+ : public __is_nullptr_t_impl<__remove_cv_t<_Tp> > {};
32
+
33
+ #if _LIBCUDACXX_STD_VER > 11
34
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_null_pointer
35
+ : public __is_nullptr_t_impl<__remove_cv_t<_Tp> > {};
36
+ #endif
37
+
38
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
39
+ template <class _Tp>
40
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_null_pointer_v = is_null_pointer<_Tp>::value;
41
+ #endif // _LIBCUDACXX_STD_VER > 11
42
+
43
+ _LIBCUDACXX_END_NAMESPACE_STD
44
+
45
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_NULL_POINTER_H