ADAPT-Chase commited on
Commit
34b70a1
·
verified ·
1 Parent(s): 517f511

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/__init__.py +0 -0
  3. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h +112 -0
  4. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti.h +123 -0
  5. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_activity.h +0 -0
  6. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_activity_deprecated.h +0 -0
  7. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h +864 -0
  8. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h +127 -0
  9. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_common.h +93 -0
  10. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h +780 -0
  11. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_events.h +1350 -0
  12. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_metrics.h +825 -0
  13. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h +111 -0
  14. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h +936 -0
  15. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h +402 -0
  16. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_pmsampling.h +474 -0
  17. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_profiler_host.h +541 -0
  18. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h +602 -0
  19. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_range_profiler.h +465 -0
  20. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_result.h +346 -0
  21. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_runtime_cbid.h +497 -0
  22. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-312.pyc +0 -0
  23. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/include/__init__.py +0 -0
  24. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-312.pyc +0 -0
  25. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/include/nvrtc.h +876 -0
  26. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/__init__.py +0 -0
  27. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-312.pyc +0 -0
  28. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.6 +3 -0
  29. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 +3 -0
  30. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-312.pyc +0 -0
  31. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/__init__.py +0 -0
  32. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-312.pyc +0 -0
  33. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/builtin_types.h +64 -0
  34. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h +588 -0
  35. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/common_functions.h +65 -0
  36. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h +1743 -0
  37. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h +452 -0
  38. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h +95 -0
  39. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h +174 -0
  40. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h +99 -0
  41. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h +212 -0
  42. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h +693 -0
  43. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h +345 -0
  44. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h +189 -0
  45. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h +136 -0
  46. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h +160 -0
  47. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h +424 -0
  48. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/scan.h +320 -0
  49. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/sync.h +281 -0
  50. tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h +62 -0
.gitattributes CHANGED
@@ -4056,3 +4056,6 @@ tool_server/.venv/lib/python3.12/site-packages/nvidia/nvjitlink/lib/libnvJitLink
4056
  tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
4057
  tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text
4058
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile.so.0 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
4056
  tool_server/.venv/lib/python3.12/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
4057
  tool_server/.venv/lib/python3.12/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text
4058
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile.so.0 filter=lfs diff=lfs merge=lfs -text
4059
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12 filter=lfs diff=lfs merge=lfs -text
4060
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.6 filter=lfs diff=lfs merge=lfs -text
4061
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 filter=lfs diff=lfs merge=lfs -text
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions
6
+ * are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of NVIDIA CORPORATION nor the names of its
13
+ * contributors may be used to endorse or promote products derived
14
+ * from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ */
28
+
29
+ #ifndef __cuda_stdint_h__
30
+ #define __cuda_stdint_h__
31
+
32
+ // Compiler-specific treatment for C99's stdint.h
33
+ //
34
+ // By default, this header will use the standard headers (so it
35
+ // is your responsibility to make sure they are available), except
36
+ // on MSVC before Visual Studio 2010, when they were not provided.
37
+ // To support old MSVC, a few of the commonly-used definitions are
38
+ // provided here. If more definitions are needed, add them here,
39
+ // or replace these definitions with a complete implementation,
40
+ // such as the ones available from Google, Boost, or MSVC10. You
41
+ // can prevent the definition of any of these types (in order to
42
+ // use your own) by #defining CU_STDINT_TYPES_ALREADY_DEFINED.
43
+
44
+ #if !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
45
+
46
+ // In VS including stdint.h forces the C++ runtime dep - provide an opt-out
47
+ // (CU_STDINT_VS_FORCE_NO_STDINT_H) for users that care (notably static
48
+ // cudart).
49
+ #if defined(_MSC_VER) && ((_MSC_VER < 1600) || defined(CU_STDINT_VS_FORCE_NO_STDINT_H))
50
+
51
+ // These definitions can be used with MSVC 8 and 9,
52
+ // which don't ship with stdint.h:
53
+
54
+ typedef unsigned char uint8_t;
55
+
56
+ typedef short int16_t;
57
+ typedef unsigned short uint16_t;
58
+
59
+ // To keep it consistent with all MSVC build. define those types
60
+ // in the exact same way they are defined with the MSVC headers
61
+ #if defined(_MSC_VER)
62
+ typedef signed char int8_t;
63
+
64
+ typedef int int32_t;
65
+ typedef unsigned int uint32_t;
66
+
67
+ typedef long long int64_t;
68
+ typedef unsigned long long uint64_t;
69
+ #else
70
+ typedef char int8_t;
71
+
72
+ typedef long int32_t;
73
+ typedef unsigned long uint32_t;
74
+
75
+ typedef __int64 int64_t;
76
+ typedef unsigned __int64 uint64_t;
77
+ #endif
78
+
79
+ #elif defined(__DJGPP__)
80
+
81
+ // These definitions can be used when compiling
82
+ // C code with DJGPP, which only provides stdint.h
83
+ // when compiling C++ code with TR1 enabled.
84
+
85
+ typedef char int8_t;
86
+ typedef unsigned char uint8_t;
87
+
88
+ typedef short int16_t;
89
+ typedef unsigned short uint16_t;
90
+
91
+ typedef long int32_t;
92
+ typedef unsigned long uint32_t;
93
+
94
+ typedef long long int64_t;
95
+ typedef unsigned long long uint64_t;
96
+
97
+ #else
98
+
99
+ // Use standard headers, as specified by C99 and C++ TR1.
100
+ // Known to be provided by:
101
+ // - gcc/glibc, supported by all versions of glibc
102
+ // - djgpp, supported since 2001
103
+ // - MSVC, supported by Visual Studio 2010 and later
104
+
105
+ #include <stdint.h>
106
+
107
+ #endif
108
+
109
+ #endif // !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
110
+
111
+
112
+ #endif // file guard
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti.h ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_H_)
51
+ #define _CUPTI_H_
52
+
53
+ #ifdef _WIN32
54
+ #ifndef WIN32_LEAN_AND_MEAN
55
+ #define WIN32_LEAN_AND_MEAN
56
+ #endif
57
+ #ifdef NOMINMAX
58
+ #include <windows.h>
59
+ #else
60
+ #define NOMINMAX
61
+ #include <windows.h>
62
+ #undef NOMINMAX
63
+ #endif
64
+ #endif
65
+
66
+ #include <cuda.h>
67
+ #include <cupti_result.h>
68
+ #include <cupti_version.h>
69
+
70
+ /* Activity, callback, event and metric APIs */
71
+ #include <cupti_activity.h>
72
+ #include <cupti_callbacks.h>
73
+ #include <cupti_events.h>
74
+ #include <cupti_metrics.h>
75
+
76
+ /* Runtime, driver, and nvtx function identifiers */
77
+ #include <cupti_driver_cbid.h>
78
+ #include <cupti_runtime_cbid.h>
79
+ #include <cupti_nvtx_cbid.h>
80
+
81
+ /* To support function parameter structures for obsoleted API. See
82
+ cuda.h for the actual definition of these structures. */
83
+ typedef unsigned int CUdeviceptr_v1;
84
+ typedef struct CUDA_MEMCPY2D_v1_st { int dummy; } CUDA_MEMCPY2D_v1;
85
+ typedef struct CUDA_MEMCPY3D_v1_st { int dummy; } CUDA_MEMCPY3D_v1;
86
+ typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY_DESCRIPTOR_v1;
87
+ typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY3D_DESCRIPTOR_v1;
88
+
89
+ /* Function parameter structures */
90
+ #include <generated_cuda_runtime_api_meta.h>
91
+ #include <generated_cuda_meta.h>
92
+
93
+ /* The following parameter structures cannot be included unless a
94
+ header that defines GL_VERSION is included before including them.
95
+ If these are needed then make sure such a header is included
96
+ already. */
97
+ #ifdef GL_VERSION
98
+ #include <generated_cuda_gl_interop_meta.h>
99
+ #include <generated_cudaGL_meta.h>
100
+ #endif
101
+
102
+ //#include <generated_nvtx_meta.h>
103
+
104
+ /* The following parameter structures cannot be included by default as
105
+ they are not guaranteed to be available on all systems. Uncomment
106
+ the includes that are available, or use the include explicitly. */
107
+ #if defined(__linux__)
108
+ //#include <generated_cuda_vdpau_interop_meta.h>
109
+ //#include <generated_cudaVDPAU_meta.h>
110
+ #endif
111
+
112
+ #ifdef _WIN32
113
+ //#include <generated_cuda_d3d9_interop_meta.h>
114
+ //#include <generated_cuda_d3d10_interop_meta.h>
115
+ //#include <generated_cuda_d3d11_interop_meta.h>
116
+ //#include <generated_cudaD3D9_meta.h>
117
+ //#include <generated_cudaD3D10_meta.h>
118
+ //#include <generated_cudaD3D11_meta.h>
119
+ #endif
120
+
121
+ #endif /*_CUPTI_H_*/
122
+
123
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_activity.h ADDED
The diff for this file is too large to render. See raw diff
 
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_activity_deprecated.h ADDED
The diff for this file is too large to render. See raw diff
 
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h ADDED
@@ -0,0 +1,864 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUPTI_CALLBACKS_H__)
51
+ #define __CUPTI_CALLBACKS_H__
52
+
53
+ #include <cuda.h>
54
+ #include <builtin_types.h>
55
+ #include <string.h>
56
+ #include <cuda_stdint.h>
57
+ #include <cupti_result.h>
58
+
59
+ #ifndef CUPTIAPI
60
+ #ifdef _WIN32
61
+ #define CUPTIAPI __stdcall
62
+ #else
63
+ #define CUPTIAPI
64
+ #endif
65
+ #endif
66
+
67
+ #if defined(__cplusplus)
68
+ extern "C" {
69
+ #endif
70
+
71
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
72
+ #pragma GCC visibility push(default)
73
+ #endif
74
+
75
+ /**
76
+ * \defgroup CUPTI_CALLBACK_API CUPTI Callback API
77
+ * Functions, types, and enums that implement the CUPTI Callback API.
78
+ * @{
79
+ */
80
+
81
+ /**
82
+ * \brief Specifies the point in an API call that a callback is issued.
83
+ *
84
+ * Specifies the point in an API call that a callback is issued. This
85
+ * value is communicated to the callback function via \ref
86
+ * CUpti_CallbackData::callbackSite.
87
+ */
88
+ typedef enum {
89
+ /**
90
+ * The callback is at the entry of the API call.
91
+ */
92
+ CUPTI_API_ENTER = 0,
93
+ /**
94
+ * The callback is at the exit of the API call.
95
+ */
96
+ CUPTI_API_EXIT = 1,
97
+ CUPTI_API_CBSITE_FORCE_INT = 0x7fffffff
98
+ } CUpti_ApiCallbackSite;
99
+
100
+ /**
101
+ * \brief Callback domains.
102
+ *
103
+ * Callback domains. Each domain represents callback points for a
104
+ * group of related API functions or CUDA driver activity.
105
+ */
106
+ typedef enum {
107
+ /**
108
+ * Invalid domain.
109
+ */
110
+ CUPTI_CB_DOMAIN_INVALID = 0,
111
+ /**
112
+ * Domain containing callback points for all driver API functions.
113
+ */
114
+ CUPTI_CB_DOMAIN_DRIVER_API = 1,
115
+ /**
116
+ * Domain containing callback points for all runtime API
117
+ * functions.
118
+ */
119
+ CUPTI_CB_DOMAIN_RUNTIME_API = 2,
120
+ /**
121
+ * Domain containing callback points for CUDA resource tracking.
122
+ */
123
+ CUPTI_CB_DOMAIN_RESOURCE = 3,
124
+ /**
125
+ * Domain containing callback points for CUDA synchronization.
126
+ */
127
+ CUPTI_CB_DOMAIN_SYNCHRONIZE = 4,
128
+ /**
129
+ * Domain containing callback points for NVTX API functions.
130
+ */
131
+ CUPTI_CB_DOMAIN_NVTX = 5,
132
+ /**
133
+ * Domain containing callback points for various states.
134
+ */
135
+ CUPTI_CB_DOMAIN_STATE = 6,
136
+
137
+ CUPTI_CB_DOMAIN_SIZE,
138
+
139
+ CUPTI_CB_DOMAIN_FORCE_INT = 0x7fffffff
140
+ } CUpti_CallbackDomain;
141
+
142
+ /**
143
+ * \brief Callback IDs for resource domain.
144
+ *
145
+ * Callback IDs for resource domain, CUPTI_CB_DOMAIN_RESOURCE. This
146
+ * value is communicated to the callback function via the \p cbid
147
+ * parameter.
148
+ */
149
+ typedef enum {
150
+ /**
151
+ * Invalid resource callback ID.
152
+ */
153
+ CUPTI_CBID_RESOURCE_INVALID = 0,
154
+ /**
155
+ * A new context has been created.
156
+ */
157
+ CUPTI_CBID_RESOURCE_CONTEXT_CREATED = 1,
158
+ /**
159
+ * A context is about to be destroyed.
160
+ */
161
+ CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING = 2,
162
+ /**
163
+ * A new stream has been created.
164
+ */
165
+ CUPTI_CBID_RESOURCE_STREAM_CREATED = 3,
166
+ /**
167
+ * A stream is about to be destroyed.
168
+ */
169
+ CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING = 4,
170
+ /**
171
+ * The driver has finished initializing.
172
+ */
173
+ CUPTI_CBID_RESOURCE_CU_INIT_FINISHED = 5,
174
+ /**
175
+ * A module has been loaded.
176
+ */
177
+ CUPTI_CBID_RESOURCE_MODULE_LOADED = 6,
178
+ /**
179
+ * A module is about to be unloaded.
180
+ */
181
+ CUPTI_CBID_RESOURCE_MODULE_UNLOAD_STARTING = 7,
182
+ /**
183
+ * The current module which is being profiled.
184
+ */
185
+ CUPTI_CBID_RESOURCE_MODULE_PROFILED = 8,
186
+ /**
187
+ * CUDA graph has been created.
188
+ */
189
+ CUPTI_CBID_RESOURCE_GRAPH_CREATED = 9,
190
+ /**
191
+ * CUDA graph is about to be destroyed.
192
+ */
193
+ CUPTI_CBID_RESOURCE_GRAPH_DESTROY_STARTING = 10,
194
+ /**
195
+ * CUDA graph is cloned.
196
+ */
197
+ CUPTI_CBID_RESOURCE_GRAPH_CLONED = 11,
198
+ /**
199
+ * CUDA graph node is about to be created
200
+ */
201
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATE_STARTING = 12,
202
+ /**
203
+ * CUDA graph node is created.
204
+ */
205
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATED = 13,
206
+ /**
207
+ * CUDA graph node is about to be destroyed.
208
+ */
209
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DESTROY_STARTING = 14,
210
+ /**
211
+ * Dependency on a CUDA graph node is created.
212
+ */
213
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_CREATED = 15,
214
+ /**
215
+ * Dependency on a CUDA graph node is destroyed.
216
+ */
217
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_DESTROY_STARTING = 16,
218
+ /**
219
+ * An executable CUDA graph is about to be created.
220
+ */
221
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATE_STARTING = 17,
222
+ /**
223
+ * An executable CUDA graph is created.
224
+ */
225
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATED = 18,
226
+ /**
227
+ * An executable CUDA graph is about to be destroyed.
228
+ */
229
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_DESTROY_STARTING = 19,
230
+ /**
231
+ * CUDA graph node is cloned.
232
+ */
233
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CLONED = 20,
234
+ /**
235
+ * CUDA stream attribute is changed.
236
+ */
237
+ CUPTI_CBID_RESOURCE_STREAM_ATTRIBUTE_CHANGED = 21,
238
+
239
+ CUPTI_CBID_RESOURCE_SIZE,
240
+ CUPTI_CBID_RESOURCE_FORCE_INT = 0x7fffffff
241
+ } CUpti_CallbackIdResource;
242
+
243
+ /**
244
+ * \brief Callback IDs for synchronization domain.
245
+ *
246
+ * Callback IDs for synchronization domain,
247
+ * CUPTI_CB_DOMAIN_SYNCHRONIZE. This value is communicated to the
248
+ * callback function via the \p cbid parameter.
249
+ */
250
+ typedef enum {
251
+ /**
252
+ * Invalid synchronize callback ID.
253
+ */
254
+ CUPTI_CBID_SYNCHRONIZE_INVALID = 0,
255
+ /**
256
+ * Stream synchronization has completed for the stream.
257
+ */
258
+ CUPTI_CBID_SYNCHRONIZE_STREAM_SYNCHRONIZED = 1,
259
+ /**
260
+ * Context synchronization has completed for the context.
261
+ */
262
+ CUPTI_CBID_SYNCHRONIZE_CONTEXT_SYNCHRONIZED = 2,
263
+ CUPTI_CBID_SYNCHRONIZE_SIZE,
264
+ CUPTI_CBID_SYNCHRONIZE_FORCE_INT = 0x7fffffff
265
+ } CUpti_CallbackIdSync;
266
+
267
+
268
+ /**
269
+ * \brief Callback IDs for state domain.
270
+ *
271
+ * Callback IDs for state domain,
272
+ * CUPTI_CB_DOMAIN_STATE. This value is communicated to the
273
+ * callback function via the \p cbid parameter.
274
+ */
275
+ typedef enum {
276
+ /**
277
+ * Invalid state callback ID.
278
+ */
279
+ CUPTI_CBID_STATE_INVALID = 0,
280
+ /**
281
+ * Notification of fatal errors - high impact, non-recoverable
282
+ * When encountered, CUPTI automatically invokes cuptiFinalize()
283
+ * User can control behavior of the application in future from
284
+ * receiving this callback - such as continuing without profiling, or
285
+ * terminating the whole application.
286
+ */
287
+ CUPTI_CBID_STATE_FATAL_ERROR = 1,
288
+ /**
289
+ * Notification of non fatal errors - high impact, but recoverable
290
+ * This notification is not issued in the current release.
291
+ */
292
+ CUPTI_CBID_STATE_ERROR = 2,
293
+ /**
294
+ * Notification of warnings - low impact, recoverable
295
+ * This notification is not issued in the current release.
296
+ */
297
+ CUPTI_CBID_STATE_WARNING = 3,
298
+
299
+ CUPTI_CBID_STATE_SIZE,
300
+ CUPTI_CBID_STATE_FORCE_INT = 0x7fffffff
301
+ } CUpti_CallbackIdState;
302
+
303
+
304
+ /**
305
+ * \brief Data passed into a runtime or driver API callback function.
306
+ *
307
+ * Data passed into a runtime or driver API callback function as the
308
+ * \p cbdata argument to \ref CUpti_CallbackFunc. The \p cbdata will
309
+ * be this type for \p domain equal to CUPTI_CB_DOMAIN_DRIVER_API or
310
+ * CUPTI_CB_DOMAIN_RUNTIME_API. The callback data is valid only within
311
+ * the invocation of the callback function that is passed the data. If
312
+ * you need to retain some data for use outside of the callback, you
313
+ * must make a copy of that data. For example, if you make a shallow
314
+ * copy of CUpti_CallbackData within a callback, you cannot
315
+ * dereference \p functionParams outside of that callback to access
316
+ * the function parameters. \p functionName is an exception: the
317
+ * string pointed to by \p functionName is a global constant and so
318
+ * may be accessed outside of the callback.
319
+ */
320
+ typedef struct {
321
+ /**
322
+ * Point in the runtime or driver function from where the callback
323
+ * was issued.
324
+ */
325
+ CUpti_ApiCallbackSite callbackSite;
326
+
327
+ /**
328
+ * Name of the runtime or driver API function which issued the
329
+ * callback. This string is a global constant and so may be
330
+ * accessed outside of the callback.
331
+ */
332
+ const char *functionName;
333
+
334
+ /**
335
+ * Pointer to the arguments passed to the runtime or driver API
336
+ * call. See generated_cuda_runtime_api_meta.h and
337
+ * generated_cuda_meta.h for structure definitions for the
338
+ * parameters for each runtime and driver API function.
339
+ */
340
+ const void *functionParams;
341
+
342
+ /**
343
+ * Pointer to the return value of the runtime or driver API
344
+ * call. This field is only valid within the exit::CUPTI_API_EXIT
345
+ * callback. For a runtime API \p functionReturnValue points to a
346
+ * \p cudaError_t. For a driver API \p functionReturnValue points
347
+ * to a \p CUresult.
348
+ */
349
+ void *functionReturnValue;
350
+
351
+ /**
352
+ * Name of the symbol operated on by the runtime or driver API
353
+ * function which issued the callback. This entry is valid only for
354
+ * driver and runtime launch callbacks, where it returns the name of
355
+ * the kernel.
356
+ */
357
+ const char *symbolName;
358
+
359
+ /**
360
+ * Driver context current to the thread, or null if no context is
361
+ * current. This value can change from the entry to exit callback
362
+ * of a runtime API function if the runtime initializes a context.
363
+ */
364
+ CUcontext context;
365
+
366
+ /**
367
+ * Unique ID for the CUDA context associated with the thread. The
368
+ * UIDs are assigned sequentially as contexts are created and are
369
+ * unique within a process.
370
+ */
371
+ uint32_t contextUid;
372
+
373
+ /**
374
+ * Pointer to data shared between the entry and exit callbacks of
375
+ * a given runtime or driver API function invocation. This field
376
+ * can be used to pass 64-bit values from the entry callback to
377
+ * the corresponding exit callback.
378
+ */
379
+ uint64_t *correlationData;
380
+
381
+ /**
382
+ * The activity record correlation ID for this callback. For a
383
+ * driver domain callback (i.e. \p domain
384
+ * CUPTI_CB_DOMAIN_DRIVER_API) this ID will equal the correlation ID
385
+ * in the CUpti_ActivityAPI record corresponding to the CUDA driver
386
+ * function call. For a runtime domain callback (i.e. \p domain
387
+ * CUPTI_CB_DOMAIN_RUNTIME_API) this ID will equal the correlation
388
+ * ID in the CUpti_ActivityAPI record corresponding to the CUDA
389
+ * runtime function call. Within the callback, this ID can be
390
+ * recorded to correlate user data with the activity record. This
391
+ * field is new in 4.1.
392
+ */
393
+ uint32_t correlationId;
394
+
395
+ } CUpti_CallbackData;
396
+
397
+ /**
398
+ * \brief Data passed into a resource callback function.
399
+ *
400
+ * Data passed into a resource callback function as the \p cbdata
401
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
402
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The callback
403
+ * data is valid only within the invocation of the callback function
404
+ * that is passed the data. If you need to retain some data for use
405
+ * outside of the callback, you must make a copy of that data.
406
+ */
407
+ typedef struct {
408
+ /**
409
+ * For CUPTI_CBID_RESOURCE_CONTEXT_CREATED and
410
+ * CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING, the context being
411
+ * created or destroyed. For CUPTI_CBID_RESOURCE_STREAM_CREATED and
412
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the context
413
+ * containing the stream being created or destroyed.
414
+ */
415
+ CUcontext context;
416
+
417
+ union {
418
+ /**
419
+ * For CUPTI_CBID_RESOURCE_STREAM_CREATED and
420
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the stream being
421
+ * created or destroyed.
422
+ */
423
+ CUstream stream;
424
+ } resourceHandle;
425
+
426
+ /**
427
+ * Reserved for future use.
428
+ */
429
+ void *resourceDescriptor;
430
+ } CUpti_ResourceData;
431
+
432
+
433
+ /**
434
+ * \brief Module data passed into a resource callback function.
435
+ *
436
+ * CUDA module data passed into a resource callback function as the \p cbdata
437
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
438
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The module
439
+ * data is valid only within the invocation of the callback function
440
+ * that is passed the data. If you need to retain some data for use
441
+ * outside of the callback, you must make a copy of that data.
442
+ */
443
+
444
+ typedef struct {
445
+ /**
446
+ * Identifier to associate with the CUDA module.
447
+ */
448
+ uint32_t moduleId;
449
+
450
+ /**
451
+ * The size of the cubin.
452
+ */
453
+ size_t cubinSize;
454
+
455
+ /**
456
+ * Pointer to the associated cubin.
457
+ */
458
+ const char *pCubin;
459
+ } CUpti_ModuleResourceData;
460
+
461
+ /**
462
+ * \brief CUDA graphs data passed into a resource callback function.
463
+ *
464
+ * CUDA graphs data passed into a resource callback function as the \p cbdata
465
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
466
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The graph
467
+ * data is valid only within the invocation of the callback function
468
+ * that is passed the data. If you need to retain some data for use
469
+ * outside of the callback, you must make a copy of that data.
470
+ */
471
+
472
+ typedef struct {
473
+ /**
474
+ * CUDA graph
475
+ */
476
+ CUgraph graph;
477
+ /**
478
+ * The original CUDA graph from which \param graph is cloned
479
+ */
480
+ CUgraph originalGraph;
481
+ /**
482
+ * CUDA graph node
483
+ */
484
+ CUgraphNode node;
485
+ /**
486
+ * The original CUDA graph node from which \param node is cloned
487
+ */
488
+ CUgraphNode originalNode;
489
+ /**
490
+ * Type of the \param node
491
+ */
492
+ CUgraphNodeType nodeType;
493
+ /**
494
+ * The dependent graph node
495
+ * The size of the array is \param numDependencies.
496
+ */
497
+ CUgraphNode dependency;
498
+ /**
499
+ * CUDA executable graph
500
+ */
501
+ CUgraphExec graphExec;
502
+ } CUpti_GraphData;
503
+
504
+ /**
505
+ * \brief Data passed into a synchronize callback function.
506
+ *
507
+ * Data passed into a synchronize callback function as the \p cbdata
508
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
509
+ * type for \p domain equal to CUPTI_CB_DOMAIN_SYNCHRONIZE. The
510
+ * callback data is valid only within the invocation of the callback
511
+ * function that is passed the data. If you need to retain some data
512
+ * for use outside of the callback, you must make a copy of that data.
513
+ */
514
+ typedef struct {
515
+ /**
516
+ * The context of the stream being synchronized.
517
+ */
518
+ CUcontext context;
519
+ /**
520
+ * The stream being synchronized.
521
+ */
522
+ CUstream stream;
523
+ } CUpti_SynchronizeData;
524
+
525
+ /**
526
+ * \brief Data passed into a NVTX callback function.
527
+ *
528
+ * Data passed into a NVTX callback function as the \p cbdata argument
529
+ * to \ref CUpti_CallbackFunc. The \p cbdata will be this type for \p
530
+ * domain equal to CUPTI_CB_DOMAIN_NVTX. Unless otherwise noted, the
531
+ * callback data is valid only within the invocation of the callback
532
+ * function that is passed the data. If you need to retain some data
533
+ * for use outside of the callback, you must make a copy of that data.
534
+ */
535
+ typedef struct {
536
+ /**
537
+ * Name of the NVTX API function which issued the callback. This
538
+ * string is a global constant and so may be accessed outside of the
539
+ * callback.
540
+ */
541
+ const char *functionName;
542
+
543
+ /**
544
+ * Pointer to the arguments passed to the NVTX API call. See
545
+ * generated_nvtx_meta.h for structure definitions for the
546
+ * parameters for each NVTX API function.
547
+ */
548
+ const void *functionParams;
549
+
550
+ /**
551
+ * Pointer to the return value of the NVTX API call. See
552
+ * nvToolsExt.h for each NVTX API function's return value.
553
+ */
554
+ const void *functionReturnValue;
555
+ } CUpti_NvtxData;
556
+
557
+ /**
558
+ * \brief Stream attribute data passed into a resource callback function
559
+ * for CUPTI_CBID_RESOURCE_STREAM_ATTRIBUTE_CHANGED callback
560
+
561
+ * Data passed into a resource callback function as the \p cbdata
562
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
563
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The
564
+ * stream attribute data is valid only within the invocation of the callback
565
+ * function that is passed the data. If you need to retain some data
566
+ * for use outside of the callback, you must make a copy of that data.
567
+ */
568
+ typedef struct {
569
+ /**
570
+ * The CUDA stream handle for the attribute
571
+ */
572
+ CUstream stream;
573
+
574
+ /**
575
+ * The type of the CUDA stream attribute
576
+ */
577
+ CUstreamAttrID attr;
578
+
579
+ /**
580
+ * The value of the CUDA stream attribute
581
+ */
582
+ const CUstreamAttrValue *value;
583
+ } CUpti_StreamAttrData;
584
+
585
+ /**
586
+ * \brief Data passed into a State callback function.
587
+ *
588
+ * Data passed into a State callback function as the \p cbdata argument
589
+ * to \ref CUpti_CallbackFunc. The \p cbdata will be this type for \p
590
+ * domain equal to CUPTI_CB_DOMAIN_STATE and callback Ids belonging to CUpti_CallbackIdState.
591
+ * Unless otherwise noted, the callback data is valid only within the invocation of the callback
592
+ * function that is passed the data. If you need to retain some data
593
+ * for use outside of the callback, you must make a copy of that data.
594
+ */
595
+ typedef struct {
596
+ union {
597
+ /**
598
+ * Data passed along with the callback Ids
599
+ * Enum CUpti_CallbackIdState used to denote callback ids
600
+ */
601
+ struct {
602
+ /**
603
+ * Error code
604
+ */
605
+ CUptiResult result;
606
+ /**
607
+ * String containing more details. It can be NULL.
608
+ */
609
+ const char *message;
610
+ } notification;
611
+ };
612
+ } CUpti_StateData;
613
+
614
+ /**
615
+ * \brief An ID for a driver API, runtime API, resource or
616
+ * synchronization callback.
617
+ *
618
+ * An ID for a driver API, runtime API, resource or synchronization
619
+ * callback. Within a driver API callback this should be interpreted
620
+ * as a CUpti_driver_api_trace_cbid value (these values are defined in
621
+ * cupti_driver_cbid.h). Within a runtime API callback this should be
622
+ * interpreted as a CUpti_runtime_api_trace_cbid value (these values
623
+ * are defined in cupti_runtime_cbid.h). Within a resource API
624
+ * callback this should be interpreted as a \ref
625
+ * CUpti_CallbackIdResource value. Within a synchronize API callback
626
+ * this should be interpreted as a \ref CUpti_CallbackIdSync value.
627
+ */
628
+ typedef uint32_t CUpti_CallbackId;
629
+
630
+ /**
631
+ * \brief Function type for a callback.
632
+ *
633
+ * Function type for a callback. The type of the data passed to the
634
+ * callback in \p cbdata depends on the \p domain. If \p domain is
635
+ * CUPTI_CB_DOMAIN_DRIVER_API or CUPTI_CB_DOMAIN_RUNTIME_API the type
636
+ * of \p cbdata will be CUpti_CallbackData. If \p domain is
637
+ * CUPTI_CB_DOMAIN_RESOURCE the type of \p cbdata will be
638
+ * CUpti_ResourceData. If \p domain is CUPTI_CB_DOMAIN_SYNCHRONIZE the
639
+ * type of \p cbdata will be CUpti_SynchronizeData. If \p domain is
640
+ * CUPTI_CB_DOMAIN_NVTX the type of \p cbdata will be CUpti_NvtxData.
641
+ *
642
+ * \param userdata User data supplied at subscription of the callback
643
+ * \param domain The domain of the callback
644
+ * \param cbid The ID of the callback
645
+ * \param cbdata Data passed to the callback.
646
+ */
647
+ typedef void (CUPTIAPI *CUpti_CallbackFunc)(
648
+ void *userdata,
649
+ CUpti_CallbackDomain domain,
650
+ CUpti_CallbackId cbid,
651
+ const void *cbdata);
652
+
653
+ /**
654
+ * \brief A callback subscriber.
655
+ */
656
+ typedef struct CUpti_Subscriber_st *CUpti_SubscriberHandle;
657
+
658
+ /**
659
+ * \brief Pointer to an array of callback domains.
660
+ */
661
+ typedef CUpti_CallbackDomain *CUpti_DomainTable;
662
+
663
+ /**
664
+ * \brief Get the available callback domains.
665
+ *
666
+ * Returns in \p *domainTable an array of size \p *domainCount of all
667
+ * the available callback domains.
668
+ * \note \b Thread-safety: this function is thread safe.
669
+ *
670
+ * \param domainCount Returns number of callback domains
671
+ * \param domainTable Returns pointer to array of available callback domains
672
+ *
673
+ * \retval CUPTI_SUCCESS on success
674
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
675
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p domainCount or \p domainTable are NULL
676
+ */
677
+ CUptiResult CUPTIAPI cuptiSupportedDomains(size_t *domainCount,
678
+ CUpti_DomainTable *domainTable);
679
+
680
+ /**
681
+ * \brief Initialize a callback subscriber with a callback function
682
+ * and user data.
683
+ *
684
+ * Initializes a callback subscriber with a callback function and
685
+ * (optionally) a pointer to user data. The returned subscriber handle
686
+ * can be used to enable and disable the callback for specific domains
687
+ * and callback IDs.
688
+ * \note Only a single subscriber can be registered at a time. To ensure
689
+ * that no other CUPTI client interrupts the profiling session, it's the
690
+ * responsibility of all the CUPTI clients to call this function before
691
+ * starting the profiling session. In case profiling session is already
692
+ * started by another CUPTI client, this function returns the error code
693
+ * CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED.
694
+ * Note that this function returns the same error when application is
695
+ * launched using NVIDIA tools like nvprof, Visual Profiler, Nsight Systems,
696
+ * Nsight Compute, cuda-gdb and cuda-memcheck.
697
+ * \note This function does not enable any callbacks.
698
+ * \note \b Thread-safety: this function is thread safe.
699
+ *
700
+ * \param subscriber Returns handle to initialize subscriber
701
+ * \param callback The callback function
702
+ * \param userdata A pointer to user data. This data will be passed to
703
+ * the callback function via the \p userdata parameter.
704
+ *
705
+ * \retval CUPTI_SUCCESS on success
706
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
707
+ * \retval CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED if there is already a CUPTI subscriber
708
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL
709
+ */
710
+ CUptiResult CUPTIAPI cuptiSubscribe(CUpti_SubscriberHandle *subscriber,
711
+ CUpti_CallbackFunc callback,
712
+ void *userdata);
713
+
714
+ /**
715
+ * \brief Unregister a callback subscriber.
716
+ *
717
+ * Removes a callback subscriber so that no future callbacks will be
718
+ * issued to that subscriber.
719
+ * \note \b Thread-safety: this function is thread safe.
720
+ *
721
+ * \param subscriber Handle to the initialized subscriber
722
+ *
723
+ * \retval CUPTI_SUCCESS on success
724
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
725
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL or not initialized
726
+ */
727
+ CUptiResult CUPTIAPI cuptiUnsubscribe(CUpti_SubscriberHandle subscriber);
728
+
729
+ /**
730
+ * \brief Get the current enabled/disabled state of a callback for a specific
731
+ * domain and function ID.
732
+ *
733
+ * Returns non-zero in \p *enable if the callback for a domain and
734
+ * callback ID is enabled, and zero if not enabled.
735
+ *
736
+ * \note \b Thread-safety: a subscriber must serialize access to
737
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
738
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
739
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
740
+ * the results are undefined.
741
+ *
742
+ * \param enable Returns non-zero if callback enabled, zero if not enabled
743
+ * \param subscriber Handle to the initialized subscriber
744
+ * \param domain The domain of the callback
745
+ * \param cbid The ID of the callback
746
+ *
747
+ * \retval CUPTI_SUCCESS on success
748
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
749
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p enable is NULL, or if \p
750
+ * subscriber, \p domain or \p cbid is invalid.
751
+ */
752
+ CUptiResult CUPTIAPI cuptiGetCallbackState(uint32_t *enable,
753
+ CUpti_SubscriberHandle subscriber,
754
+ CUpti_CallbackDomain domain,
755
+ CUpti_CallbackId cbid);
756
+
757
+ /**
758
+ * \brief Enable or disable callbacks for a specific domain and
759
+ * callback ID.
760
+ *
761
+ * Enable or disable callbacks for a subscriber for a specific domain
762
+ * and callback ID.
763
+ *
764
+ * \note \b Thread-safety: a subscriber must serialize access to
765
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
766
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
767
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
768
+ * the results are undefined.
769
+ *
770
+ * \param enable New enable state for the callback. Zero disables the
771
+ * callback, non-zero enables the callback.
772
+ * \param subscriber - Handle to callback subscription
773
+ * \param domain The domain of the callback
774
+ * \param cbid The ID of the callback
775
+ *
776
+ * \retval CUPTI_SUCCESS on success
777
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
778
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber, \p domain or \p
779
+ * cbid is invalid.
780
+ */
781
+ CUptiResult CUPTIAPI cuptiEnableCallback(uint32_t enable,
782
+ CUpti_SubscriberHandle subscriber,
783
+ CUpti_CallbackDomain domain,
784
+ CUpti_CallbackId cbid);
785
+
786
+ /**
787
+ * \brief Enable or disable all callbacks for a specific domain.
788
+ *
789
+ * Enable or disable all callbacks for a specific domain.
790
+ *
791
+ * \note \b Thread-safety: a subscriber must serialize access to
792
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
793
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackEnabled(sub,
794
+ * d, *) and cuptiEnableDomain(sub, d) are called concurrently, the
795
+ * results are undefined.
796
+ *
797
+ * \param enable New enable state for all callbacks in the
798
+ * domain. Zero disables all callbacks, non-zero enables all
799
+ * callbacks.
800
+ * \param subscriber - Handle to callback subscription
801
+ * \param domain The domain of the callback
802
+ *
803
+ * \retval CUPTI_SUCCESS on success
804
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
805
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber or \p domain is invalid
806
+ */
807
+ CUptiResult CUPTIAPI cuptiEnableDomain(uint32_t enable,
808
+ CUpti_SubscriberHandle subscriber,
809
+ CUpti_CallbackDomain domain);
810
+
811
+ /**
812
+ * \brief Enable or disable all callbacks in all domains.
813
+ *
814
+ * Enable or disable all callbacks in all domains.
815
+ *
816
+ * \note \b Thread-safety: a subscriber must serialize access to
817
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
818
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
819
+ * d, *) and cuptiEnableAllDomains(sub) are called concurrently, the
820
+ * results are undefined.
821
+ *
822
+ * \param enable New enable state for all callbacks in all
823
+ * domains. Zero disables all callbacks, non-zero enables all
824
+ * callbacks.
825
+ * \param subscriber - Handle to callback subscription
826
+ *
827
+ * \retval CUPTI_SUCCESS on success
828
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
829
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is invalid
830
+ */
831
+ CUptiResult CUPTIAPI cuptiEnableAllDomains(uint32_t enable,
832
+ CUpti_SubscriberHandle subscriber);
833
+
834
+ /**
835
+ * \brief Get the name of a callback for a specific domain and callback ID.
836
+ *
837
+ * Returns a pointer to the name c_string in \p **name.
838
+ *
839
+ * \note \b Names are available only for the DRIVER and RUNTIME domains.
840
+ *
841
+ * \param domain The domain of the callback
842
+ * \param cbid The ID of the callback
843
+ * \param name Returns pointer to the name string on success, NULL otherwise
844
+ *
845
+ * \retval CUPTI_SUCCESS on success
846
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p name is NULL, or if
847
+ * \p domain or \p cbid is invalid.
848
+ */
849
+ CUptiResult CUPTIAPI cuptiGetCallbackName(CUpti_CallbackDomain domain,
850
+ uint32_t cbid,
851
+ const char **name);
852
+
853
+ /** @} */ /* END CUPTI_CALLBACK_API */
854
+
855
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
856
+ #pragma GCC visibility pop
857
+ #endif
858
+
859
+ #if defined(__cplusplus)
860
+ }
861
+ #endif
862
+
863
+ #endif // file guard
864
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <cupti_result.h>
5
+
6
+ #include <stddef.h>
7
+ #include <stdint.h>
8
+
9
+ namespace NV { namespace Cupti { namespace Checkpoint {
10
+
11
+ #ifdef __cplusplus
12
+ extern "C"
13
+ {
14
+ #endif
15
+
16
+ /**
17
+ * \defgroup CUPTI_CHECKPOINT_API CUPTI Checkpoint API
18
+ * Functions, types, and enums that implement the CUPTI Checkpoint API.
19
+ * @{
20
+ */
21
+
22
+ /**
23
+ * \brief Specifies optimization options for a checkpoint, may be OR'd together to specify multiple options.
24
+ */
25
+ typedef enum
26
+ {
27
+ CUPTI_CHECKPOINT_OPT_NONE = 0, //!< Default behavior
28
+ CUPTI_CHECKPOINT_OPT_TRANSFER = 1, //!< Determine which mem blocks have changed, and only restore those. This optimization is cached, which means cuptiCheckpointRestore must always be called at the same point in the application when this option is enabled, or the result may be incorrect.
29
+ } CUpti_CheckpointOptimizations;
30
+
31
+ /**
32
+ * \brief Configuration and handle for a CUPTI Checkpoint
33
+ *
34
+ * A CUptiCheckpoint object should be initialized with desired options prior to passing into any
35
+ * CUPTI Checkpoint API function. The first call into a Checkpoint API function will initialize internal
36
+ * state based on these options. Subsequent changes to these options will not have any effect.
37
+ *
38
+ * Checkpoint data is saved in device, host, and filesystem space. There are options to reserve memory
39
+ * at each level (device, host, filesystem) which are intended to allow a guarantee that a certain amount
40
+ * of memory will remain free for use after the checkpoint is saved.
41
+ * Note, however, that falling back to slower levels of memory (host, and then filesystem) to save the checkpoint
42
+ * will result in performance degradation.
43
+ * Currently, the filesystem limitation is not implemented. Note that falling back to filesystem storage may
44
+ * significantly impact the performance for saving and restoring a checkpoint.
45
+ */
46
+ typedef struct
47
+ {
48
+ size_t structSize; //!< [in] Must be set to CUpti_Checkpoint_STRUCT_SIZE
49
+
50
+ CUcontext ctx; //!< [in] Set to context to save from, or will use current context if NULL
51
+
52
+ size_t reserveDeviceMB; //!< [in] Restrict checkpoint from using last N MB of device memory (-1 = use no device memory)
53
+ size_t reserveHostMB; //!< [in] Restrict checkpoint from using last N MB of host memory (-1 = use no host memory)
54
+ uint8_t allowOverwrite; //!< [in] Boolean, Allow checkpoint to save over existing checkpoint
55
+ uint8_t optimizations; //!< [in] Mask of CUpti_CheckpointOptimizations flags for this checkpoint
56
+
57
+ void * pPriv; //!< [in] Assign to NULL
58
+ } CUpti_Checkpoint;
59
+
60
+ #define CUpti_Checkpoint_STRUCT_SIZE \
61
+ (offsetof(CUpti_Checkpoint, pPriv) + \
62
+ sizeof(((CUpti_Checkpoint*)(nullptr))->pPriv))
63
+
64
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
65
+ #pragma GCC visibility push(default)
66
+ #endif
67
+
68
+ /**
69
+ * \brief Initialize and save a checkpoint of the device state associated with the handle context
70
+ *
71
+ * Uses the handle options to configure and save a checkpoint of the device state associated with the specified context.
72
+ *
73
+ * \param handle A pointer to a CUpti_Checkpoint object
74
+ *
75
+ * \retval CUPTI_SUCCESS if a checkpoint was successfully initialized and saved
76
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p handle does not appear to refer to a valid CUpti_Checkpoint
77
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
78
+ * \retval CUPTI_ERROR_INVALID_DEVICE if device associated with context is not compatible with checkpoint API
79
+ * \retval CUPTI_ERROR_INVALID_OPERATION if Save is requested over an existing checkpoint, but \p allowOverwrite was not originally specified
80
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if as configured, not enough backing storage space to save the checkpoint
81
+ */
82
+ CUptiResult cuptiCheckpointSave(CUpti_Checkpoint * const handle);
83
+
84
+ /**
85
+ * \brief Restore a checkpoint to the device associated with its context
86
+ *
87
+ * Restores device, pinned, and allocated memory to the state when the checkpoint was saved
88
+ *
89
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
90
+ *
91
+ * \retval CUPTI_SUCCESS if the checkpoint was successfully restored
92
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if the checkpoint was not previously initialized
93
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
94
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle appears invalid
95
+ * \retval CUPTI_ERROR_UNKNOWN if the restore or optimization operation fails
96
+ */
97
+ CUptiResult cuptiCheckpointRestore(CUpti_Checkpoint * const handle);
98
+
99
+ /**
100
+ * \brief Free the backing data for a checkpoint
101
+ *
102
+ * Frees all associated device, host memory and filesystem storage used for this context.
103
+ * After freeing a handle, it may be re-used as if it was new - options may be re-configured and will
104
+ * take effect on the next call to \p cuptiCheckpointSave.
105
+ *
106
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
107
+ *
108
+ * \retval CUPTI_SUCCESS if the handle was successfully freed
109
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle was already freed or appears invalid
110
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if the context is no longer valid
111
+ */
112
+ CUptiResult cuptiCheckpointFree(CUpti_Checkpoint * const handle);
113
+
114
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
115
+ #pragma GCC visibility pop
116
+ #endif
117
+
118
+ /**
119
+ * @}
120
+ */
121
+
122
+ #ifdef __cplusplus
123
+ }
124
+ #endif
125
+
126
+ // Exit namespace NV::Cupti::Checkpoint
127
+ }}}
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_common.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUPTI_COMMON_H__)
51
+ #define __CUPTI_COMMON_H__
52
+
53
+ #ifndef CUPTIAPI
54
+ #ifdef _WIN32
55
+ #define CUPTIAPI __stdcall
56
+ #else
57
+ #define CUPTIAPI
58
+ #endif
59
+ #endif
60
+
61
+ #ifndef CUPTIUTILAPI
62
+ #ifdef _WIN32
63
+ #define CUPTIUTILAPI __stdcall
64
+ #else
65
+ #define CUPTIUTILAPI
66
+ #endif
67
+ #endif
68
+
69
+ #if defined(__LP64__)
70
+ #define CUPTILP64 1
71
+ #elif defined(_WIN64)
72
+ #define CUPTILP64 1
73
+ #else
74
+ #undef CUPTILP64
75
+ #endif
76
+
77
+ #define ACTIVITY_RECORD_ALIGNMENT 8
78
+ #if defined(_WIN32) // Windows 32- and 64-bit
79
+ #define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding
80
+ #define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT))
81
+ #define END_PACKED_ALIGNMENT __pragma(pack(pop))
82
+ #elif defined(__GNUC__) // GCC
83
+ #define START_PACKED_ALIGNMENT
84
+ #define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT)))
85
+ #define END_PACKED_ALIGNMENT
86
+ #else // all other compilers
87
+ #define START_PACKED_ALIGNMENT
88
+ #define PACKED_ALIGNMENT
89
+ #define END_PACKED_ALIGNMENT
90
+ #endif
91
+
92
+ #endif /*__CUPTI_COMMON_H__*/
93
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h ADDED
@@ -0,0 +1,780 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ #if !defined(_CUPTI_DRIVER_CBID_H_)
10
+ #define _CUPTI_DRIVER_CBID_H_
11
+
12
+ typedef enum CUpti_driver_api_trace_cbid_enum {
13
+ CUPTI_DRIVER_TRACE_CBID_INVALID = 0,
14
+ CUPTI_DRIVER_TRACE_CBID_cuInit = 1,
15
+ CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion = 2,
16
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGet = 3,
17
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount = 4,
18
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName = 5,
19
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability = 6,
20
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem = 7,
21
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties = 8,
22
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute = 9,
23
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate = 10,
24
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy = 11,
25
+ CUPTI_DRIVER_TRACE_CBID_cuCtxAttach = 12,
26
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDetach = 13,
27
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent = 14,
28
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent = 15,
29
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice = 16,
30
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize = 17,
31
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoad = 18,
32
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData = 19,
33
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx = 20,
34
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary = 21,
35
+ CUPTI_DRIVER_TRACE_CBID_cuModuleUnload = 22,
36
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction = 23,
37
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal = 24,
38
+ CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal = 25,
39
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef = 26,
40
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo = 27,
41
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo = 28,
42
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc = 29,
43
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc = 30,
44
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch = 31,
45
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch = 32,
46
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree = 33,
47
+ CUPTI_DRIVER_TRACE_CBID_cu64MemFree = 34,
48
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange = 35,
49
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange = 36,
50
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost = 37,
51
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost = 38,
52
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc = 39,
53
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer = 40,
54
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer = 41,
55
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags = 42,
56
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD = 43,
57
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD = 44,
58
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH = 45,
59
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH = 46,
60
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD = 47,
61
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD = 48,
62
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA = 49,
63
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA = 50,
64
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD = 51,
65
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD = 52,
66
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA = 53,
67
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH = 54,
68
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA = 55,
69
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D = 56,
70
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned = 57,
71
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D = 58,
72
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D = 59,
73
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync = 60,
74
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync = 61,
75
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync = 62,
76
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync = 63,
77
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync = 64,
78
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync = 65,
79
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync = 66,
80
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync = 67,
81
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync = 68,
82
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync = 69,
83
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync = 70,
84
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8 = 71,
85
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8 = 72,
86
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16 = 73,
87
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16 = 74,
88
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32 = 75,
89
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32 = 76,
90
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8 = 77,
91
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8 = 78,
92
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16 = 79,
93
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16 = 80,
94
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32 = 81,
95
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32 = 82,
96
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape = 83,
97
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize = 84,
98
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute = 85,
99
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig = 86,
100
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate = 87,
101
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor = 88,
102
+ CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy = 89,
103
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate = 90,
104
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor = 91,
105
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate = 92,
106
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy = 93,
107
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray = 94,
108
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress = 95,
109
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress = 96,
110
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D = 97,
111
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D = 98,
112
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat = 99,
113
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode = 100,
114
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode = 101,
115
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags = 102,
116
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress = 103,
117
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress = 104,
118
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray = 105,
119
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode = 106,
120
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode = 107,
121
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat = 108,
122
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags = 109,
123
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetSize = 110,
124
+ CUPTI_DRIVER_TRACE_CBID_cuParamSeti = 111,
125
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetf = 112,
126
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetv = 113,
127
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef = 114,
128
+ CUPTI_DRIVER_TRACE_CBID_cuLaunch = 115,
129
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid = 116,
130
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync = 117,
131
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreate = 118,
132
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord = 119,
133
+ CUPTI_DRIVER_TRACE_CBID_cuEventQuery = 120,
134
+ CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize = 121,
135
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy = 122,
136
+ CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime = 123,
137
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreate = 124,
138
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery = 125,
139
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize = 126,
140
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy = 127,
141
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource = 128,
142
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray = 129,
143
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer = 130,
144
+ CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer = 131,
145
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags = 132,
146
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources = 133,
147
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources = 134,
148
+ CUPTI_DRIVER_TRACE_CBID_cuGetExportTable = 135,
149
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit = 136,
150
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit = 137,
151
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice = 138,
152
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate = 139,
153
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource = 140,
154
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource = 141,
155
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource = 142,
156
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources = 143,
157
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources = 144,
158
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags = 145,
159
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray = 146,
160
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer = 147,
161
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize = 148,
162
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch = 149,
163
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions = 150,
164
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice = 151,
165
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate = 152,
166
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource = 153,
167
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice = 154,
168
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate = 155,
169
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource = 156,
170
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice = 157,
171
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource = 158,
172
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource = 159,
173
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources = 160,
174
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources = 161,
175
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags = 162,
176
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions = 163,
177
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray = 164,
178
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer = 165,
179
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize = 166,
180
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch = 167,
181
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin = 168,
182
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9End = 169,
183
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer = 170,
184
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer = 171,
185
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer = 172,
186
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer = 173,
187
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate = 174,
188
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer = 175,
189
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage = 176,
190
+ CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice = 177,
191
+ CUPTI_DRIVER_TRACE_CBID_cuGLInit = 178,
192
+ CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject = 179,
193
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject = 180,
194
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject = 181,
195
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject = 182,
196
+ CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags = 183,
197
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync = 184,
198
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync = 185,
199
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice = 186,
200
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate = 187,
201
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface = 188,
202
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface = 189,
203
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef = 190,
204
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate = 191,
205
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy = 192,
206
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat = 193,
207
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray = 194,
208
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat = 195,
209
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray = 196,
210
+ CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem = 197,
211
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer = 198,
212
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize = 199,
213
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch = 200,
214
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions = 201,
215
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions = 202,
216
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer = 203,
217
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize = 204,
218
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch = 205,
219
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer = 206,
220
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject = 207,
221
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync = 208,
222
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices = 209,
223
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice = 210,
224
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices = 211,
225
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice = 212,
226
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices = 213,
227
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice = 214,
228
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc = 215,
229
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async = 216,
230
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async = 217,
231
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async = 218,
232
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async = 219,
233
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async = 220,
234
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async = 221,
235
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async = 222,
236
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async = 223,
237
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async = 224,
238
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async = 225,
239
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async = 226,
240
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async = 227,
241
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate = 228,
242
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor = 229,
243
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate = 230,
244
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor = 231,
245
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D = 232,
246
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned = 233,
247
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync = 234,
248
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2 = 235,
249
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2 = 236,
250
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2 = 237,
251
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2 = 238,
252
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2 = 239,
253
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2 = 240,
254
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2 = 241,
255
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2 = 242,
256
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2 = 243,
257
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2 = 244,
258
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2 = 245,
259
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2 = 246,
260
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2 = 247,
261
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2 = 248,
262
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2 = 249,
263
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2 = 250,
264
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2 = 251,
265
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2 = 252,
266
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2 = 253,
267
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2 = 254,
268
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2 = 255,
269
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2 = 256,
270
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2 = 257,
271
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2 = 258,
272
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2 = 259,
273
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2 = 260,
274
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2 = 261,
275
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2 = 262,
276
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2 = 263,
277
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2 = 264,
278
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2 = 265,
279
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2 = 266,
280
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2 = 267,
281
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2 = 268,
282
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2 = 269,
283
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2 = 270,
284
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2 = 271,
285
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2 = 272,
286
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2 = 273,
287
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2 = 274,
288
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2 = 275,
289
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2 = 276,
290
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2 = 277,
291
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2 = 278,
292
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2 = 279,
293
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2 = 280,
294
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2 = 281,
295
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2 = 282,
296
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2 = 283,
297
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2 = 284,
298
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2 = 285,
299
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2 = 286,
300
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2 = 287,
301
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2 = 288,
302
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2 = 289,
303
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2 = 290,
304
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2 = 291,
305
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2 = 292,
306
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2 = 293,
307
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2 = 294,
308
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent = 295,
309
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion = 296,
310
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice = 297,
311
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice = 298,
312
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig = 299,
313
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig = 300,
314
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister = 301,
315
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister = 302,
316
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent = 303,
317
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent = 304,
318
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy = 305,
319
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync = 306,
320
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel = 307,
321
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStart = 308,
322
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStop = 309,
323
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute = 310,
324
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize = 311,
325
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer = 312,
326
+ CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess = 313,
327
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess = 314,
328
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister = 315,
329
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister = 316,
330
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer = 317,
331
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer = 318,
332
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync = 319,
333
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer = 320,
334
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync = 321,
335
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2 = 322,
336
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2 = 323,
337
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2 = 324,
338
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2 = 325,
339
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2 = 326,
340
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3 = 327,
341
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle = 328,
342
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle = 329,
343
+ CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle = 330,
344
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId = 331,
345
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId = 332,
346
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices = 333,
347
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle = 334,
348
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle = 335,
349
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig = 336,
350
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig = 337,
351
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig = 338,
352
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate = 339,
353
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy = 340,
354
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc = 341,
355
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc = 342,
356
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate = 343,
357
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy = 344,
358
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc = 345,
359
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback = 346,
360
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate = 347,
361
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel = 348,
362
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy = 349,
363
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray = 350,
364
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode = 351,
365
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias = 352,
366
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp = 353,
367
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy = 354,
368
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray = 355,
369
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode = 356,
370
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias = 357,
371
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp = 358,
372
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy = 359,
373
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray = 360,
374
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc = 361,
375
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate = 362,
376
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData = 363,
377
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile = 364,
378
+ CUPTI_DRIVER_TRACE_CBID_cuLinkComplete = 365,
379
+ CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy = 366,
380
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority = 367,
381
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority = 368,
382
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags = 369,
383
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange = 370,
384
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged = 371,
385
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorString = 372,
386
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorName = 373,
387
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor = 374,
388
+ CUPTI_DRIVER_TRACE_CBID_cuCompilePtx = 375,
389
+ CUPTI_DRIVER_TRACE_CBID_cuBinaryFree = 376,
390
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync = 377,
391
+ CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute = 378,
392
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2 = 379,
393
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2 = 380,
394
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2 = 381,
395
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2 = 382,
396
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2 = 383,
397
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize = 384,
398
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2 = 385,
399
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain = 386,
400
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease = 387,
401
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags = 388,
402
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset = 389,
403
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage = 390,
404
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags = 391,
405
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState = 392,
406
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect = 393,
407
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect = 394,
408
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame = 395,
409
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame = 396,
410
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds = 397,
411
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds = 398,
412
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds = 399,
413
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds = 400,
414
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds = 401,
415
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds = 402,
416
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds = 403,
417
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds = 404,
418
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds = 405,
419
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds = 406,
420
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds = 407,
421
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds = 408,
422
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds = 409,
423
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds = 410,
424
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds = 411,
425
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds = 412,
426
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds = 413,
427
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds = 414,
428
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds = 415,
429
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds = 416,
430
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds = 417,
431
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz = 418,
432
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz = 419,
433
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz = 420,
434
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz = 421,
435
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz = 422,
436
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz = 423,
437
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz = 424,
438
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz = 425,
439
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz = 426,
440
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz = 427,
441
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz = 428,
442
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz = 429,
443
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz = 430,
444
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async_ptsz = 431,
445
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async_ptsz = 432,
446
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async_ptsz = 433,
447
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority_ptsz = 434,
448
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags_ptsz = 435,
449
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent_ptsz = 436,
450
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback_ptsz = 437,
451
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync_ptsz = 438,
452
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery_ptsz = 439,
453
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize_ptsz = 440,
454
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord_ptsz = 441,
455
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel_ptsz = 442,
456
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources_ptsz = 443,
457
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources_ptsz = 444,
458
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2_ptsz = 445,
459
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerConnect = 446,
460
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerDisconnect = 447,
461
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerPresentFrame = 448,
462
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedEglFrame = 449,
463
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttributes = 450,
464
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags = 451,
465
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSizeWithFlags = 452,
466
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerReturnFrame = 453,
467
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetP2PAttribute = 454,
468
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetBorderColor = 455,
469
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetBorderColor = 456,
470
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise = 457,
471
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32 = 458,
472
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_ptsz = 459,
473
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32 = 460,
474
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_ptsz = 461,
475
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp = 462,
476
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_ptsz = 463,
477
+ CUPTI_DRIVER_TRACE_CBID_cuNVNbufferGetPointer = 464,
478
+ CUPTI_DRIVER_TRACE_CBID_cuNVNtextureGetArray = 465,
479
+ CUPTI_DRIVER_TRACE_CBID_cuNNSetAllocator = 466,
480
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync = 467,
481
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_ptsz = 468,
482
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromNVNSync = 469,
483
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnectWithFlags = 470,
484
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttribute = 471,
485
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttributes = 472,
486
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64 = 473,
487
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_ptsz = 474,
488
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64 = 475,
489
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_ptsz = 476,
490
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel = 477,
491
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel_ptsz = 478,
492
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromEGLSync = 479,
493
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernelMultiDevice = 480,
494
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetAttribute = 481,
495
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid = 482,
496
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx = 483,
497
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_ptsz = 484,
498
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalMemory = 485,
499
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedBuffer = 486,
500
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedMipmappedArray = 487,
501
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalMemory = 488,
502
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalSemaphore = 489,
503
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync = 490,
504
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync_ptsz = 491,
505
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync = 492,
506
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync_ptsz = 493,
507
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalSemaphore = 494,
508
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture = 495,
509
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_ptsz = 496,
510
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture = 497,
511
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture_ptsz = 498,
512
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing = 499,
513
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing_ptsz = 500,
514
+ CUPTI_DRIVER_TRACE_CBID_cuGraphCreate = 501,
515
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode = 502,
516
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams = 503,
517
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemcpyNode = 504,
518
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeGetParams = 505,
519
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemsetNode = 506,
520
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeGetParams = 507,
521
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeSetParams = 508,
522
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetType = 509,
523
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetRootNodes = 510,
524
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies = 511,
525
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes = 512,
526
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate = 513,
527
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch = 514,
528
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch_ptsz = 515,
529
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecDestroy = 516,
530
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroy = 517,
531
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies = 518,
532
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies = 519,
533
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeSetParams = 520,
534
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams = 521,
535
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroyNode = 522,
536
+ CUPTI_DRIVER_TRACE_CBID_cuGraphClone = 523,
537
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeFindInClone = 524,
538
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddChildGraphNode = 525,
539
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEmptyNode = 526,
540
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc = 527,
541
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc_ptsz = 528,
542
+ CUPTI_DRIVER_TRACE_CBID_cuGraphChildGraphNodeGetGraph = 529,
543
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddHostNode = 530,
544
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeGetParams = 531,
545
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetLuid = 532,
546
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeSetParams = 533,
547
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetNodes = 534,
548
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges = 535,
549
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo = 536,
550
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_ptsz = 537,
551
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams = 538,
552
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2 = 539,
553
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2_ptsz = 540,
554
+ CUPTI_DRIVER_TRACE_CBID_cuThreadExchangeStreamCaptureMode = 541,
555
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetNvSciSyncAttributes = 542,
556
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyAvailableDynamicSMemPerBlock = 543,
557
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease_v2 = 544,
558
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset_v2 = 545,
559
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags_v2 = 546,
560
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressReserve = 547,
561
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressFree = 548,
562
+ CUPTI_DRIVER_TRACE_CBID_cuMemCreate = 549,
563
+ CUPTI_DRIVER_TRACE_CBID_cuMemRelease = 550,
564
+ CUPTI_DRIVER_TRACE_CBID_cuMemMap = 551,
565
+ CUPTI_DRIVER_TRACE_CBID_cuMemUnmap = 552,
566
+ CUPTI_DRIVER_TRACE_CBID_cuMemSetAccess = 553,
567
+ CUPTI_DRIVER_TRACE_CBID_cuMemExportToShareableHandle = 554,
568
+ CUPTI_DRIVER_TRACE_CBID_cuMemImportFromShareableHandle = 555,
569
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationGranularity = 556,
570
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationPropertiesFromHandle = 557,
571
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAccess = 558,
572
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags = 559,
573
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags_ptsz = 560,
574
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate = 561,
575
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemcpyNodeSetParams = 562,
576
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemsetNodeSetParams = 563,
577
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecHostNodeSetParams = 564,
578
+ CUPTI_DRIVER_TRACE_CBID_cuMemRetainAllocationHandle = 565,
579
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetModule = 566,
580
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle_v2 = 567,
581
+ CUPTI_DRIVER_TRACE_CBID_cuCtxResetPersistingL2Cache = 568,
582
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeCopyAttributes = 569,
583
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetAttribute = 570,
584
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetAttribute = 571,
585
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes = 572,
586
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes_ptsz = 573,
587
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute = 574,
588
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute_ptsz = 575,
589
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute = 576,
590
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute_ptsz = 577,
591
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate_v2 = 578,
592
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetTexture1DLinearMaxWidth = 579,
593
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload = 580,
594
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload_ptsz = 581,
595
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetSparseProperties = 582,
596
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetSparseProperties = 583,
597
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync = 584,
598
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync_ptsz = 585,
599
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecChildGraphNodeSetParams = 586,
600
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags = 587,
601
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags_ptsz = 588,
602
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventRecordNode = 589,
603
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventWaitNode = 590,
604
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeGetEvent = 591,
605
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeGetEvent = 592,
606
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeSetEvent = 593,
607
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeSetEvent = 594,
608
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventRecordNodeSetEvent = 595,
609
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventWaitNodeSetEvent = 596,
610
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetPlane = 597,
611
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync = 598,
612
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync_ptsz = 599,
613
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync = 600,
614
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync_ptsz = 601,
615
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolTrimTo = 602,
616
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAttribute = 603,
617
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAttribute = 604,
618
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAccess = 605,
619
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDefaultMemPool = 606,
620
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolCreate = 607,
621
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolDestroy = 608,
622
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetMemPool = 609,
623
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetMemPool = 610,
624
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync = 611,
625
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync_ptsz = 612,
626
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportToShareableHandle = 613,
627
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportFromShareableHandle = 614,
628
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportPointer = 615,
629
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportPointer = 616,
630
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAccess = 617,
631
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresSignalNode = 618,
632
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeGetParams = 619,
633
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeSetParams = 620,
634
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresWaitNode = 621,
635
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeGetParams = 622,
636
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeSetParams = 623,
637
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresSignalNodeSetParams = 624,
638
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresWaitNodeSetParams = 625,
639
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress = 626,
640
+ CUPTI_DRIVER_TRACE_CBID_cuFlushGPUDirectRDMAWrites = 627,
641
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDebugDotPrint = 628,
642
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2 = 629,
643
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2_ptsz = 630,
644
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies = 631,
645
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_ptsz = 632,
646
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectCreate = 633,
647
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRetain = 634,
648
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRelease = 635,
649
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRetainUserObject = 636,
650
+ CUPTI_DRIVER_TRACE_CBID_cuGraphReleaseUserObject = 637,
651
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemAllocNode = 638,
652
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemFreeNode = 639,
653
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGraphMemTrim = 640,
654
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetGraphMemAttribute = 641,
655
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetGraphMemAttribute = 642,
656
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithFlags = 643,
657
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetExecAffinitySupport = 644,
658
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v3 = 645,
659
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetExecAffinity = 646,
660
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid_v2 = 647,
661
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemAllocNodeGetParams = 648,
662
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemFreeNodeGetParams = 649,
663
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetEnabled = 650,
664
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetEnabled = 651,
665
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx = 652,
666
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx_ptsz = 653,
667
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetMemoryRequirements = 654,
668
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetMemoryRequirements = 655,
669
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams = 656,
670
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams_ptsz = 657,
671
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecGetFlags = 658,
672
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2 = 659,
673
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2_ptsz = 660,
674
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2 = 661,
675
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2_ptsz = 662,
676
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2 = 663,
677
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2_ptsz = 664,
678
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2 = 665,
679
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2_ptsz = 666,
680
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2 = 667,
681
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz = 668,
682
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode = 669,
683
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams = 670,
684
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams = 671,
685
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams = 672,
686
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode = 673,
687
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange = 674,
688
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize = 675,
689
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters = 676,
690
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2 = 677,
691
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData = 678,
692
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile = 679,
693
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload = 680,
694
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel = 681,
695
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule = 682,
696
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction = 683,
697
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal = 684,
698
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged = 685,
699
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute = 686,
700
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute = 687,
701
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig = 688,
702
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2 = 689,
703
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2 = 690,
704
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2 = 691,
705
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2 = 692,
706
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId = 693,
707
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz = 694,
708
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetId = 695,
709
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2 = 696,
710
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled = 697,
711
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col = 698,
712
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress = 699,
713
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction = 700,
714
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute = 701,
715
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal = 702,
716
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute = 703,
717
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal = 704,
718
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags = 705,
719
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate = 706,
720
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice = 707,
721
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem = 708,
722
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr = 709,
723
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind = 710,
724
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity = 711,
725
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode = 712,
726
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetParams = 713,
727
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecNodeSetParams = 714,
728
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise_v2 = 715,
729
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2 = 716,
730
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2_ptsz = 717,
731
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetName = 718,
732
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetName = 719,
733
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph = 720,
734
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph_ptsz = 721,
735
+ CUPTI_DRIVER_TRACE_CBID_cuGraphConditionalHandleCreate = 722,
736
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode_v2 = 723,
737
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges_v2 = 724,
738
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies_v2 = 725,
739
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes_v2 = 726,
740
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies_v2 = 727,
741
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies_v2 = 728,
742
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3 = 729,
743
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3_ptsz = 730,
744
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2 = 731,
745
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2_ptsz = 732,
746
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetParamInfo = 733,
747
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetParamInfo = 734,
748
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceRegisterAsyncNotification = 735,
749
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceUnregisterAsyncNotification = 736,
750
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunctionCount = 737,
751
+ CUPTI_DRIVER_TRACE_CBID_cuModuleEnumerateFunctions = 738,
752
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernelCount = 739,
753
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryEnumerateKernels = 740,
754
+ CUPTI_DRIVER_TRACE_CBID_cuFuncIsLoaded = 741,
755
+ CUPTI_DRIVER_TRACE_CBID_cuFuncLoad = 742,
756
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxCreate = 743,
757
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxDestroy = 744,
758
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDevResource = 745,
759
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevResource = 746,
760
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxGetDevResource = 747,
761
+ CUPTI_DRIVER_TRACE_CBID_cuDevResourceGenerateDesc = 748,
762
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxRecordEvent = 749,
763
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxWaitEvent = 750,
764
+ CUPTI_DRIVER_TRACE_CBID_cuDevSmResourceSplitByCount = 751,
765
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetGreenCtx = 752,
766
+ CUPTI_DRIVER_TRACE_CBID_cuCtxFromGreenCtx = 753,
767
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetLibrary = 754,
768
+ CUPTI_DRIVER_TRACE_CBID_cuCtxRecordEvent = 755,
769
+ CUPTI_DRIVER_TRACE_CBID_cuCtxWaitEvent = 756,
770
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v4 = 757,
771
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxStreamCreate = 758,
772
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_v2 = 759,
773
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_v2_ptsz = 760,
774
+ CUPTI_DRIVER_TRACE_CBID_cuMemBatchDecompressAsync = 761,
775
+ CUPTI_DRIVER_TRACE_CBID_cuMemBatchDecompressAsync_ptsz = 762,
776
+ CUPTI_DRIVER_TRACE_CBID_SIZE = 763,
777
+ CUPTI_DRIVER_TRACE_CBID_FORCE_INT = 0x7fffffff
778
+ } CUpti_driver_api_trace_cbid;
779
+
780
+ #endif
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_events.h ADDED
@@ -0,0 +1,1350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_EVENTS_H_)
51
+ #define _CUPTI_EVENTS_H_
52
+
53
+ #include <cuda.h>
54
+ #include <string.h>
55
+ #include <cuda_stdint.h>
56
+ #include <cupti_result.h>
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #if defined(__cplusplus)
67
+ extern "C" {
68
+ #endif
69
+
70
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
71
+ #pragma GCC visibility push(default)
72
+ #endif
73
+
74
+ /**
75
+ * \defgroup CUPTI_EVENT_API CUPTI Event API
76
+ * Functions, types, and enums that implement the CUPTI Event API.
77
+ *
78
+ * \note CUPTI event API from the header cupti_events.h are not supported on devices
79
+ * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
80
+ * These API will be deprecated in a future CUDA release. These are replaced by
81
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
82
+ * in the headers nvperf_host.h and nvperf_target.h which are supported on
83
+ * devices with compute capability 7.0 and higher (i.e. Volta and later GPU
84
+ * architectures).
85
+ *
86
+ * @{
87
+ */
88
+
89
+ /**
90
+ * \brief ID for an event.
91
+ *
92
+ * An event represents a countable activity, action, or occurrence on
93
+ * the device.
94
+ */
95
+ typedef uint32_t CUpti_EventID;
96
+
97
+ /**
98
+ * \brief ID for an event domain.
99
+ *
100
+ * ID for an event domain. An event domain represents a group of
101
+ * related events. A device may have multiple instances of a domain,
102
+ * indicating that the device can simultaneously record multiple
103
+ * instances of each event within that domain.
104
+ */
105
+ typedef uint32_t CUpti_EventDomainID;
106
+
107
+ /**
108
+ * \brief A group of events.
109
+ *
110
+ * An event group is a collection of events that are managed
111
+ * together. All events in an event group must belong to the same
112
+ * domain.
113
+ */
114
+ typedef void *CUpti_EventGroup;
115
+
116
+ /**
117
+ * \brief Device class.
118
+ *
119
+ * Enumeration of device classes for device attribute
120
+ * CUPTI_DEVICE_ATTR_DEVICE_CLASS.
121
+ */
122
+ typedef enum {
123
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TESLA = 0,
124
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_QUADRO = 1,
125
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_GEFORCE = 2,
126
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TEGRA = 3,
127
+ } CUpti_DeviceAttributeDeviceClass;
128
+
129
+ /**
130
+ * \brief Device attributes.
131
+ *
132
+ * CUPTI device attributes. These attributes can be read using \ref
133
+ * cuptiDeviceGetAttribute.
134
+ */
135
+ typedef enum {
136
+ /**
137
+ * Number of event IDs for a device. Value is a uint32_t.
138
+ */
139
+ CUPTI_DEVICE_ATTR_MAX_EVENT_ID = 1,
140
+ /**
141
+ * Number of event domain IDs for a device. Value is a uint32_t.
142
+ */
143
+ CUPTI_DEVICE_ATTR_MAX_EVENT_DOMAIN_ID = 2,
144
+ /**
145
+ * Get global memory bandwidth in Kbytes/sec. Value is a uint64_t.
146
+ */
147
+ CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH = 3,
148
+ /**
149
+ * Get theoretical maximum number of instructions per cycle. Value
150
+ * is a uint32_t.
151
+ */
152
+ CUPTI_DEVICE_ATTR_INSTRUCTION_PER_CYCLE = 4,
153
+ /**
154
+ * Get theoretical maximum number of single precision instructions
155
+ * that can be executed per second. Value is a uint64_t.
156
+ */
157
+ CUPTI_DEVICE_ATTR_INSTRUCTION_THROUGHPUT_SINGLE_PRECISION = 5,
158
+ /**
159
+ * Get number of frame buffers for device. Value is a uint64_t.
160
+ */
161
+ CUPTI_DEVICE_ATTR_MAX_FRAME_BUFFERS = 6,
162
+ /**
163
+ * Get PCIE link rate in Mega bits/sec for device. Return 0 if bus-type
164
+ * is non-PCIE. Value is a uint64_t.
165
+ */
166
+ CUPTI_DEVICE_ATTR_PCIE_LINK_RATE = 7,
167
+ /**
168
+ * Get PCIE link width for device. Return 0 if bus-type
169
+ * is non-PCIE. Value is a uint64_t.
170
+ */
171
+ CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH = 8,
172
+ /**
173
+ * Get PCIE generation for device. Return 0 if bus-type
174
+ * is non-PCIE. Value is a uint64_t.
175
+ */
176
+ CUPTI_DEVICE_ATTR_PCIE_GEN = 9,
177
+ /**
178
+ * Get the class for the device. Value is a
179
+ * CUpti_DeviceAttributeDeviceClass.
180
+ */
181
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS = 10,
182
+ /**
183
+ * Get the peak single precision flop per cycle. Value is a uint64_t.
184
+ */
185
+ CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE = 11,
186
+ /**
187
+ * Get the peak double precision flop per cycle. Value is a uint64_t.
188
+ */
189
+ CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE = 12,
190
+ /**
191
+ * Get number of L2 units. Value is a uint64_t.
192
+ */
193
+ CUPTI_DEVICE_ATTR_MAX_L2_UNITS = 13,
194
+ /**
195
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_SHARED
196
+ * preference. Value is a uint64_t.
197
+ */
198
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_SHARED = 14,
199
+ /**
200
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_L1
201
+ * preference. Value is a uint64_t.
202
+ */
203
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_L1 = 15,
204
+ /**
205
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_EQUAL
206
+ * preference. Value is a uint64_t.
207
+ */
208
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_EQUAL = 16,
209
+ /**
210
+ * Get the peak half precision flop per cycle. Value is a uint64_t.
211
+ */
212
+ CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE = 17,
213
+ /**
214
+ * Check if Nvlink is connected to device. Returns 1, if at least one
215
+ * Nvlink is connected to the device, returns 0 otherwise.
216
+ * Value is a uint32_t.
217
+ */
218
+ CUPTI_DEVICE_ATTR_NVLINK_PRESENT = 18,
219
+ /**
220
+ * Check if Nvlink is present between GPU and CPU. Returns Bandwidth,
221
+ * in Bytes/sec, if Nvlink is present, returns 0 otherwise.
222
+ * Value is a uint64_t.
223
+ */
224
+ CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW = 19,
225
+ /**
226
+ * Check if NVSwitch is present in the underlying topology.
227
+ * Returns 1, if present, returns 0 otherwise.
228
+ * Value is a uint32_t.
229
+ */
230
+ CUPTI_DEVICE_ATTR_NVSWITCH_PRESENT = 20,
231
+ CUPTI_DEVICE_ATTR_FORCE_INT = 0x7fffffff,
232
+ } CUpti_DeviceAttribute;
233
+
234
+ /**
235
+ * \brief Event domain attributes.
236
+ *
237
+ * Event domain attributes. Except where noted, all the attributes can
238
+ * be read using either \ref cuptiDeviceGetEventDomainAttribute or
239
+ * \ref cuptiEventDomainGetAttribute.
240
+ */
241
+ typedef enum {
242
+ /**
243
+ * Event domain name. Value is a null terminated const c-string.
244
+ */
245
+ CUPTI_EVENT_DOMAIN_ATTR_NAME = 0,
246
+ /**
247
+ * Number of instances of the domain for which event counts will be
248
+ * collected. The domain may have additional instances that cannot
249
+ * be profiled (see CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT).
250
+ * Can be read only with \ref
251
+ * cuptiDeviceGetEventDomainAttribute. Value is a uint32_t.
252
+ */
253
+ CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT = 1,
254
+ /**
255
+ * Total number of instances of the domain, including instances that
256
+ * cannot be profiled. Use CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT
257
+ * to get the number of instances that can be profiled. Can be read
258
+ * only with \ref cuptiDeviceGetEventDomainAttribute. Value is a
259
+ * uint32_t.
260
+ */
261
+ CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT = 3,
262
+ /**
263
+ * Collection method used for events contained in the event domain.
264
+ * Value is a \ref CUpti_EventCollectionMethod.
265
+ */
266
+ CUPTI_EVENT_DOMAIN_ATTR_COLLECTION_METHOD = 4,
267
+
268
+ CUPTI_EVENT_DOMAIN_ATTR_FORCE_INT = 0x7fffffff,
269
+ } CUpti_EventDomainAttribute;
270
+
271
+ /**
272
+ * \brief The collection method used for an event.
273
+ *
274
+ * The collection method indicates how an event is collected.
275
+ */
276
+ typedef enum {
277
+ /**
278
+ * Event is collected using a hardware global performance monitor.
279
+ */
280
+ CUPTI_EVENT_COLLECTION_METHOD_PM = 0,
281
+ /**
282
+ * Event is collected using a hardware SM performance monitor.
283
+ */
284
+ CUPTI_EVENT_COLLECTION_METHOD_SM = 1,
285
+ /**
286
+ * Event is collected using software instrumentation.
287
+ */
288
+ CUPTI_EVENT_COLLECTION_METHOD_INSTRUMENTED = 2,
289
+ /**
290
+ * Event is collected using NvLink throughput counter method.
291
+ */
292
+ CUPTI_EVENT_COLLECTION_METHOD_NVLINK_TC = 3,
293
+ CUPTI_EVENT_COLLECTION_METHOD_FORCE_INT = 0x7fffffff
294
+ } CUpti_EventCollectionMethod;
295
+
296
+ /**
297
+ * \brief Event group attributes.
298
+ *
299
+ * Event group attributes. These attributes can be read using \ref
300
+ * cuptiEventGroupGetAttribute. Attributes marked [rw] can also be
301
+ * written using \ref cuptiEventGroupSetAttribute.
302
+ */
303
+ typedef enum {
304
+ /**
305
+ * The domain to which the event group is bound. This attribute is
306
+ * set when the first event is added to the group. Value is a
307
+ * CUpti_EventDomainID.
308
+ */
309
+ CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID = 0,
310
+ /**
311
+ * [rw] Profile all the instances of the domain for this
312
+ * eventgroup. This feature can be used to get load balancing
313
+ * across all instances of a domain. Value is an integer.
314
+ */
315
+ CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES = 1,
316
+ /**
317
+ * [rw] Reserved for user data.
318
+ */
319
+ CUPTI_EVENT_GROUP_ATTR_USER_DATA = 2,
320
+ /**
321
+ * Number of events in the group. Value is a uint32_t.
322
+ */
323
+ CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS = 3,
324
+ /**
325
+ * Enumerates events in the group. Value is a pointer to buffer of
326
+ * size sizeof(CUpti_EventID) * num_of_events in the eventgroup.
327
+ * num_of_events can be queried using
328
+ * CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS.
329
+ */
330
+ CUPTI_EVENT_GROUP_ATTR_EVENTS = 4,
331
+ /**
332
+ * Number of instances of the domain bound to this event group that
333
+ * will be counted. Value is a uint32_t.
334
+ */
335
+ CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT = 5,
336
+ /**
337
+ * Event group scope can be set to CUPTI_EVENT_PROFILING_SCOPE_DEVICE or
338
+ * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT for an eventGroup, before
339
+ * adding any event.
340
+ * Sets the scope of eventgroup as CUPTI_EVENT_PROFILING_SCOPE_DEVICE or
341
+ * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT when the scope of the events
342
+ * that will be added is CUPTI_EVENT_PROFILING_SCOPE_BOTH.
343
+ * If profiling scope of event is either
344
+ * CUPTI_EVENT_PROFILING_SCOPE_DEVICE or CUPTI_EVENT_PROFILING_SCOPE_CONTEXT
345
+ * then setting this attribute will not affect the default scope.
346
+ * It is not allowed to add events of different scope to same eventgroup.
347
+ * Value is a uint32_t.
348
+ */
349
+ CUPTI_EVENT_GROUP_ATTR_PROFILING_SCOPE = 6,
350
+ CUPTI_EVENT_GROUP_ATTR_FORCE_INT = 0x7fffffff,
351
+ } CUpti_EventGroupAttribute;
352
+
353
+ /**
354
+ * \brief Profiling scope for event.
355
+ *
356
+ * Profiling scope of event indicates if the event can be collected at context
357
+ * scope or device scope or both i.e. it can be collected at any of context or
358
+ * device scope.
359
+ */
360
+ typedef enum {
361
+ /**
362
+ * Event is collected at context scope.
363
+ */
364
+ CUPTI_EVENT_PROFILING_SCOPE_CONTEXT = 0,
365
+ /**
366
+ * Event is collected at device scope.
367
+ */
368
+ CUPTI_EVENT_PROFILING_SCOPE_DEVICE = 1,
369
+ /**
370
+ * Event can be collected at device or context scope.
371
+ * The scope can be set using \ref cuptiEventGroupSetAttribute API.
372
+ */
373
+ CUPTI_EVENT_PROFILING_SCOPE_BOTH = 2,
374
+ CUPTI_EVENT_PROFILING_SCOPE_FORCE_INT = 0x7fffffff
375
+ } CUpti_EventProfilingScope;
376
+
377
+ /**
378
+ * \brief Event attributes.
379
+ *
380
+ * Event attributes. These attributes can be read using \ref
381
+ * cuptiEventGetAttribute.
382
+ */
383
+ typedef enum {
384
+ /**
385
+ * Event name. Value is a null terminated const c-string.
386
+ */
387
+ CUPTI_EVENT_ATTR_NAME = 0,
388
+ /**
389
+ * Short description of event. Value is a null terminated const
390
+ * c-string.
391
+ */
392
+ CUPTI_EVENT_ATTR_SHORT_DESCRIPTION = 1,
393
+ /**
394
+ * Long description of event. Value is a null terminated const
395
+ * c-string.
396
+ */
397
+ CUPTI_EVENT_ATTR_LONG_DESCRIPTION = 2,
398
+ /**
399
+ * Category of event. Value is CUpti_EventCategory.
400
+ */
401
+ CUPTI_EVENT_ATTR_CATEGORY = 3,
402
+ /**
403
+ * Profiling scope of the events. It can be either device or context or both.
404
+ * Value is a \ref CUpti_EventProfilingScope.
405
+ */
406
+ CUPTI_EVENT_ATTR_PROFILING_SCOPE = 5,
407
+
408
+ CUPTI_EVENT_ATTR_FORCE_INT = 0x7fffffff,
409
+ } CUpti_EventAttribute;
410
+
411
+ /**
412
+ * \brief Event collection modes.
413
+ *
414
+ * The event collection mode determines the period over which the
415
+ * events within the enabled event groups will be collected.
416
+ */
417
+ typedef enum {
418
+ /**
419
+ * Events are collected for the entire duration between the
420
+ * cuptiEventGroupEnable and cuptiEventGroupDisable calls.
421
+ * Event values are reset when the events are read.
422
+ * For CUDA toolkit v6.0 and older this was the default mode.
423
+ */
424
+ CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS = 0,
425
+ /**
426
+ * Events are collected only for the durations of kernel executions
427
+ * that occur between the cuptiEventGroupEnable and
428
+ * cuptiEventGroupDisable calls. Event collection begins when a
429
+ * kernel execution begins, and stops when kernel execution
430
+ * completes. Event values are reset to zero when each kernel
431
+ * execution begins. If multiple kernel executions occur between the
432
+ * cuptiEventGroupEnable and cuptiEventGroupDisable calls then the
433
+ * event values must be read after each kernel launch if those
434
+ * events need to be associated with the specific kernel launch.
435
+ * Note that collection in this mode may significantly change the
436
+ * overall performance characteristics of the application because
437
+ * kernel executions that occur between the cuptiEventGroupEnable and
438
+ * cuptiEventGroupDisable calls are serialized on the GPU.
439
+ * This is the default mode from CUDA toolkit v6.5
440
+ */
441
+ CUPTI_EVENT_COLLECTION_MODE_KERNEL = 1,
442
+ CUPTI_EVENT_COLLECTION_MODE_FORCE_INT = 0x7fffffff
443
+ } CUpti_EventCollectionMode;
444
+
445
+ /**
446
+ * \brief An event category.
447
+ *
448
+ * Each event is assigned to a category that represents the general
449
+ * type of the event. A event's category is accessed using \ref
450
+ * cuptiEventGetAttribute and the CUPTI_EVENT_ATTR_CATEGORY attribute.
451
+ */
452
+ typedef enum {
453
+ /**
454
+ * An instruction related event.
455
+ */
456
+ CUPTI_EVENT_CATEGORY_INSTRUCTION = 0,
457
+ /**
458
+ * A memory related event.
459
+ */
460
+ CUPTI_EVENT_CATEGORY_MEMORY = 1,
461
+ /**
462
+ * A cache related event.
463
+ */
464
+ CUPTI_EVENT_CATEGORY_CACHE = 2,
465
+ /**
466
+ * A profile-trigger event.
467
+ */
468
+ CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = 3,
469
+ /**
470
+ * A system event.
471
+ */
472
+ CUPTI_EVENT_CATEGORY_SYSTEM = 4,
473
+ CUPTI_EVENT_CATEGORY_FORCE_INT = 0x7fffffff
474
+ } CUpti_EventCategory;
475
+
476
+ /**
477
+ * \brief The overflow value for a CUPTI event.
478
+ *
479
+ * The CUPTI event value that indicates an overflow.
480
+ */
481
+ #define CUPTI_EVENT_OVERFLOW ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
482
+
483
+ /**
484
+ * \brief The value that indicates the event value is invalid
485
+ */
486
+ #define CUPTI_EVENT_INVALID ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
487
+
488
+ /**
489
+ * \brief Flags for cuptiEventGroupReadEvent an
490
+ * cuptiEventGroupReadAllEvents.
491
+ *
492
+ * Flags for \ref cuptiEventGroupReadEvent an \ref
493
+ * cuptiEventGroupReadAllEvents.
494
+ */
495
+ typedef enum {
496
+ /**
497
+ * No flags.
498
+ */
499
+ CUPTI_EVENT_READ_FLAG_NONE = 0,
500
+ CUPTI_EVENT_READ_FLAG_FORCE_INT = 0x7fffffff,
501
+ } CUpti_ReadEventFlags;
502
+
503
+
504
+ /**
505
+ * \brief A set of event groups.
506
+ *
507
+ * A set of event groups. When returned by \ref
508
+ * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
509
+ * a set indicates that event groups that can be enabled at the same
510
+ * time (i.e. all the events in the set can be collected
511
+ * simultaneously).
512
+ */
513
+ typedef struct {
514
+ /**
515
+ * The number of event groups in the set.
516
+ */
517
+ uint32_t numEventGroups;
518
+ /**
519
+ * An array of \p numEventGroups event groups.
520
+ */
521
+ CUpti_EventGroup *eventGroups;
522
+ } CUpti_EventGroupSet;
523
+
524
+ /**
525
+ * \brief A set of event group sets.
526
+ *
527
+ * A set of event group sets. When returned by \ref
528
+ * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
529
+ * a CUpti_EventGroupSets indicates the number of passes required to
530
+ * collect all the events, and the event groups that should be
531
+ * collected during each pass.
532
+ */
533
+ typedef struct {
534
+ /**
535
+ * Number of event group sets.
536
+ */
537
+ uint32_t numSets;
538
+ /**
539
+ * An array of \p numSets event group sets.
540
+ */
541
+ CUpti_EventGroupSet *sets;
542
+ } CUpti_EventGroupSets;
543
+
544
+ /**
545
+ * \brief Set the event collection mode.
546
+ *
547
+ * Set the event collection mode for a \p context. The \p mode
548
+ * controls the event collection behavior of all events in event
549
+ * groups created in the \p context. This API is invalid in kernel
550
+ * replay mode.
551
+ * \note \b Thread-safety: this function is thread safe.
552
+ *
553
+ * \param context The context
554
+ * \param mode The event collection mode
555
+ *
556
+ * \retval CUPTI_SUCCESS
557
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
558
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
559
+ * \retval CUPTI_ERROR_INVALID_OPERATION if called when replay mode is enabled
560
+ * \retval CUPTI_ERROR_NOT_SUPPORTED if mode is not supported on the device
561
+ */
562
+
563
+ CUptiResult CUPTIAPI cuptiSetEventCollectionMode(CUcontext context,
564
+ CUpti_EventCollectionMode mode);
565
+
566
+ /**
567
+ * \brief Read a device attribute.
568
+ *
569
+ * Read a device attribute and return it in \p *value.
570
+ * \note \b Thread-safety: this function is thread safe.
571
+ *
572
+ * \param device The CUDA device
573
+ * \param attrib The attribute to read
574
+ * \param valueSize Size of buffer pointed by the value, and
575
+ * returns the number of bytes written to \p value
576
+ * \param value Returns the value of the attribute
577
+ *
578
+ * \retval CUPTI_SUCCESS
579
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
580
+ * \retval CUPTI_ERROR_INVALID_DEVICE
581
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
582
+ * is NULL, or if \p attrib is not a device attribute
583
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
584
+ * attribute values, indicates that the \p value buffer is too small
585
+ * to hold the attribute value.
586
+ */
587
+ CUptiResult CUPTIAPI cuptiDeviceGetAttribute(CUdevice device,
588
+ CUpti_DeviceAttribute attrib,
589
+ size_t *valueSize,
590
+ void *value);
591
+
592
+ /**
593
+ * \brief Get the number of domains for a device.
594
+ *
595
+ * Returns the number of domains in \p numDomains for a device.
596
+ * \note \b Thread-safety: this function is thread safe.
597
+ *
598
+ * \param device The CUDA device
599
+ * \param numDomains Returns the number of domains
600
+ *
601
+ * \retval CUPTI_SUCCESS
602
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
603
+ * \retval CUPTI_ERROR_INVALID_DEVICE
604
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
605
+ */
606
+ CUptiResult CUPTIAPI cuptiDeviceGetNumEventDomains(CUdevice device,
607
+ uint32_t *numDomains);
608
+
609
+ /**
610
+ * \brief Get the event domains for a device.
611
+ *
612
+ * Returns the event domains IDs in \p domainArray for a device. The
613
+ * size of the \p domainArray buffer is given by \p
614
+ * *arraySizeBytes. The size of the \p domainArray buffer must be at
615
+ * least \p numdomains * sizeof(CUpti_EventDomainID) or else all
616
+ * domains will not be returned. The value returned in \p
617
+ * *arraySizeBytes contains the number of bytes returned in \p
618
+ * domainArray.
619
+ * \note \b Thread-safety: this function is thread safe.
620
+ *
621
+ * \param device The CUDA device
622
+ * \param arraySizeBytes The size of \p domainArray in bytes, and
623
+ * returns the number of bytes written to \p domainArray
624
+ * \param domainArray Returns the IDs of the event domains for the device
625
+ *
626
+ * \retval CUPTI_SUCCESS
627
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
628
+ * \retval CUPTI_ERROR_INVALID_DEVICE
629
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
630
+ * \p domainArray are NULL
631
+ */
632
+ CUptiResult CUPTIAPI cuptiDeviceEnumEventDomains(CUdevice device,
633
+ size_t *arraySizeBytes,
634
+ CUpti_EventDomainID *domainArray);
635
+
636
+ /**
637
+ * \brief Read an event domain attribute.
638
+ *
639
+ * Returns an event domain attribute in \p *value. The size of the \p
640
+ * value buffer is given by \p *valueSize. The value returned in \p
641
+ * *valueSize contains the number of bytes returned in \p value.
642
+ *
643
+ * If the attribute value is a c-string that is longer than \p
644
+ * *valueSize, then only the first \p *valueSize characters will be
645
+ * returned and there will be no terminating null byte.
646
+ * \note \b Thread-safety: this function is thread safe.
647
+ *
648
+ * \param device The CUDA device
649
+ * \param eventDomain ID of the event domain
650
+ * \param attrib The event domain attribute to read
651
+ * \param valueSize The size of the \p value buffer in bytes, and
652
+ * returns the number of bytes written to \p value
653
+ * \param value Returns the attribute's value
654
+ *
655
+ * \retval CUPTI_SUCCESS
656
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
657
+ * \retval CUPTI_ERROR_INVALID_DEVICE
658
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
659
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
660
+ * is NULL, or if \p attrib is not an event domain attribute
661
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
662
+ * attribute values, indicates that the \p value buffer is too small
663
+ * to hold the attribute value.
664
+ */
665
+ CUptiResult CUPTIAPI cuptiDeviceGetEventDomainAttribute(CUdevice device,
666
+ CUpti_EventDomainID eventDomain,
667
+ CUpti_EventDomainAttribute attrib,
668
+ size_t *valueSize,
669
+ void *value);
670
+
671
+ /**
672
+ * \brief Get the number of event domains available on any device.
673
+ *
674
+ * Returns the total number of event domains available on any
675
+ * CUDA-capable device.
676
+ * \note \b Thread-safety: this function is thread safe.
677
+ *
678
+ * \param numDomains Returns the number of domains
679
+ *
680
+ * \retval CUPTI_SUCCESS
681
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
682
+ */
683
+ CUptiResult CUPTIAPI cuptiGetNumEventDomains(uint32_t *numDomains);
684
+
685
+ /**
686
+ * \brief Get the event domains available on any device.
687
+ *
688
+ * Returns all the event domains available on any CUDA-capable device.
689
+ * Event domain IDs are returned in \p domainArray. The size of the \p
690
+ * domainArray buffer is given by \p *arraySizeBytes. The size of the
691
+ * \p domainArray buffer must be at least \p numDomains *
692
+ * sizeof(CUpti_EventDomainID) or all domains will not be
693
+ * returned. The value returned in \p *arraySizeBytes contains the
694
+ * number of bytes returned in \p domainArray.
695
+ * \note \b Thread-safety: this function is thread safe.
696
+ *
697
+ * \param arraySizeBytes The size of \p domainArray in bytes, and
698
+ * returns the number of bytes written to \p domainArray
699
+ * \param domainArray Returns all the event domains
700
+ *
701
+ * \retval CUPTI_SUCCESS
702
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
703
+ * \p domainArray are NULL
704
+ */
705
+ CUptiResult CUPTIAPI cuptiEnumEventDomains(size_t *arraySizeBytes,
706
+ CUpti_EventDomainID *domainArray);
707
+
708
+ /**
709
+ * \brief Read an event domain attribute.
710
+ *
711
+ * Returns an event domain attribute in \p *value. The size of the \p
712
+ * value buffer is given by \p *valueSize. The value returned in \p
713
+ * *valueSize contains the number of bytes returned in \p value.
714
+ *
715
+ * If the attribute value is a c-string that is longer than \p
716
+ * *valueSize, then only the first \p *valueSize characters will be
717
+ * returned and there will be no terminating null byte.
718
+ * \note \b Thread-safety: this function is thread safe.
719
+ *
720
+ * \param eventDomain ID of the event domain
721
+ * \param attrib The event domain attribute to read
722
+ * \param valueSize The size of the \p value buffer in bytes, and
723
+ * returns the number of bytes written to \p value
724
+ * \param value Returns the attribute's value
725
+ *
726
+ * \retval CUPTI_SUCCESS
727
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
728
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
729
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
730
+ * is NULL, or if \p attrib is not an event domain attribute
731
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
732
+ * attribute values, indicates that the \p value buffer is too small
733
+ * to hold the attribute value.
734
+ */
735
+ CUptiResult CUPTIAPI cuptiEventDomainGetAttribute(CUpti_EventDomainID eventDomain,
736
+ CUpti_EventDomainAttribute attrib,
737
+ size_t *valueSize,
738
+ void *value);
739
+
740
+ /**
741
+ * \brief Get number of events in a domain.
742
+ *
743
+ * Returns the number of events in \p numEvents for a domain.
744
+ * \note \b Thread-safety: this function is thread safe.
745
+ *
746
+ * \param eventDomain ID of the event domain
747
+ * \param numEvents Returns the number of events in the domain
748
+ *
749
+ * \retval CUPTI_SUCCESS
750
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
751
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
752
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL
753
+ */
754
+ CUptiResult CUPTIAPI cuptiEventDomainGetNumEvents(CUpti_EventDomainID eventDomain,
755
+ uint32_t *numEvents);
756
+
757
+ /**
758
+ * \brief Get the events in a domain.
759
+ *
760
+ * Returns the event IDs in \p eventArray for a domain. The size of
761
+ * the \p eventArray buffer is given by \p *arraySizeBytes. The size
762
+ * of the \p eventArray buffer must be at least \p numdomainevents *
763
+ * sizeof(CUpti_EventID) or else all events will not be returned. The
764
+ * value returned in \p *arraySizeBytes contains the number of bytes
765
+ * returned in \p eventArray.
766
+ * \note \b Thread-safety: this function is thread safe.
767
+ *
768
+ * \param eventDomain ID of the event domain
769
+ * \param arraySizeBytes The size of \p eventArray in bytes, and
770
+ * returns the number of bytes written to \p eventArray
771
+ * \param eventArray Returns the IDs of the events in the domain
772
+ *
773
+ * \retval CUPTI_SUCCESS
774
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
775
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
776
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or \p
777
+ * eventArray are NULL
778
+ */
779
+ CUptiResult CUPTIAPI cuptiEventDomainEnumEvents(CUpti_EventDomainID eventDomain,
780
+ size_t *arraySizeBytes,
781
+ CUpti_EventID *eventArray);
782
+
783
+ /**
784
+ * \brief Get an event attribute.
785
+ *
786
+ * Returns an event attribute in \p *value. The size of the \p
787
+ * value buffer is given by \p *valueSize. The value returned in \p
788
+ * *valueSize contains the number of bytes returned in \p value.
789
+ *
790
+ * If the attribute value is a c-string that is longer than \p
791
+ * *valueSize, then only the first \p *valueSize characters will be
792
+ * returned and there will be no terminating null byte.
793
+ * \note \b Thread-safety: this function is thread safe.
794
+ *
795
+ * \param event ID of the event
796
+ * \param attrib The event attribute to read
797
+ * \param valueSize The size of the \p value buffer in bytes, and
798
+ * returns the number of bytes written to \p value
799
+ * \param value Returns the attribute's value
800
+ *
801
+ * \retval CUPTI_SUCCESS
802
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
803
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
804
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
805
+ * is NULL, or if \p attrib is not an event attribute
806
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
807
+ * attribute values, indicates that the \p value buffer is too small
808
+ * to hold the attribute value.
809
+ */
810
+ CUptiResult CUPTIAPI cuptiEventGetAttribute(CUpti_EventID event,
811
+ CUpti_EventAttribute attrib,
812
+ size_t *valueSize,
813
+ void *value);
814
+
815
+ /**
816
+ * \brief Find an event by name.
817
+ *
818
+ * Find an event by name and return the event ID in \p *event.
819
+ * \note \b Thread-safety: this function is thread safe.
820
+ *
821
+ * \param device The CUDA device
822
+ * \param eventName The name of the event to find
823
+ * \param event Returns the ID of the found event or undefined if
824
+ * unable to find the event
825
+ *
826
+ * \retval CUPTI_SUCCESS
827
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
828
+ * \retval CUPTI_ERROR_INVALID_DEVICE
829
+ * \retval CUPTI_ERROR_INVALID_EVENT_NAME if unable to find an event
830
+ * with name \p eventName. In this case \p *event is undefined
831
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventName or \p event are NULL
832
+ */
833
+ CUptiResult CUPTIAPI cuptiEventGetIdFromName(CUdevice device,
834
+ const char *eventName,
835
+ CUpti_EventID *event);
836
+
837
+ /**
838
+ * \brief Create a new event group for a context.
839
+ *
840
+ * Creates a new event group for \p context and returns the new group
841
+ * in \p *eventGroup.
842
+ * \note \p flags are reserved for future use and should be set to zero.
843
+ * \note \b Thread-safety: this function is thread safe.
844
+ *
845
+ * \param context The context for the event group
846
+ * \param eventGroup Returns the new event group
847
+ * \param flags Reserved - must be zero
848
+ *
849
+ * \retval CUPTI_SUCCESS
850
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
851
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
852
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY
853
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
854
+ */
855
+ CUptiResult CUPTIAPI cuptiEventGroupCreate(CUcontext context,
856
+ CUpti_EventGroup *eventGroup,
857
+ uint32_t flags);
858
+
859
+ /**
860
+ * \brief Destroy an event group.
861
+ *
862
+ * Destroy an \p eventGroup and free its resources. An event group
863
+ * cannot be destroyed if it is enabled.
864
+ * \note \b Thread-safety: this function is thread safe.
865
+ *
866
+ * \param eventGroup The event group to destroy
867
+ *
868
+ * \retval CUPTI_SUCCESS
869
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
870
+ * \retval CUPTI_ERROR_INVALID_OPERATION if the event group is enabled
871
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if eventGroup is NULL
872
+ */
873
+ CUptiResult CUPTIAPI cuptiEventGroupDestroy(CUpti_EventGroup eventGroup);
874
+
875
+ /**
876
+ * \brief Read an event group attribute.
877
+ *
878
+ * Read an event group attribute and return it in \p *value.
879
+ * \note \b Thread-safety: this function is thread safe but client
880
+ * must guard against simultaneous destruction or modification of \p
881
+ * eventGroup (for example, client must guard against simultaneous
882
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
883
+ * etc.), and must guard against simultaneous destruction of the
884
+ * context in which \p eventGroup was created (for example, client
885
+ * must guard against simultaneous calls to cudaDeviceReset,
886
+ * cuCtxDestroy, etc.).
887
+ *
888
+ * \param eventGroup The event group
889
+ * \param attrib The attribute to read
890
+ * \param valueSize Size of buffer pointed by the value, and
891
+ * returns the number of bytes written to \p value
892
+ * \param value Returns the value of the attribute
893
+ *
894
+ * \retval CUPTI_SUCCESS
895
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
896
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
897
+ * is NULL, or if \p attrib is not an eventgroup attribute
898
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
899
+ * attribute values, indicates that the \p value buffer is too small
900
+ * to hold the attribute value.
901
+ */
902
+ CUptiResult CUPTIAPI cuptiEventGroupGetAttribute(CUpti_EventGroup eventGroup,
903
+ CUpti_EventGroupAttribute attrib,
904
+ size_t *valueSize,
905
+ void *value);
906
+
907
+ /**
908
+ * \brief Write an event group attribute.
909
+ *
910
+ * Write an event group attribute.
911
+ * \note \b Thread-safety: this function is thread safe.
912
+ *
913
+ * \param eventGroup The event group
914
+ * \param attrib The attribute to write
915
+ * \param valueSize The size, in bytes, of the value
916
+ * \param value The attribute value to write
917
+ *
918
+ * \retval CUPTI_SUCCESS
919
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
920
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
921
+ * is NULL, or if \p attrib is not an event group attribute, or if
922
+ * \p attrib is not a writable attribute
923
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT Indicates that
924
+ * the \p value buffer is too small to hold the attribute value.
925
+ */
926
+ CUptiResult CUPTIAPI cuptiEventGroupSetAttribute(CUpti_EventGroup eventGroup,
927
+ CUpti_EventGroupAttribute attrib,
928
+ size_t valueSize,
929
+ void *value);
930
+
931
+ /**
932
+ * \brief Add an event to an event group.
933
+ *
934
+ * Add an event to an event group. The event add can fail for a number of reasons:
935
+ * \li The event group is enabled
936
+ * \li The event does not belong to the same event domain as the
937
+ * events that are already in the event group
938
+ * \li Device limitations on the events that can belong to the same group
939
+ * \li The event group is full
940
+ *
941
+ * \note \b Thread-safety: this function is thread safe.
942
+ *
943
+ * \param eventGroup The event group
944
+ * \param event The event to add to the group
945
+ *
946
+ * \retval CUPTI_SUCCESS
947
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
948
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
949
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY
950
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
951
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p event belongs to a
952
+ * different event domain than the events already in \p eventGroup, or
953
+ * if a device limitation prevents \p event from being collected at
954
+ * the same time as the events already in \p eventGroup
955
+ * \retval CUPTI_ERROR_MAX_LIMIT_REACHED if \p eventGroup is full
956
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
957
+ */
958
+ CUptiResult CUPTIAPI cuptiEventGroupAddEvent(CUpti_EventGroup eventGroup,
959
+ CUpti_EventID event);
960
+
961
+ /**
962
+ * \brief Remove an event from an event group.
963
+ *
964
+ * Remove \p event from the an event group. The event cannot be
965
+ * removed if the event group is enabled.
966
+ * \note \b Thread-safety: this function is thread safe.
967
+ *
968
+ * \param eventGroup The event group
969
+ * \param event The event to remove from the group
970
+ *
971
+ * \retval CUPTI_SUCCESS
972
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
973
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
974
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
975
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
976
+ */
977
+ CUptiResult CUPTIAPI cuptiEventGroupRemoveEvent(CUpti_EventGroup eventGroup,
978
+ CUpti_EventID event);
979
+
980
+ /**
981
+ * \brief Remove all events from an event group.
982
+ *
983
+ * Remove all events from an event group. Events cannot be removed if
984
+ * the event group is enabled.
985
+ * \note \b Thread-safety: this function is thread safe.
986
+ *
987
+ * \param eventGroup The event group
988
+ *
989
+ * \retval CUPTI_SUCCESS
990
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
991
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
992
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
993
+ */
994
+ CUptiResult CUPTIAPI cuptiEventGroupRemoveAllEvents(CUpti_EventGroup eventGroup);
995
+
996
+ /**
997
+ * \brief Zero all the event counts in an event group.
998
+ *
999
+ * Zero all the event counts in an event group.
1000
+ * \note \b Thread-safety: this function is thread safe but client
1001
+ * must guard against simultaneous destruction or modification of \p
1002
+ * eventGroup (for example, client must guard against simultaneous
1003
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1004
+ * etc.), and must guard against simultaneous destruction of the
1005
+ * context in which \p eventGroup was created (for example, client
1006
+ * must guard against simultaneous calls to cudaDeviceReset,
1007
+ * cuCtxDestroy, etc.).
1008
+ *
1009
+ * \param eventGroup The event group
1010
+ *
1011
+ * \retval CUPTI_SUCCESS
1012
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1013
+ * \retval CUPTI_ERROR_HARDWARE
1014
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1015
+ */
1016
+ CUptiResult CUPTIAPI cuptiEventGroupResetAllEvents(CUpti_EventGroup eventGroup);
1017
+
1018
+ /**
1019
+ * \brief Enable an event group.
1020
+ *
1021
+ * Enable an event group. Enabling an event group zeros the value of
1022
+ * all the events in the group and then starts collection of those
1023
+ * events.
1024
+ * \note \b Thread-safety: this function is thread safe.
1025
+ *
1026
+ * \param eventGroup The event group
1027
+ *
1028
+ * \retval CUPTI_SUCCESS
1029
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1030
+ * \retval CUPTI_ERROR_HARDWARE
1031
+ * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
1032
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
1033
+ * enabled due to other already enabled event groups
1034
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1035
+ * \retval CUPTI_ERROR_HARDWARE_BUSY if another client is profiling
1036
+ * and hardware is busy
1037
+ */
1038
+ CUptiResult CUPTIAPI cuptiEventGroupEnable(CUpti_EventGroup eventGroup);
1039
+
1040
+ /**
1041
+ * \brief Disable an event group.
1042
+ *
1043
+ * Disable an event group. Disabling an event group stops collection
1044
+ * of events contained in the group.
1045
+ * \note \b Thread-safety: this function is thread safe.
1046
+ *
1047
+ * \param eventGroup The event group
1048
+ *
1049
+ * \retval CUPTI_SUCCESS
1050
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1051
+ * \retval CUPTI_ERROR_HARDWARE
1052
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1053
+ */
1054
+ CUptiResult CUPTIAPI cuptiEventGroupDisable(CUpti_EventGroup eventGroup);
1055
+
1056
+ /**
1057
+ * \brief Read the value for an event in an event group.
1058
+ *
1059
+ * Read the value for an event in an event group. The event value is
1060
+ * returned in the \p eventValueBuffer buffer. \p
1061
+ * eventValueBufferSizeBytes indicates the size of the \p
1062
+ * eventValueBuffer buffer. The buffer must be at least sizeof(uint64)
1063
+ * if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set
1064
+ * on the group containing the event. The buffer must be at least
1065
+ * (sizeof(uint64) * number of domain instances) if
1066
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is set on the
1067
+ * group.
1068
+ *
1069
+ * If any instance of an event counter overflows, the value returned
1070
+ * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
1071
+ *
1072
+ * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
1073
+ *
1074
+ * Reading an event from a disabled event group is not allowed. After
1075
+ * being read, an event's value is reset to zero.
1076
+ * \note \b Thread-safety: this function is thread safe but client
1077
+ * must guard against simultaneous destruction or modification of \p
1078
+ * eventGroup (for example, client must guard against simultaneous
1079
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1080
+ * etc.), and must guard against simultaneous destruction of the
1081
+ * context in which \p eventGroup was created (for example, client
1082
+ * must guard against simultaneous calls to cudaDeviceReset,
1083
+ * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
1084
+ * called simultaneously with this function, then returned event
1085
+ * values are undefined.
1086
+ *
1087
+ * \param eventGroup The event group
1088
+ * \param flags Flags controlling the reading mode
1089
+ * \param event The event to read
1090
+ * \param eventValueBufferSizeBytes The size of \p eventValueBuffer
1091
+ * in bytes, and returns the number of bytes written to \p
1092
+ * eventValueBuffer
1093
+ * \param eventValueBuffer Returns the event value(s)
1094
+ *
1095
+ * \retval CUPTI_SUCCESS
1096
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1097
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
1098
+ * \retval CUPTI_ERROR_HARDWARE
1099
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
1100
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
1101
+ * eventValueBufferSizeBytes or \p eventValueBuffer is NULL
1102
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
1103
+ * is not sufficient
1104
+ */
1105
+ CUptiResult CUPTIAPI cuptiEventGroupReadEvent(CUpti_EventGroup eventGroup,
1106
+ CUpti_ReadEventFlags flags,
1107
+ CUpti_EventID event,
1108
+ size_t *eventValueBufferSizeBytes,
1109
+ uint64_t *eventValueBuffer);
1110
+
1111
+ /**
1112
+ * \brief Read the values for all the events in an event group.
1113
+ *
1114
+ * Read the values for all the events in an event group. The event
1115
+ * values are returned in the \p eventValueBuffer buffer. \p
1116
+ * eventValueBufferSizeBytes indicates the size of \p
1117
+ * eventValueBuffer. The buffer must be at least (sizeof(uint64) *
1118
+ * number of events in group) if
1119
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set on
1120
+ * the group containing the events. The buffer must be at least
1121
+ * (sizeof(uint64) * number of domain instances * number of events in
1122
+ * group) if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is
1123
+ * set on the group.
1124
+ *
1125
+ * The data format returned in \p eventValueBuffer is:
1126
+ * - domain instance 0: event0 event1 ... eventN
1127
+ * - domain instance 1: event0 event1 ... eventN
1128
+ * - ...
1129
+ * - domain instance M: event0 event1 ... eventN
1130
+ *
1131
+ * The event order in \p eventValueBuffer is returned in \p
1132
+ * eventIdArray. The size of \p eventIdArray is specified in \p
1133
+ * eventIdArraySizeBytes. The size should be at least
1134
+ * (sizeof(CUpti_EventID) * number of events in group).
1135
+ *
1136
+ * If any instance of any event counter overflows, the value returned
1137
+ * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
1138
+ *
1139
+ * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
1140
+ *
1141
+ * Reading events from a disabled event group is not allowed. After
1142
+ * being read, an event's value is reset to zero.
1143
+ * \note \b Thread-safety: this function is thread safe but client
1144
+ * must guard against simultaneous destruction or modification of \p
1145
+ * eventGroup (for example, client must guard against simultaneous
1146
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1147
+ * etc.), and must guard against simultaneous destruction of the
1148
+ * context in which \p eventGroup was created (for example, client
1149
+ * must guard against simultaneous calls to cudaDeviceReset,
1150
+ * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
1151
+ * called simultaneously with this function, then returned event
1152
+ * values are undefined.
1153
+ *
1154
+ * \param eventGroup The event group
1155
+ * \param flags Flags controlling the reading mode
1156
+ * \param eventValueBufferSizeBytes The size of \p eventValueBuffer in
1157
+ * bytes, and returns the number of bytes written to \p
1158
+ * eventValueBuffer
1159
+ * \param eventValueBuffer Returns the event values
1160
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes,
1161
+ * and returns the number of bytes written to \p eventIdArray
1162
+ * \param eventIdArray Returns the IDs of the events in the same order
1163
+ * as the values return in eventValueBuffer.
1164
+ * \param numEventIdsRead Returns the number of event IDs returned
1165
+ * in \p eventIdArray
1166
+ *
1167
+ * \retval CUPTI_SUCCESS
1168
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1169
+ * \retval CUPTI_ERROR_HARDWARE
1170
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
1171
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
1172
+ * eventValueBufferSizeBytes, \p eventValueBuffer, \p
1173
+ * eventIdArraySizeBytes, \p eventIdArray or \p numEventIdsRead is
1174
+ * NULL
1175
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
1176
+ * or \p eventIdArray is not sufficient
1177
+ */
1178
+ CUptiResult CUPTIAPI cuptiEventGroupReadAllEvents(CUpti_EventGroup eventGroup,
1179
+ CUpti_ReadEventFlags flags,
1180
+ size_t *eventValueBufferSizeBytes,
1181
+ uint64_t *eventValueBuffer,
1182
+ size_t *eventIdArraySizeBytes,
1183
+ CUpti_EventID *eventIdArray,
1184
+ size_t *numEventIdsRead);
1185
+
1186
+ /**
1187
+ * \brief For a set of events, get the grouping that indicates the
1188
+ * number of passes and the event groups necessary to collect the
1189
+ * events.
1190
+ *
1191
+ * The number of events that can be collected simultaneously varies by
1192
+ * device and by the type of the events. When events can be collected
1193
+ * simultaneously, they may need to be grouped into multiple event
1194
+ * groups because they are from different event domains. This function
1195
+ * takes a set of events and determines how many passes are required
1196
+ * to collect all those events, and which events can be collected
1197
+ * simultaneously in each pass.
1198
+ *
1199
+ * The CUpti_EventGroupSets returned in \p eventGroupPasses indicates
1200
+ * how many passes are required to collect the events with the \p
1201
+ * numSets field. Within each event group set, the \p sets array
1202
+ * indicates the event groups that should be collected on each pass.
1203
+ * \note \b Thread-safety: this function is thread safe, but client
1204
+ * must guard against another thread simultaneously destroying \p
1205
+ * context.
1206
+ *
1207
+ * \param context The context for event collection
1208
+ * \param eventIdArraySizeBytes Size of \p eventIdArray in bytes
1209
+ * \param eventIdArray Array of event IDs that need to be grouped
1210
+ * \param eventGroupPasses Returns a CUpti_EventGroupSets object that
1211
+ * indicates the number of passes required to collect the events and
1212
+ * the events to collect on each pass
1213
+ *
1214
+ * \retval CUPTI_SUCCESS
1215
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1216
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
1217
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
1218
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArray or
1219
+ * \p eventGroupPasses is NULL
1220
+ */
1221
+ CUptiResult CUPTIAPI cuptiEventGroupSetsCreate(CUcontext context,
1222
+ size_t eventIdArraySizeBytes,
1223
+ CUpti_EventID *eventIdArray,
1224
+ CUpti_EventGroupSets **eventGroupPasses);
1225
+
1226
+ /**
1227
 + * \brief Destroy an event group sets object.
1228
+ *
1229
+ * Destroy a CUpti_EventGroupSets object.
1230
+ * \note \b Thread-safety: this function is thread safe.
1231
+ *
1232
+ * \param eventGroupSets The object to destroy
1233
+ *
1234
+ * \retval CUPTI_SUCCESS
1235
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1236
+ * \retval CUPTI_ERROR_INVALID_OPERATION if any of the event groups
1237
+ * contained in the sets is enabled
1238
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSets is NULL
1239
+ */
1240
+ CUptiResult CUPTIAPI cuptiEventGroupSetsDestroy(CUpti_EventGroupSets *eventGroupSets);
1241
+
1242
+
1243
+ /**
1244
+ * \brief Enable an event group set.
1245
+ *
1246
+ * Enable a set of event groups. Enabling a set of event groups zeros the value of
1247
+ * all the events in all the groups and then starts collection of those events.
1248
+ * \note \b Thread-safety: this function is thread safe.
1249
+ *
1250
+ * \param eventGroupSet The pointer to the event group set
1251
+ *
1252
+ * \retval CUPTI_SUCCESS
1253
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1254
+ * \retval CUPTI_ERROR_HARDWARE
1255
+ * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
1256
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
1257
+ * enabled due to other already enabled event groups
1258
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1259
+ * \retval CUPTI_ERROR_HARDWARE_BUSY if other client is profiling and hardware is
1260
+ * busy
1261
+ */
1262
+ CUptiResult CUPTIAPI cuptiEventGroupSetEnable(CUpti_EventGroupSet *eventGroupSet);
1263
+
1264
+ /**
1265
+ * \brief Disable an event group set.
1266
+ *
1267
+ * Disable a set of event groups. Disabling a set of event groups
1268
+ * stops collection of events contained in the groups.
1269
+ * \note \b Thread-safety: this function is thread safe.
1270
+ * \note \b If this call fails, some of the event groups in the set may be disabled
1271
+ * and other event groups may remain enabled.
1272
+ *
1273
+ * \param eventGroupSet The pointer to the event group set
1274
+ * \retval CUPTI_SUCCESS
1275
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1276
+ * \retval CUPTI_ERROR_HARDWARE
1277
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1278
+ */
1279
+ CUptiResult CUPTIAPI cuptiEventGroupSetDisable(CUpti_EventGroupSet *eventGroupSet);
1280
+
1281
+ /**
1282
+ * \brief Enable kernel replay mode.
1283
+ *
1284
+ * Set profiling mode for the context to replay mode. In this mode,
1285
+ * any number of events can be collected in one run of the kernel. The
1286
+ * event collection mode will automatically switch to
1287
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. In this mode, \ref
1288
+ * cuptiSetEventCollectionMode will return
1289
+ * CUPTI_ERROR_INVALID_OPERATION.
1290
+ * \note \b Kernels might take longer to run if many events are enabled.
1291
+ * \note \b Thread-safety: this function is thread safe.
1292
+ *
1293
+ * \param context The context
1294
+ * \retval CUPTI_SUCCESS
1295
+ */
1296
+ CUptiResult CUPTIAPI cuptiEnableKernelReplayMode(CUcontext context);
1297
+
1298
+ /**
1299
+ * \brief Disable kernel replay mode.
1300
+ *
1301
+ * Set profiling mode for the context to non-replay (default)
1302
+ * mode. Event collection mode will be set to
1303
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. All previously enabled
1304
+ * event groups and event group sets will be disabled.
1305
+ * \note \b Thread-safety: this function is thread safe.
1306
+ *
1307
+ * \param context The context
1308
+ * \retval CUPTI_SUCCESS
1309
+ */
1310
+ CUptiResult CUPTIAPI cuptiDisableKernelReplayMode(CUcontext context);
1311
+
1312
+ /**
1313
+ * \brief Function type for getting updates on kernel replay.
1314
+ *
1315
+ * \param kernelName The mangled kernel name
1316
+ * \param numReplaysDone Number of replays done so far
1317
+ * \param customData Pointer of any custom data passed in when subscribing
1318
+ */
1319
+ typedef void (CUPTIAPI *CUpti_KernelReplayUpdateFunc)(
1320
+ const char *kernelName,
1321
+ int numReplaysDone,
1322
+ void *customData);
1323
+
1324
+ /**
1325
+ * \brief Subscribe to kernel replay updates.
1326
+ *
1327
+ * When subscribed, the function pointer passed in will be called each time a
1328
+ * kernel run is finished during kernel replay. Previously subscribed function
1329
+ * pointer will be replaced. Pass in NULL as the function pointer unsubscribes
1330
+ * the update.
1331
+ *
1332
+ * \param updateFunc The update function pointer
1333
+ * \param customData Pointer to any custom data
1334
+ * \retval CUPTI_SUCCESS
1335
+ */
1336
+ CUptiResult CUPTIAPI cuptiKernelReplaySubscribeUpdate(CUpti_KernelReplayUpdateFunc updateFunc, void *customData);
1337
+
1338
+ /** @} */ /* END CUPTI_EVENT_API */
1339
+
1340
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
1341
+ #pragma GCC visibility pop
1342
+ #endif
1343
+
1344
+ #if defined(__cplusplus)
1345
+ }
1346
+ #endif
1347
+
1348
+ #endif /*_CUPTI_EVENTS_H_*/
1349
+
1350
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_metrics.h ADDED
@@ -0,0 +1,825 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2011-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_METRIC_H_)
51
+ #define _CUPTI_METRIC_H_
52
+
53
+ #include <cuda.h>
54
+ #include <string.h>
55
+ #include <cuda_stdint.h>
56
+ #include <cupti_result.h>
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #if defined(__cplusplus)
67
+ extern "C" {
68
+ #endif
69
+
70
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
71
+ #pragma GCC visibility push(default)
72
+ #endif
73
+
74
+ /**
75
+ * \defgroup CUPTI_METRIC_API CUPTI Metric API
76
+ * Functions, types, and enums that implement the CUPTI Metric API.
77
+ *
78
+ * \note CUPTI metric API from the header cupti_metrics.h are not supported on devices
79
+ * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
80
+ * These API will be deprecated in a future CUDA release. These are replaced by
81
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
82
+ * in the headers nvperf_host.h and nvperf_target.h which are supported on
83
+ * devices with compute capability 7.0 and higher (i.e. Volta and later GPU
84
+ * architectures).
85
+ *
86
+ * @{
87
+ */
88
+
89
+ /**
90
+ * \brief ID for a metric.
91
+ *
92
+ * A metric provides a measure of some aspect of the device.
93
+ */
94
+ typedef uint32_t CUpti_MetricID;
95
+
96
+ /**
97
+ * \brief A metric category.
98
+ *
99
+ * Each metric is assigned to a category that represents the general
100
+ * type of the metric. A metric's category is accessed using \ref
101
+ * cuptiMetricGetAttribute and the CUPTI_METRIC_ATTR_CATEGORY
102
+ * attribute.
103
+ */
104
+ typedef enum {
105
+ /**
106
+ * A memory related metric.
107
+ */
108
+ CUPTI_METRIC_CATEGORY_MEMORY = 0,
109
+ /**
110
+ * An instruction related metric.
111
+ */
112
+ CUPTI_METRIC_CATEGORY_INSTRUCTION = 1,
113
+ /**
114
+ * A multiprocessor related metric.
115
+ */
116
+ CUPTI_METRIC_CATEGORY_MULTIPROCESSOR = 2,
117
+ /**
118
+ * A cache related metric.
119
+ */
120
+ CUPTI_METRIC_CATEGORY_CACHE = 3,
121
+ /**
122
+ * A texture related metric.
123
+ */
124
+ CUPTI_METRIC_CATEGORY_TEXTURE = 4,
125
+ /**
126
 + * An NVLink related metric.
127
+ */
128
+ CUPTI_METRIC_CATEGORY_NVLINK = 5,
129
+ /**
130
 + * A PCIe related metric.
131
+ */
132
+ CUPTI_METRIC_CATEGORY_PCIE = 6,
133
+ CUPTI_METRIC_CATEGORY_FORCE_INT = 0x7fffffff,
134
+ } CUpti_MetricCategory;
135
+
136
+ /**
137
+ * \brief A metric evaluation mode.
138
+ *
139
+ * A metric can be evaluated per hardware instance to know the load balancing
140
+ * across instances of a domain or the metric can be evaluated in aggregate mode
141
+ * when the events involved in metric evaluation are from different event
142
+ * domains. It might be possible to evaluate some metrics in both
143
+ * modes for convenience. A metric's evaluation mode is accessed using \ref
144
+ * CUpti_MetricEvaluationMode and the CUPTI_METRIC_ATTR_EVALUATION_MODE
145
+ * attribute.
146
+ */
147
+ typedef enum {
148
+ /**
149
+ * If this bit is set, the metric can be profiled for each instance of the
150
+ * domain. The event values passed to \ref cuptiMetricGetValue can contain
151
+ * values for one instance of the domain. And \ref cuptiMetricGetValue can
152
+ * be called for each instance.
153
+ */
154
+ CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE = 1,
155
+ /**
156
+ * If this bit is set, the metric can be profiled over all instances. The
157
+ * event values passed to \ref cuptiMetricGetValue can be aggregated values
158
+ * of events for all instances of the domain.
159
+ */
160
+ CUPTI_METRIC_EVALUATION_MODE_AGGREGATE = 1 << 1,
161
+ CUPTI_METRIC_EVALUATION_MODE_FORCE_INT = 0x7fffffff,
162
+ } CUpti_MetricEvaluationMode;
163
+
164
+ /**
165
+ * \brief Kinds of metric values.
166
+ *
167
+ * Metric values can be one of several different kinds. Corresponding
168
+ * to each kind is a member of the CUpti_MetricValue union. The metric
169
+ * value returned by \ref cuptiMetricGetValue should be accessed using
170
+ * the appropriate member of that union based on its value kind.
171
+ */
172
+ typedef enum {
173
+ /**
174
+ * The metric value is a 64-bit double.
175
+ */
176
+ CUPTI_METRIC_VALUE_KIND_DOUBLE = 0,
177
+ /**
178
+ * The metric value is a 64-bit unsigned integer.
179
+ */
180
+ CUPTI_METRIC_VALUE_KIND_UINT64 = 1,
181
+ /**
182
+ * The metric value is a percentage represented by a 64-bit
183
+ * double. For example, 57.5% is represented by the value 57.5.
184
+ */
185
+ CUPTI_METRIC_VALUE_KIND_PERCENT = 2,
186
+ /**
187
+ * The metric value is a throughput represented by a 64-bit
188
+ * integer. The unit for throughput values is bytes/second.
189
+ */
190
+ CUPTI_METRIC_VALUE_KIND_THROUGHPUT = 3,
191
+ /**
192
+ * The metric value is a 64-bit signed integer.
193
+ */
194
+ CUPTI_METRIC_VALUE_KIND_INT64 = 4,
195
+ /**
196
+ * The metric value is a utilization level, as represented by
197
+ * CUpti_MetricValueUtilizationLevel.
198
+ */
199
+ CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL = 5,
200
+
201
+ CUPTI_METRIC_VALUE_KIND_FORCE_INT = 0x7fffffff
202
+ } CUpti_MetricValueKind;
203
+
204
+ /**
205
+ * \brief Enumeration of utilization levels for metrics values of kind
206
+ * CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL. Utilization values can
207
+ * vary from IDLE (0) to MAX (10) but the enumeration only provides
208
+ * specific names for a few values.
209
+ */
210
+ typedef enum {
211
+ CUPTI_METRIC_VALUE_UTILIZATION_IDLE = 0,
212
+ CUPTI_METRIC_VALUE_UTILIZATION_LOW = 2,
213
+ CUPTI_METRIC_VALUE_UTILIZATION_MID = 5,
214
+ CUPTI_METRIC_VALUE_UTILIZATION_HIGH = 8,
215
+ CUPTI_METRIC_VALUE_UTILIZATION_MAX = 10,
216
+ CUPTI_METRIC_VALUE_UTILIZATION_FORCE_INT = 0x7fffffff
217
+ } CUpti_MetricValueUtilizationLevel;
218
+
219
+ /**
220
+ * \brief Metric attributes.
221
+ *
222
+ * Metric attributes describe properties of a metric. These attributes
223
+ * can be read using \ref cuptiMetricGetAttribute.
224
+ */
225
+ typedef enum {
226
+ /**
227
+ * Metric name. Value is a null terminated const c-string.
228
+ */
229
+ CUPTI_METRIC_ATTR_NAME = 0,
230
+ /**
231
+ * Short description of metric. Value is a null terminated const c-string.
232
+ */
233
+ CUPTI_METRIC_ATTR_SHORT_DESCRIPTION = 1,
234
+ /**
235
+ * Long description of metric. Value is a null terminated const c-string.
236
+ */
237
+ CUPTI_METRIC_ATTR_LONG_DESCRIPTION = 2,
238
+ /**
239
+ * Category of the metric. Value is of type CUpti_MetricCategory.
240
+ */
241
+ CUPTI_METRIC_ATTR_CATEGORY = 3,
242
+ /**
243
+ * Value type of the metric. Value is of type CUpti_MetricValueKind.
244
+ */
245
+ CUPTI_METRIC_ATTR_VALUE_KIND = 4,
246
+ /**
247
+ * Metric evaluation mode. Value is of type CUpti_MetricEvaluationMode.
248
+ */
249
+ CUPTI_METRIC_ATTR_EVALUATION_MODE = 5,
250
+ CUPTI_METRIC_ATTR_FORCE_INT = 0x7fffffff,
251
+ } CUpti_MetricAttribute;
252
+
253
+ /**
254
+ * \brief A metric value.
255
+ *
256
+ * Metric values can be one of several different kinds. Corresponding
257
+ * to each kind is a member of the CUpti_MetricValue union. The metric
258
+ * value returned by \ref cuptiMetricGetValue should be accessed using
259
+ * the appropriate member of that union based on its value kind.
260
+ */
261
+ typedef union {
262
+ /*
263
+ * Value for CUPTI_METRIC_VALUE_KIND_DOUBLE.
264
+ */
265
+ double metricValueDouble;
266
+ /*
267
+ * Value for CUPTI_METRIC_VALUE_KIND_UINT64.
268
+ */
269
+ uint64_t metricValueUint64;
270
+ /*
271
+ * Value for CUPTI_METRIC_VALUE_KIND_INT64.
272
+ */
273
+ int64_t metricValueInt64;
274
+ /*
275
+ * Value for CUPTI_METRIC_VALUE_KIND_PERCENT. For example, 57.5% is
276
+ * represented by the value 57.5.
277
+ */
278
+ double metricValuePercent;
279
+ /*
280
+ * Value for CUPTI_METRIC_VALUE_KIND_THROUGHPUT. The unit for
281
+ * throughput values is bytes/second.
282
+ */
283
+ uint64_t metricValueThroughput;
284
+ /*
285
+ * Value for CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL.
286
+ */
287
+ CUpti_MetricValueUtilizationLevel metricValueUtilizationLevel;
288
+ } CUpti_MetricValue;
289
+
290
+ /**
291
+ * \brief Device class.
292
+ *
293
+ * Enumeration of device classes for metric property
294
+ * CUPTI_METRIC_PROPERTY_DEVICE_CLASS.
295
+ */
296
+ typedef enum {
297
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TESLA = 0,
298
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_QUADRO = 1,
299
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_GEFORCE = 2,
300
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TEGRA = 3,
301
+ } CUpti_MetricPropertyDeviceClass;
302
+
303
+ /**
304
+ * \brief Metric device properties.
305
+ *
306
+ * Metric device properties describe device properties which are needed for a metric.
307
+ * Some of these properties can be collected using cuDeviceGetAttribute.
308
+ */
309
+ typedef enum {
310
+ /*
311
+ * Number of multiprocessors on a device. This can be collected
312
+ * using value of \param CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT of
313
+ * cuDeviceGetAttribute.
314
+ */
315
+ CUPTI_METRIC_PROPERTY_MULTIPROCESSOR_COUNT,
316
+ /*
317
+ * Maximum number of warps on a multiprocessor. This can be
318
+ * collected using ratio of value of \param
319
+ * CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR and \param
320
+ * CU_DEVICE_ATTRIBUTE_WARP_SIZE of cuDeviceGetAttribute.
321
+ */
322
+ CUPTI_METRIC_PROPERTY_WARPS_PER_MULTIPROCESSOR,
323
+ /*
324
+ * GPU Time for kernel in ns. This should be profiled using CUPTI
325
+ * Activity API.
326
+ */
327
+ CUPTI_METRIC_PROPERTY_KERNEL_GPU_TIME,
328
+ /*
329
+ * Clock rate for device in KHz. This should be collected using
330
+ * value of \param CU_DEVICE_ATTRIBUTE_CLOCK_RATE of
331
+ * cuDeviceGetAttribute.
332
+ */
333
+ CUPTI_METRIC_PROPERTY_CLOCK_RATE,
334
+ /*
335
+ * Number of Frame buffer units for device. This should be collected
336
+ * using value of \param CUPTI_DEVICE_ATTRIBUTE_MAX_FRAME_BUFFERS of
337
+ * cuptiDeviceGetAttribute.
338
+ */
339
+ CUPTI_METRIC_PROPERTY_FRAME_BUFFER_COUNT,
340
+ /*
341
+ * Global memory bandwidth in KBytes/sec. This should be collected
342
+ * using value of \param CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH
343
+ * of cuptiDeviceGetAttribute.
344
+ */
345
+ CUPTI_METRIC_PROPERTY_GLOBAL_MEMORY_BANDWIDTH,
346
+ /*
347
+ * PCIE link rate in Mega bits/sec. This should be collected using
348
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_RATE of
349
+ * cuptiDeviceGetAttribute.
350
+ */
351
+ CUPTI_METRIC_PROPERTY_PCIE_LINK_RATE,
352
+ /*
353
+ * PCIE link width for device. This should be collected using
354
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH of
355
+ * cuptiDeviceGetAttribute.
356
+ */
357
+ CUPTI_METRIC_PROPERTY_PCIE_LINK_WIDTH,
358
+ /*
359
+ * PCIE generation for device. This should be collected using
360
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_GEN of
361
+ * cuptiDeviceGetAttribute.
362
+ */
363
+ CUPTI_METRIC_PROPERTY_PCIE_GEN,
364
+ /*
365
+ * The device class. This should be collected using
366
+ * value of \param CUPTI_DEVICE_ATTR_DEVICE_CLASS of
367
+ * cuptiDeviceGetAttribute.
368
+ */
369
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS,
370
+ /*
371
+ * Peak single precision floating point operations that
372
+ * can be performed in one cycle by the device.
373
+ * This should be collected using value of
374
+ * \param CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE of
375
+ * cuptiDeviceGetAttribute.
376
+ */
377
+ CUPTI_METRIC_PROPERTY_FLOP_SP_PER_CYCLE,
378
+ /*
379
+ * Peak double precision floating point operations that
380
+ * can be performed in one cycle by the device.
381
+ * This should be collected using value of
382
+ * \param CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE of
383
+ * cuptiDeviceGetAttribute.
384
+ */
385
+ CUPTI_METRIC_PROPERTY_FLOP_DP_PER_CYCLE,
386
+ /*
387
+ * Number of L2 units on a device. This can be collected
388
+ * using value of \param CUPTI_DEVICE_ATTR_MAX_L2_UNITS of
389
+ * cuDeviceGetAttribute.
390
+ */
391
+ CUPTI_METRIC_PROPERTY_L2_UNITS,
392
+ /*
393
+ * Whether ECC support is enabled on the device. This can be
394
+ * collected using value of \param CU_DEVICE_ATTRIBUTE_ECC_ENABLED of
395
+ * cuDeviceGetAttribute.
396
+ */
397
+ CUPTI_METRIC_PROPERTY_ECC_ENABLED,
398
+ /*
399
+ * Peak half precision floating point operations that
400
+ * can be performed in one cycle by the device.
401
+ * This should be collected using value of
402
+ * \param CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE of
403
+ * cuptiDeviceGetAttribute.
404
+ */
405
+ CUPTI_METRIC_PROPERTY_FLOP_HP_PER_CYCLE,
406
+ /*
407
 + * NVLINK Bandwidth for device. This should be collected
408
+ * using value of \param CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW of
409
+ * cuptiDeviceGetAttribute.
410
+ */
411
+ CUPTI_METRIC_PROPERTY_GPU_CPU_NVLINK_BANDWIDTH,
412
+ } CUpti_MetricPropertyID;
413
+
414
+ /**
415
+ * \brief Get the total number of metrics available on any device.
416
+ *
417
+ * Returns the total number of metrics available on any CUDA-capable
418
+ * devices.
419
+ *
420
+ * \param numMetrics Returns the number of metrics
421
+ *
422
+ * \retval CUPTI_SUCCESS
423
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL
424
+ */
425
+ CUptiResult CUPTIAPI cuptiGetNumMetrics(uint32_t *numMetrics);
426
+
427
+ /**
428
+ * \brief Get all the metrics available on any device.
429
+ *
430
+ * Returns the metric IDs in \p metricArray for all CUDA-capable
431
+ * devices. The size of the \p metricArray buffer is given by \p
432
+ * *arraySizeBytes. The size of the \p metricArray buffer must be at
433
+ * least \p numMetrics * sizeof(CUpti_MetricID) or all metric IDs will
434
+ * not be returned. The value returned in \p *arraySizeBytes contains
435
+ * the number of bytes returned in \p metricArray.
436
+ *
437
+ * \param arraySizeBytes The size of \p metricArray in bytes, and
438
+ * returns the number of bytes written to \p metricArray
439
+ * \param metricArray Returns the IDs of the metrics
440
+ *
441
+ * \retval CUPTI_SUCCESS
442
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
443
+ * \p metricArray are NULL
444
+ */
445
+ CUptiResult CUPTIAPI cuptiEnumMetrics(size_t *arraySizeBytes,
446
+ CUpti_MetricID *metricArray);
447
+
448
+ /**
449
+ * \brief Get the number of metrics for a device.
450
+ *
451
+ * Returns the number of metrics available for a device.
452
+ *
453
+ * \param device The CUDA device
454
+ * \param numMetrics Returns the number of metrics available for the
455
+ * device
456
+ *
457
+ * \retval CUPTI_SUCCESS
458
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
459
+ * \retval CUPTI_ERROR_INVALID_DEVICE
460
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL
461
+ */
462
+ CUptiResult CUPTIAPI cuptiDeviceGetNumMetrics(CUdevice device,
463
+ uint32_t *numMetrics);
464
+
465
+ /**
466
+ * \brief Get the metrics for a device.
467
+ *
468
+ * Returns the metric IDs in \p metricArray for a device. The size of
469
+ * the \p metricArray buffer is given by \p *arraySizeBytes. The size
470
+ * of the \p metricArray buffer must be at least \p numMetrics *
471
+ * sizeof(CUpti_MetricID) or else all metric IDs will not be
472
+ * returned. The value returned in \p *arraySizeBytes contains the
473
+ * number of bytes returned in \p metricArray.
474
+ *
475
+ * \param device The CUDA device
476
+ * \param arraySizeBytes The size of \p metricArray in bytes, and
477
+ * returns the number of bytes written to \p metricArray
478
+ * \param metricArray Returns the IDs of the metrics for the device
479
+ *
480
+ * \retval CUPTI_SUCCESS
481
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
482
+ * \retval CUPTI_ERROR_INVALID_DEVICE
483
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
484
+ * \p metricArray are NULL
485
+ */
486
+ CUptiResult CUPTIAPI cuptiDeviceEnumMetrics(CUdevice device,
487
+ size_t *arraySizeBytes,
488
+ CUpti_MetricID *metricArray);
489
+
490
+ /**
491
+ * \brief Get a metric attribute.
492
+ *
493
+ * Returns a metric attribute in \p *value. The size of the \p
494
+ * value buffer is given by \p *valueSize. The value returned in \p
495
+ * *valueSize contains the number of bytes returned in \p value.
496
+ *
497
+ * If the attribute value is a c-string that is longer than \p
498
+ * *valueSize, then only the first \p *valueSize characters will be
499
+ * returned and there will be no terminating null byte.
500
+ *
501
+ * \param metric ID of the metric
502
+ * \param attrib The metric attribute to read
503
+ * \param valueSize The size of the \p value buffer in bytes, and
504
+ * returns the number of bytes written to \p value
505
+ * \param value Returns the attribute's value
506
+ *
507
+ * \retval CUPTI_SUCCESS
508
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
509
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
510
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
511
+ * is NULL, or if \p attrib is not a metric attribute
512
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
513
+ * attribute values, indicates that the \p value buffer is too small
514
+ * to hold the attribute value.
515
+ */
516
+ CUptiResult CUPTIAPI cuptiMetricGetAttribute(CUpti_MetricID metric,
517
+ CUpti_MetricAttribute attrib,
518
+ size_t *valueSize,
519
+ void *value);
520
+
521
+ /**
522
 + * \brief Find a metric by name.
523
+ *
524
+ * Find a metric by name and return the metric ID in \p *metric.
525
+ *
526
+ * \param device The CUDA device
527
+ * \param metricName The name of metric to find
528
+ * \param metric Returns the ID of the found metric or undefined if
529
+ * unable to find the metric
530
+ *
531
+ * \retval CUPTI_SUCCESS
532
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
533
+ * \retval CUPTI_ERROR_INVALID_DEVICE
534
+ * \retval CUPTI_ERROR_INVALID_METRIC_NAME if unable to find a metric
535
+ * with name \p metricName. In this case \p *metric is undefined
536
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricName or \p
537
+ * metric are NULL.
538
+ */
539
+ CUptiResult CUPTIAPI cuptiMetricGetIdFromName(CUdevice device,
540
+ const char *metricName,
541
+ CUpti_MetricID *metric);
542
+
543
+ /**
544
+ * \brief Get number of events required to calculate a metric.
545
+ *
546
+ * Returns the number of events in \p numEvents that are required to
547
+ * calculate a metric.
548
+ *
549
+ * \param metric ID of the metric
550
+ * \param numEvents Returns the number of events required for the metric
551
+ *
552
+ * \retval CUPTI_SUCCESS
553
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
554
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
555
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL
556
+ */
557
+ CUptiResult CUPTIAPI cuptiMetricGetNumEvents(CUpti_MetricID metric,
558
+ uint32_t *numEvents);
559
+
560
+ /**
561
 + * \brief Get the events required to calculate a metric.
562
+ *
563
+ * Gets the event IDs in \p eventIdArray required to calculate a \p
564
+ * metric. The size of the \p eventIdArray buffer is given by \p
565
+ * *eventIdArraySizeBytes and must be at least \p numEvents *
566
+ * sizeof(CUpti_EventID) or all events will not be returned. The value
567
+ * returned in \p *eventIdArraySizeBytes contains the number of bytes
568
+ * returned in \p eventIdArray.
569
+ *
570
+ * \param metric ID of the metric
571
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes,
572
+ * and returns the number of bytes written to \p eventIdArray
573
+ * \param eventIdArray Returns the IDs of the events required to
574
+ * calculate \p metric
575
+ *
576
+ * \retval CUPTI_SUCCESS
577
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
578
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
579
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArraySizeBytes or \p
580
+ * eventIdArray are NULL.
581
+ */
582
+ CUptiResult CUPTIAPI cuptiMetricEnumEvents(CUpti_MetricID metric,
583
+ size_t *eventIdArraySizeBytes,
584
+ CUpti_EventID *eventIdArray);
585
+
586
+ /**
587
+ * \brief Get number of properties required to calculate a metric.
588
+ *
589
+ * Returns the number of properties in \p numProp that are required to
590
+ * calculate a metric.
591
+ *
592
+ * \param metric ID of the metric
593
+ * \param numProp Returns the number of properties required for the
594
+ * metric
595
+ *
596
+ * \retval CUPTI_SUCCESS
597
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
598
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
599
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numProp is NULL
600
+ */
601
+ CUptiResult CUPTIAPI cuptiMetricGetNumProperties(CUpti_MetricID metric,
602
+ uint32_t *numProp);
603
+
604
+ /**
605
+ * \brief Get the properties required to calculating a metric.
606
+ *
607
+ * Gets the property IDs in \p propIdArray required to calculate a \p
608
+ * metric. The size of the \p propIdArray buffer is given by \p
609
+ * *propIdArraySizeBytes and must be at least \p numProp *
610
+ * sizeof(CUpti_DeviceAttribute) or all properties will not be
611
+ * returned. The value returned in \p *propIdArraySizeBytes contains
612
+ * the number of bytes returned in \p propIdArray.
613
+ *
614
+ * \param metric ID of the metric
615
+ * \param propIdArraySizeBytes The size of \p propIdArray in bytes,
616
+ * and returns the number of bytes written to \p propIdArray
617
+ * \param propIdArray Returns the IDs of the properties required to
618
+ * calculate \p metric
619
+ *
620
+ * \retval CUPTI_SUCCESS
621
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
622
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
623
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p propIdArraySizeBytes or \p
624
+ * propIdArray are NULL.
625
+ */
626
+ CUptiResult CUPTIAPI cuptiMetricEnumProperties(CUpti_MetricID metric,
627
+ size_t *propIdArraySizeBytes,
628
+ CUpti_MetricPropertyID *propIdArray);
629
+
630
+
631
+ /**
632
+ * \brief For a metric get the groups of events that must be collected
633
+ * in the same pass.
634
+ *
635
+ * For a metric get the groups of events that must be collected in the
636
+ * same pass to ensure that the metric is calculated correctly. If the
637
+ * events are not collected as specified then the metric value may be
638
+ * inaccurate.
639
+ *
640
+ * The function returns NULL if a metric does not have any required
641
+ * event group. In this case the events needed for the metric can be
642
+ * grouped in any manner for collection.
643
+ *
644
+ * \param context The context for event collection
645
+ * \param metric The metric ID
646
+ * \param eventGroupSets Returns a CUpti_EventGroupSets object that
647
+ * indicates the events that must be collected in the same pass to
648
+ * ensure the metric is calculated correctly. Returns NULL if no
649
+ * grouping is required for metric
650
+ * \retval CUPTI_SUCCESS
651
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
652
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
653
+ */
654
+ CUptiResult CUPTIAPI cuptiMetricGetRequiredEventGroupSets(CUcontext context,
655
+ CUpti_MetricID metric,
656
+ CUpti_EventGroupSets **eventGroupSets);
657
+
658
+ /**
659
+ * \brief For a set of metrics, get the grouping that indicates the
660
+ * number of passes and the event groups necessary to collect the
661
+ * events required for those metrics.
662
+ *
663
+ * For a set of metrics, get the grouping that indicates the number of
664
+ * passes and the event groups necessary to collect the events
665
+ * required for those metrics.
666
+ *
667
+ * \see cuptiEventGroupSetsCreate for details on event group set
668
+ * creation.
669
+ *
670
+ * \param context The context for event collection
671
+ * \param metricIdArraySizeBytes Size of the metricIdArray in bytes
672
+ * \param metricIdArray Array of metric IDs
673
+ * \param eventGroupPasses Returns a CUpti_EventGroupSets object that
674
+ * indicates the number of passes required to collect the events and
675
+ * the events to collect on each pass
676
+ *
677
+ * \retval CUPTI_SUCCESS
678
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
679
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
680
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
681
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricIdArray or
682
+ * \p eventGroupPasses is NULL
683
+ */
684
+ CUptiResult CUPTIAPI cuptiMetricCreateEventGroupSets(CUcontext context,
685
+ size_t metricIdArraySizeBytes,
686
+ CUpti_MetricID *metricIdArray,
687
+ CUpti_EventGroupSets **eventGroupPasses);
688
+
689
+ /**
690
+ * \brief Calculate the value for a metric.
691
+ *
692
+ * Use the events collected for a metric to calculate the metric
693
+ * value. Metric value evaluation depends on the evaluation mode
694
+ * \ref CUpti_MetricEvaluationMode that the metric supports.
695
+ * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE,
696
+ * then it assumes that the input event value is for one domain instance.
697
+ * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE,
698
+ * it assumes that input event values are
699
+ * normalized to represent all domain instances on a device. For the
700
+ * most accurate metric collection, the events required for the metric
701
+ * should be collected for all profiled domain instances. For example,
702
+ * to collect all instances of an event, set the
703
+ * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on
704
+ * the group containing the event to 1. The normalized value for the
705
+ * event is then: (\p sum_event_values * \p totalInstanceCount) / \p
706
+ * instanceCount, where \p sum_event_values is the summation of the
707
+ * event values across all profiled domain instances, \p
708
+ * totalInstanceCount is obtained from querying
709
+ * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount
710
+ * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or
711
+ * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT).
712
+ *
713
+ * \param device The CUDA device that the metric is being calculated for
714
+ * \param metric The metric ID
715
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes
716
+ * \param eventIdArray The event IDs required to calculate \p metric
717
+ * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes
718
+ * \param eventValueArray The normalized event values required to
719
+ * calculate \p metric. The values must be order to match the order of
720
+ * events in \p eventIdArray
721
+ * \param timeDuration The duration over which the events were
722
+ * collected, in ns
723
+ * \param metricValue Returns the value for the metric
724
+ *
725
+ * \retval CUPTI_SUCCESS
726
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
727
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
728
+ * \retval CUPTI_ERROR_INVALID_OPERATION
729
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the
730
+ * eventIdArray does not contain all the events needed for metric
731
+ * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the
732
+ * event values required for the metric is CUPTI_EVENT_OVERFLOW
733
+ * \retval CUPTI_ERROR_INVALID_METRIC_VALUE if the computed metric value
734
+ * cannot be represented in the metric's value type. For example,
735
+ * if the metric value type is unsigned and the computed metric value is negative
736
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue,
737
+ * \p eventIdArray or \p eventValueArray is NULL
738
+ */
739
+ CUptiResult CUPTIAPI cuptiMetricGetValue(CUdevice device,
740
+ CUpti_MetricID metric,
741
+ size_t eventIdArraySizeBytes,
742
+ CUpti_EventID *eventIdArray,
743
+ size_t eventValueArraySizeBytes,
744
+ uint64_t *eventValueArray,
745
+ uint64_t timeDuration,
746
+ CUpti_MetricValue *metricValue);
747
+
748
+ /**
749
+ * \brief Calculate the value for a metric.
750
+ *
751
+ * Use the events and properties collected for a metric to calculate
752
+ * the metric value. Metric value evaluation depends on the evaluation
753
+ * mode \ref CUpti_MetricEvaluationMode that the metric supports. If
754
+ * a metric has evaluation mode as
755
+ * CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE, then it assumes that the
756
+ * input event value is for one domain instance. If a metric has
757
+ * evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE, it
758
+ * assumes that input event values are normalized to represent all
759
+ * domain instances on a device. For the most accurate metric
760
+ * collection, the events required for the metric should be collected
761
+ * for all profiled domain instances. For example, to collect all
762
+ * instances of an event, set the
763
+ * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on
764
+ * the group containing the event to 1. The normalized value for the
765
+ * event is then: (\p sum_event_values * \p totalInstanceCount) / \p
766
+ * instanceCount, where \p sum_event_values is the summation of the
767
+ * event values across all profiled domain instances, \p
768
+ * totalInstanceCount is obtained from querying
769
+ * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount
770
+ * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or
771
+ * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT).
772
+ *
773
+ * \param metric The metric ID
774
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes
775
+ * \param eventIdArray The event IDs required to calculate \p metric
776
+ * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes
777
+ * \param eventValueArray The normalized event values required to
778
+ * calculate \p metric. The values must be order to match the order of
779
+ * events in \p eventIdArray
780
+ * \param propIdArraySizeBytes The size of \p propIdArray in bytes
781
+ * \param propIdArray The metric property IDs required to calculate \p metric
782
+ * \param propValueArraySizeBytes The size of \p propValueArray in bytes
783
+ * \param propValueArray The metric property values required to
784
+ * calculate \p metric. The values must be order to match the order of
785
+ * metric properties in \p propIdArray
786
+ * \param metricValue Returns the value for the metric
787
+ *
788
+ * \retval CUPTI_SUCCESS
789
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
790
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
791
+ * \retval CUPTI_ERROR_INVALID_OPERATION
792
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the
793
+ * eventIdArray does not contain all the events needed for metric
794
+ * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the
795
+ * event values required for the metric is CUPTI_EVENT_OVERFLOW
796
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if the computed metric value
797
+ * cannot be represented in the metric's value type. For example,
798
+ * if the metric value type is unsigned and the computed metric value is negative
799
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue,
800
+ * \p eventIdArray or \p eventValueArray is NULL
801
+ */
802
+ CUptiResult CUPTIAPI cuptiMetricGetValue2(CUpti_MetricID metric,
803
+ size_t eventIdArraySizeBytes,
804
+ CUpti_EventID *eventIdArray,
805
+ size_t eventValueArraySizeBytes,
806
+ uint64_t *eventValueArray,
807
+ size_t propIdArraySizeBytes,
808
+ CUpti_MetricPropertyID *propIdArray,
809
+ size_t propValueArraySizeBytes,
810
+ uint64_t *propValueArray,
811
+ CUpti_MetricValue *metricValue);
812
+
813
+ /** @} */ /* END CUPTI_METRIC_API */
814
+
815
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
816
+ #pragma GCC visibility pop
817
+ #endif
818
+
819
+ #if defined(__cplusplus)
820
+ }
821
+ #endif
822
+
823
+ #endif /*_CUPTI_METRIC_H_*/
824
+
825
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2013-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
51
+ #pragma GCC visibility push(default)
52
+ #endif
53
+
54
+ typedef enum {
55
+ CUPTI_CBID_NVTX_INVALID = 0,
56
+ CUPTI_CBID_NVTX_nvtxMarkA = 1,
57
+ CUPTI_CBID_NVTX_nvtxMarkW = 2,
58
+ CUPTI_CBID_NVTX_nvtxMarkEx = 3,
59
+ CUPTI_CBID_NVTX_nvtxRangeStartA = 4,
60
+ CUPTI_CBID_NVTX_nvtxRangeStartW = 5,
61
+ CUPTI_CBID_NVTX_nvtxRangeStartEx = 6,
62
+ CUPTI_CBID_NVTX_nvtxRangeEnd = 7,
63
+ CUPTI_CBID_NVTX_nvtxRangePushA = 8,
64
+ CUPTI_CBID_NVTX_nvtxRangePushW = 9,
65
+ CUPTI_CBID_NVTX_nvtxRangePushEx = 10,
66
+ CUPTI_CBID_NVTX_nvtxRangePop = 11,
67
+ CUPTI_CBID_NVTX_nvtxNameCategoryA = 12,
68
+ CUPTI_CBID_NVTX_nvtxNameCategoryW = 13,
69
+ CUPTI_CBID_NVTX_nvtxNameOsThreadA = 14,
70
+ CUPTI_CBID_NVTX_nvtxNameOsThreadW = 15,
71
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceA = 16,
72
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceW = 17,
73
+ CUPTI_CBID_NVTX_nvtxNameCuContextA = 18,
74
+ CUPTI_CBID_NVTX_nvtxNameCuContextW = 19,
75
+ CUPTI_CBID_NVTX_nvtxNameCuStreamA = 20,
76
+ CUPTI_CBID_NVTX_nvtxNameCuStreamW = 21,
77
+ CUPTI_CBID_NVTX_nvtxNameCuEventA = 22,
78
+ CUPTI_CBID_NVTX_nvtxNameCuEventW = 23,
79
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceA = 24,
80
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceW = 25,
81
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamA = 26,
82
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamW = 27,
83
+ CUPTI_CBID_NVTX_nvtxNameCudaEventA = 28,
84
+ CUPTI_CBID_NVTX_nvtxNameCudaEventW = 29,
85
+ CUPTI_CBID_NVTX_nvtxDomainMarkEx = 30,
86
+ CUPTI_CBID_NVTX_nvtxDomainRangeStartEx = 31,
87
+ CUPTI_CBID_NVTX_nvtxDomainRangeEnd = 32,
88
+ CUPTI_CBID_NVTX_nvtxDomainRangePushEx = 33,
89
+ CUPTI_CBID_NVTX_nvtxDomainRangePop = 34,
90
+ CUPTI_CBID_NVTX_nvtxDomainResourceCreate = 35,
91
+ CUPTI_CBID_NVTX_nvtxDomainResourceDestroy = 36,
92
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryA = 37,
93
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryW = 38,
94
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringA = 39,
95
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringW = 40,
96
+ CUPTI_CBID_NVTX_nvtxDomainCreateA = 41,
97
+ CUPTI_CBID_NVTX_nvtxDomainCreateW = 42,
98
+ CUPTI_CBID_NVTX_nvtxDomainDestroy = 43,
99
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserCreate = 44,
100
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserDestroy = 45,
101
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireStart = 46,
102
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireFailed = 47,
103
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireSuccess = 48,
104
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserReleasing = 49,
105
+ CUPTI_CBID_NVTX_SIZE,
106
+ CUPTI_CBID_NVTX_FORCE_INT = 0x7fffffff
107
+ } CUpti_nvtx_api_trace_cbid;
108
+
109
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
110
+ #pragma GCC visibility pop
111
+ #endif
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h ADDED
@@ -0,0 +1,936 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PCSAMPLING_H_)
51
+ #define _CUPTI_PCSAMPLING_H_
52
+
53
+ #include <cuda.h>
54
+ #include <stdint.h>
55
+ #include <stddef.h>
56
+ #include "cupti_result.h"
57
+ #include "cupti_common.h"
58
+
59
+
60
+ #if defined(__cplusplus)
61
+ extern "C" {
62
+ #endif
63
+
64
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
65
+ #pragma GCC visibility push(default)
66
+ #endif
67
+
68
+ /**
69
+ * \defgroup CUPTI_PCSAMPLING_API CUPTI PC Sampling API
70
+ * Functions, types, and enums that implement the CUPTI PC Sampling API.
71
+ * @{
72
+ */
73
+
74
+ #ifndef CUPTI_PCSAMPLING_STRUCT_SIZE
75
+ #define CUPTI_PCSAMPLING_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
76
+ #endif
77
+
78
+ #ifndef CUPTI_STALL_REASON_STRING_SIZE
79
+ #define CUPTI_STALL_REASON_STRING_SIZE 128
80
+ #endif
81
+
82
+ /**
83
+ * \brief PC Sampling collection mode
84
+ */
85
+ typedef enum
86
+ {
87
+ /**
88
+ * INVALID Value
89
+ */
90
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_INVALID = 0,
91
+ /**
92
+ * Continuous mode. Kernels are not serialized in this mode.
93
+ */
94
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS = 1,
95
+ /**
96
+ * Serialized mode. Kernels are serialized in this mode.
97
+ */
98
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED = 2,
99
+ } CUpti_PCSamplingCollectionMode;
100
+
101
+ /**
102
+ * \brief PC Sampling stall reasons
103
+ */
104
+ typedef struct PACKED_ALIGNMENT
105
+ {
106
+ /**
107
+ * [r] Collected stall reason index
108
+ */
109
+ uint32_t pcSamplingStallReasonIndex;
110
+ /**
111
+ * [r] Number of times the PC was sampled with the stallReason.
112
+ */
113
+ uint32_t samples;
114
+ } CUpti_PCSamplingStallReason;
115
+
116
+ /**
117
+ * \brief PC Sampling data
118
+ */
119
+ typedef struct PACKED_ALIGNMENT
120
+ {
121
+ /**
122
+ * [w] Size of the data structure.
123
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
124
+ * available in the structure. Used to preserve backward compatibility.
125
+ */
126
+ size_t size;
127
+ /**
128
+ * [r] Unique cubin id
129
+ */
130
+ uint64_t cubinCrc;
131
+ /**
132
+ * [r] PC offset
133
+ */
134
+ uint64_t pcOffset;
135
+ /**
136
+ * The function's unique symbol index in the module.
137
+ */
138
+ uint32_t functionIndex;
139
+ /**
140
+ * Padding
141
+ */
142
+ uint32_t pad;
143
+ /**
144
+ * [r] The function name. This name string might be shared across all the records
145
+ * including records from activity APIs representing the same function, and so it should not be
146
+ * modified or freed until post processing of all the records is done. Once done, it is user’s responsibility to
147
+ * free the memory using free() function.
148
+ */
149
+ char* functionName;
150
+ /**
151
+ * [r] Collected stall reason count
152
+ */
153
+ size_t stallReasonCount;
154
+ /**
155
+ * [r] Stall reason id
156
+ * Total samples
157
+ */
158
+ CUpti_PCSamplingStallReason *stallReason;
159
+ /**
160
+ * The correlation ID of the kernel to which this result is associated. Only valid for serialized mode of pc sampling collection.
161
+ * For continous mode of collection the correlationId will be set to 0.
162
+ */
163
+ uint32_t correlationId;
164
+ } CUpti_PCSamplingPCData;
165
+
166
+ /**
167
+ * \brief PC Sampling output data format
168
+ */
169
+ typedef enum
170
+ {
171
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_INVALID = 0,
172
+ /**
173
+ * HW buffer data will be parsed during collection of data
174
+ */
175
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED = 1,
176
+ } CUpti_PCSamplingOutputDataFormat;
177
+
178
+ /**
179
+ * \brief Collected PC Sampling data
180
+ *
181
+ */
182
+ typedef struct PACKED_ALIGNMENT
183
+ {
184
+ /**
185
+ * [w] Size of the data structure.
186
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
187
+ * available in the structure. Used to preserve backward compatibility.
188
+ */
189
+ size_t size;
190
+ /**
191
+ * [w] Number of PCs to be collected
192
+ */
193
+ size_t collectNumPcs;
194
+ /**
195
+ * [r] Number of samples collected across all PCs.
196
+ * It includes samples for user modules, samples for non-user kernels and dropped samples.
197
+ * It includes counts for all non selected stall reasons.
198
+ * CUPTI does not provide PC records for non-user kernels.
199
+ * CUPTI does not provide PC records for instructions for which all selected stall reason metrics counts are zero.
200
+ */
201
+ uint64_t totalSamples;
202
+ /**
203
+ * [r] Number of samples that were dropped by hardware due to backpressure/overflow.
204
+ */
205
+ uint64_t droppedSamples;
206
+ /**
207
+ * [r] Number of PCs collected
208
+ */
209
+ size_t totalNumPcs;
210
+ /**
211
+ * [r] Number of PCs available for collection
212
+ */
213
+ size_t remainingNumPcs;
214
+ /**
215
+ * [r] Unique identifier for each range.
216
+ * Data collected across multiple ranges in multiple buffers can be identified using range id.
217
+ */
218
+ uint64_t rangeId;
219
+ /**
220
+ * [r] Profiled PC data
221
+ * This data struct should have enough memory to collect number of PCs mentioned in \brief collectNumPcs
222
+ */
223
+ CUpti_PCSamplingPCData *pPcData;
224
+ /**
225
+ * [r] Number of samples collected across all non user kernels PCs.
226
+ * It includes samples for non-user kernels.
227
+ * It includes counts for all non selected stall reasons as well.
228
+ * CUPTI does not provide PC records for non-user kernels.
229
+ */
230
+ uint64_t nonUsrKernelsTotalSamples;
231
+
232
+ /**
233
+ * [r] Status of the hardware buffer.
234
+ * CUPTI returns the error code CUPTI_ERROR_OUT_OF_MEMORY when hardware buffer is full.
235
+ * When hardware buffer is full, user will get pc data as 0. To mitigate this issue, one or more of the below options can be tried:
236
+ * 1. Increase the hardware buffer size using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
237
+ * 2. Decrease the thread sleep span using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
238
+ * 3. Decrease the sampling frequency using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
239
+ */
240
+ uint8_t hardwareBufferFull;
241
+ } CUpti_PCSamplingData;
242
+
243
+ /**
244
+ * \brief PC Sampling configuration attributes
245
+ *
246
+ * PC Sampling configuration attribute types. These attributes can be read
247
+ * using \ref cuptiPCSamplingGetConfigurationAttribute and can be written
248
+ * using \ref cuptiPCSamplingSetConfigurationAttribute. Attributes marked
249
+ * [r] can only be read using \ref cuptiPCSamplingGetConfigurationAttribute
250
+ * [w] can only be written using \ref cuptiPCSamplingSetConfigurationAttribute
251
+ * [rw] can be read using \ref cuptiPCSamplingGetConfigurationAttribute and
252
+ * written using \ref cuptiPCSamplingSetConfigurationAttribute
253
+ */
254
+ typedef enum
255
+ {
256
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_INVALID = 0,
257
+ /**
258
+ * [rw] Sampling period for PC Sampling.
259
+ * DEFAULT - CUPTI defined value based on number of SMs
260
+ * Valid values for the sampling
261
+ * periods are between 5 to 31 both inclusive. This will set the
262
+ * sampling period to (2^samplingPeriod) cycles.
263
+ * For e.g. for sampling period = 5 to 31, cycles = 32, 64, 128,..., 2^31
264
+ * Value is a uint32_t
265
+ */
266
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD = 1,
267
+ /**
268
+ * [w] Number of stall reasons to collect.
269
+ * DEFAULT - All stall reasons will be collected
270
+ * Value is a size_t
271
+ * [w] Stall reasons to collect
272
+ * DEFAULT - All stall reasons will be collected
273
+ * Input value should be a pointer pointing to array of stall reason indexes
274
+ * containing all the stall reason indexes to collect.
275
+ */
276
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON = 2,
277
+ /**
278
+ * [rw] Size of SW buffer for raw PC counter data downloaded from HW buffer
279
+ * DEFAULT - 1 MB, which can accommodate approximately 5500 PCs
280
+ * with all stall reasons
281
+ * Approximately it takes 16 Bytes (and some fixed size memory)
282
+ * to accommodate one PC with one stall reason
283
+ * For e.g. 1 PC with 1 stall reason = 32 Bytes
284
+ * 1 PC with 2 stall reason = 48 Bytes
285
+ * 1 PC with 4 stall reason = 96 Bytes
286
+ * Value is a size_t
287
+ */
288
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE = 3,
289
+ /**
290
+ * [rw] Size of HW buffer in bytes
291
+ * DEFAULT - 512 MB
292
+ * If sampling period is too less, HW buffer can overflow
293
+ * and drop PC data
294
+ * Value is a size_t
295
+ */
296
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE = 4,
297
+ /**
298
+ * [rw] PC Sampling collection mode
299
+ * DEFAULT - CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS
300
+ * Input value should be of type \ref CUpti_PCSamplingCollectionMode.
301
+ */
302
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE = 5,
303
+ /**
304
+ * [rw] Control over PC Sampling data collection range
305
+ * Default - 0
306
+ * 1 - Allows user to start and stop PC Sampling using APIs -
307
+ * \ref cuptiPCSamplingStart() - Start PC Sampling
308
+ * \ref cuptiPCSamplingStop() - Stop PC Sampling
309
+ * Value is a uint32_t
310
+ */
311
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL = 6,
312
+ /**
313
+ * [w] Value for output data format
314
+ * Default - CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED
315
+ * Input value should be of type \ref CUpti_PCSamplingOutputDataFormat.
316
+ */
317
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT = 7,
318
+ /**
319
+ * [w] Data buffer to hold collected PC Sampling data PARSED_DATA
320
+ * Default - none.
321
+ * Buffer type is void * which can point to PARSED_DATA
322
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
323
+ */
324
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER = 8,
325
+ /**
326
+ * [rw] Control sleep time of the worker threads created by CUPTI for various PC sampling operations.
327
+ * CUPTI creates multiple worker threads to offload certain operations to these threads. This includes decoding of HW data to
328
+ * the CUPTI PC sampling data and correlating PC data to SASS instructions. CUPTI wakes up these threads periodically.
329
+ * Default - 100 milliseconds.
330
+ * Value is a uint32_t
331
+ */
332
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN = 9,
333
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_FORCE_INT = 0x7fffffff,
334
+ } CUpti_PCSamplingConfigurationAttributeType;
335
+
336
+ /**
337
+ * \brief PC sampling configuration information structure
338
+ *
339
+ * This structure provides \ref CUpti_PCSamplingConfigurationAttributeType which can be configured
340
+ * or queried for PC sampling configuration
341
+ */
342
+ typedef struct
343
+ {
344
+ /**
345
+ * Refer \ref CUpti_PCSamplingConfigurationAttributeType for all supported attribute types
346
+ */
347
+ CUpti_PCSamplingConfigurationAttributeType attributeType;
348
+ /*
349
+ * Configure or query status for \p attributeType
350
+ * CUPTI_SUCCESS for valid \p attributeType and \p attributeData
351
+ * CUPTI_ERROR_INVALID_OPERATION if \p attributeData is not valid
352
+ * CUPTI_ERROR_INVALID_PARAMETER if \p attributeType is not valid
353
+ */
354
+ CUptiResult attributeStatus;
355
+ union
356
+ {
357
+ /**
358
+ * Invalid Value
359
+ */
360
+ struct
361
+ {
362
+ uint64_t data[3];
363
+ } invalidData;
364
+ /**
365
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
366
+ */
367
+ struct
368
+ {
369
+ uint32_t samplingPeriod;
370
+ } samplingPeriodData;
371
+ /**
372
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON
373
+ */
374
+ struct
375
+ {
376
+ size_t stallReasonCount;
377
+ uint32_t *pStallReasonIndex;
378
+ } stallReasonData;
379
+ /**
380
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE
381
+ */
382
+ struct
383
+ {
384
+ size_t scratchBufferSize;
385
+ } scratchBufferSizeData;
386
+ /**
387
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
388
+ */
389
+ struct
390
+ {
391
+ size_t hardwareBufferSize;
392
+ } hardwareBufferSizeData;
393
+ /**
394
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE
395
+ */
396
+ struct
397
+ {
398
+ CUpti_PCSamplingCollectionMode collectionMode;
399
+ } collectionModeData;
400
+ /**
401
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
402
+ */
403
+ struct
404
+ {
405
+ uint32_t enableStartStopControl;
406
+ } enableStartStopControlData;
407
+ /**
408
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT
409
+ */
410
+ struct
411
+ {
412
+ CUpti_PCSamplingOutputDataFormat outputDataFormat;
413
+ } outputDataFormatData;
414
+ /**
415
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER
416
+ */
417
+ struct
418
+ {
419
+ void *samplingDataBuffer;
420
+ } samplingDataBufferData;
421
+ /**
422
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
423
+ */
424
+ struct
425
+ {
426
+ uint32_t workerThreadPeriodicSleepSpan;
427
+ } workerThreadPeriodicSleepSpanData;
428
+
429
+ } attributeData;
430
+ } CUpti_PCSamplingConfigurationInfo;
431
+
432
+ /**
433
+ * \brief PC sampling configuration structure
434
+ *
435
+ * This structure configures PC sampling using \ref cuptiPCSamplingSetConfigurationAttribute
436
+ * and queries PC sampling default configuration using \ref cuptiPCSamplingGetConfigurationAttribute
437
+ */
438
+ typedef struct
439
+ {
440
+ /**
441
+ * [w] Size of the data structure i.e. CUpti_PCSamplingConfigurationInfoParamsSize
442
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
443
+ * available in the structure. Used to preserve backward compatibility.
444
+ */
445
+ size_t size;
446
+ /**
447
+ * [w] Assign to NULL
448
+ */
449
+ void* pPriv;
450
+ /**
451
+ * [w] CUcontext
452
+ */
453
+ CUcontext ctx;
454
+ /**
455
+ * [w] Number of attributes to configure using \ref cuptiPCSamplingSetConfigurationAttribute or query
456
+ * using \ref cuptiPCSamplingGetConfigurationAttribute
457
+ */
458
+ size_t numAttributes;
459
+ /**
460
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
461
+ */
462
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
463
+ } CUpti_PCSamplingConfigurationInfoParams;
464
+ #define CUpti_PCSamplingConfigurationInfoParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingConfigurationInfoParams,pPCSamplingConfigurationInfo)
465
+
466
+ /**
467
+ * \brief Write PC Sampling configuration attribute.
468
+ *
469
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
470
+ * containing PC sampling configuration.
471
+ *
472
+ * \retval CUPTI_SUCCESS
473
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
474
+ * some invalid \p attrib.
475
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if attribute \p value is not valid
476
+ * or any \p pParams is not valid
477
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
478
+ * does not support the API
479
+ */
480
+ CUptiResult CUPTIAPI cuptiPCSamplingSetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
481
+
482
+ /**
483
+ * \brief Read PC Sampling configuration attribute.
484
+ *
485
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
486
+ * containing PC sampling configuration.
487
+ *
488
+ * \retval CUPTI_SUCCESS
489
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
490
+ * some invalid attribute.
491
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p attrib is not valid
492
+ * or any \p pParams is not valid
493
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT indicates that
494
+ * the \p value buffer is too small to hold the attribute value
495
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
496
+ * does not support the API
497
+ */
498
+ CUptiResult CUPTIAPI cuptiPCSamplingGetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
499
+
500
+ /**
501
+ * \brief Params for cuptiPCSamplingEnable
502
+ */
503
+ typedef struct
504
+ {
505
+ /**
506
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetDataParamsSize
507
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
508
+ * available in the structure. Used to preserve backward compatibility.
509
+ */
510
+ size_t size;
511
+ /**
512
+ * [w] Assign to NULL
513
+ */
514
+ void* pPriv;
515
+ /**
516
+ * [w] CUcontext
517
+ */
518
+ CUcontext ctx;
519
+ /**
520
+ * \param pcSamplingData Data buffer to hold collected PC Sampling data PARSED_DATA
521
+ * Buffer type is void * which can point to PARSED_DATA
522
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
523
+ */
524
+ void *pcSamplingData;
525
+ } CUpti_PCSamplingGetDataParams;
526
+ #define CUpti_PCSamplingGetDataParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetDataParams, pcSamplingData)
527
+ /**
528
+ * \brief Flush GPU PC sampling data periodically.
529
+ *
530
+ * Flushing of GPU PC Sampling data is required at following point to maintain uniqueness of PCs:
531
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, after every module load-unload-load
532
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED, after every kernel ends
533
+ * If configuration option \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
534
+ * is enabled, then after every range end i.e. \brief cuptiPCSamplingStop()
535
+ *
536
+ * If application is profiled in \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, with disabled
537
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL, and there is no module unload,
538
+ * user can collect data in two ways:
539
+ * Use \brief cuptiPCSamplingGetData() API periodically
540
+ * Use \brief cuptiPCSamplingDisable() on application exit and read GPU PC sampling data from sampling
541
+ * data buffer passed during configuration.
542
+ * Note: In case, \brief cuptiPCSamplingGetData() API is not called periodically, then sampling data buffer
543
+ * passed during configuration should be large enough to hold all PCs data.
544
+ * \brief cuptiPCSamplingGetData() API never does device synchronization.
545
+ * It is possible that when the API is called there is some unconsumed data from the HW buffer. In this case
546
+ * CUPTI provides only the data available with it at that moment.
547
+ *
548
+ * \param pParams A pointer to \ref CUpti_PCSamplingGetDataParams
549
+ *
550
+ * \retval CUPTI_SUCCESS
551
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called without
552
+ * enabling PC sampling.
553
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
554
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
555
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY indicates that the HW buffer is full
556
+ * does not support the API
557
+ */
558
+ CUptiResult CUPTIAPI cuptiPCSamplingGetData(CUpti_PCSamplingGetDataParams *pParams);
559
+
560
+ /**
561
+ * \brief Params for cuptiPCSamplingEnable
562
+ */
563
+ typedef struct
564
+ {
565
+ /**
566
+ * [w] Size of the data structure i.e. CUpti_PCSamplingEnableParamsSize
567
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
568
+ * available in the structure. Used to preserve backward compatibility.
569
+ */
570
+ size_t size;
571
+ /**
572
+ * [w] Assign to NULL
573
+ */
574
+ void* pPriv;
575
+ /**
576
+ * [w] CUcontext
577
+ */
578
+ CUcontext ctx;
579
+ } CUpti_PCSamplingEnableParams;
580
+ #define CUpti_PCSamplingEnableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingEnableParams, ctx)
581
+
582
+ /**
583
+ * \brief Enable PC sampling.
584
+ *
585
+ * \param pParams A pointer to \ref CUpti_PCSamplingEnableParams
586
+ *
587
+ * \retval CUPTI_SUCCESS
588
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
589
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
590
+ * does not support the API
591
+ */
592
+ CUptiResult CUPTIAPI cuptiPCSamplingEnable(CUpti_PCSamplingEnableParams *pParams);
593
+
594
+ /**
595
+ * \brief Params for cuptiPCSamplingDisable
596
+ */
597
+ typedef struct
598
+ {
599
+ /**
600
+ * [w] Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
601
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
602
+ * available in the structure. Used to preserve backward compatibility.
603
+ */
604
+ size_t size;
605
+ /**
606
+ * [w] Assign to NULL
607
+ */
608
+ void* pPriv;
609
+ /**
610
+ * [w] CUcontext
611
+ */
612
+ CUcontext ctx;
613
+ } CUpti_PCSamplingDisableParams;
614
+ #define CUpti_PCSamplingDisableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingDisableParams, ctx)
615
+
616
+ /**
617
+ * \brief Disable PC sampling.
618
+ *
619
+ * For application which doesn't destroy the CUDA context explicitly,
620
+ * this API does the PC Sampling tear-down, joins threads and copies PC records in the buffer provided
621
+ * during the PC sampling configuration. PC records which can't be accommodated in the buffer are discarded.
622
+ *
623
+ * \param pParams A pointer to \ref CUpti_PCSamplingDisableParams
624
+ *
625
+ * \retval CUPTI_SUCCESS
626
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
627
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
628
+ * does not support the API
629
+ */
630
+ CUptiResult CUPTIAPI cuptiPCSamplingDisable(CUpti_PCSamplingDisableParams *pParams);
631
+
632
+ /**
633
+ * \brief Params for cuptiPCSamplingStart
634
+ */
635
+ typedef struct
636
+ {
637
+ /**
638
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStartParamsSize
639
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
640
+ * available in the structure. Used to preserve backward compatibility.
641
+ */
642
+ size_t size;
643
+ /**
644
+ * [w] Assign to NULL
645
+ */
646
+ void* pPriv;
647
+ /**
648
+ * [w] CUcontext
649
+ */
650
+ CUcontext ctx;
651
+ } CUpti_PCSamplingStartParams;
652
+ #define CUpti_PCSamplingStartParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStartParams, ctx)
653
+
654
+ /**
655
+ * \brief Start PC sampling.
656
+ *
657
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
658
+ * This API can be used to mark starting of range. Set configuration option
659
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
660
+ *
661
+ * \param pParams A pointer to \ref CUpti_PCSamplingStartParams
662
+ *
663
+ * \retval CUPTI_SUCCESS
664
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
665
+ * incorrect PC Sampling configuration.
666
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
667
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
668
+ * does not support the API
669
+ */
670
+ CUptiResult CUPTIAPI cuptiPCSamplingStart(CUpti_PCSamplingStartParams *pParams);
671
+
672
+ /**
673
+ * \brief Params for cuptiPCSamplingStop
674
+ */
675
+ typedef struct
676
+ {
677
+ /**
678
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStopParamsSize
679
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
680
+ * available in the structure. Used to preserve backward compatibility.
681
+ */
682
+ size_t size;
683
+ /**
684
+ * [w] Assign to NULL
685
+ */
686
+ void* pPriv;
687
+ /**
688
+ * [w] CUcontext
689
+ */
690
+ CUcontext ctx;
691
+ } CUpti_PCSamplingStopParams;
692
+ #define CUpti_PCSamplingStopParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStopParams, ctx)
693
+
694
+ /**
695
+ * \brief Stop PC sampling.
696
+ *
697
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
698
+ * This API can be used to mark end of range. Set configuration option
699
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
700
+ *
701
+ * \param pParams A pointer to \ref CUpti_PCSamplingStopParams
702
+ *
703
+ * \retval CUPTI_SUCCESS
704
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
705
+ * incorrect PC Sampling configuration.
706
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
707
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
708
+ * does not support the API
709
+ */
710
+ CUptiResult CUPTIAPI cuptiPCSamplingStop(CUpti_PCSamplingStopParams *pParams);
711
+
712
+ /**
713
+ * \brief Params for cuptiPCSamplingGetNumStallReasons
714
+ */
715
+ typedef struct
716
+ {
717
+ /**
718
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetNumStallReasonsParamsSize
719
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
720
+ * available in the structure. Used to preserve backward compatibility.
721
+ */
722
+ size_t size;
723
+ /**
724
+ * [w] Assign to NULL
725
+ */
726
+ void* pPriv;
727
+ /**
728
+ * [w] CUcontext
729
+ */
730
+ CUcontext ctx;
731
+ /**
732
+ * [r] Number of stall reasons
733
+ */
734
+ size_t *numStallReasons;
735
+ } CUpti_PCSamplingGetNumStallReasonsParams;
736
+ #define CUpti_PCSamplingGetNumStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetNumStallReasonsParams, numStallReasons)
737
+
738
+ /**
739
+ * \brief Get PC sampling stall reason count.
740
+ *
741
+ * \param pParams A pointer to \ref CUpti_PCSamplingGetNumStallReasonsParams
742
+ *
743
+ * \retval CUPTI_SUCCESS
744
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
745
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
746
+ * does not support the API
747
+ */
748
+ CUptiResult CUPTIAPI cuptiPCSamplingGetNumStallReasons(CUpti_PCSamplingGetNumStallReasonsParams *pParams);
749
+
750
+ /**
751
+ * \brief Params for cuptiPCSamplingGetStallReasons
752
+ */
753
+ typedef struct
754
+ {
755
+ /**
756
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetStallReasonsParamsSize
757
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
758
+ * available in the structure. Used to preserve backward compatibility.
759
+ */
760
+ size_t size;
761
+ /**
762
+ * [w] Assign to NULL
763
+ */
764
+ void* pPriv;
765
+ /**
766
+ * [w] CUcontext
767
+ */
768
+ CUcontext ctx;
769
+ /**
770
+ * [w] Number of stall reasons
771
+ */
772
+ size_t numStallReasons;
773
+ /**
774
+ * [r] Stall reason index
775
+ */
776
+ uint32_t *stallReasonIndex;
777
+ /**
778
+ * [r] Stall reasons name
779
+ */
780
+ char **stallReasons;
781
+ } CUpti_PCSamplingGetStallReasonsParams;
782
+ #define CUpti_PCSamplingGetStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetStallReasonsParams, stallReasons)
783
+
784
+ /**
785
+ * \brief Get PC sampling stall reasons.
786
+ *
787
+ * \param pParams A pointer to \ref CUpti_PCSamplingGetStallReasonsParams
788
+ *
789
+ * \retval CUPTI_SUCCESS
790
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
791
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
792
+ * does not support the API
793
+ */
794
+ CUptiResult CUPTIAPI cuptiPCSamplingGetStallReasons(CUpti_PCSamplingGetStallReasonsParams *pParams);
795
+
796
+
797
+ /**
798
+ * \brief Params for cuptiGetSassToSourceCorrelation
799
+ */
800
+ typedef struct CUpti_GetSassToSourceCorrelationParams {
801
+ /**
802
+ * [w] Size of the data structure i.e. CUpti_GetSassToSourceCorrelationParamsSize
803
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
804
+ * available in the structure. Used to preserve backward compatibility.
805
+ */
806
+ size_t size;
807
+ /**
808
+ * [w] Pointer to cubin binary where function belongs.
809
+ */
810
+ const void* cubin;
811
+ /**
812
+ * [w] Function name to which PC belongs.
813
+ */
814
+ const char *functionName;
815
+ /**
816
+ * [w] Size of cubin binary.
817
+ */
818
+ size_t cubinSize;
819
+ /**
820
+ * [r] Line number in the source code.
821
+ */
822
+ uint32_t lineNumber;
823
+ /**
824
+ * [w] PC offset
825
+ */
826
+ uint64_t pcOffset;
827
+ /**
828
+ * [r] Path for the source file.
829
+ */
830
+ char *fileName;
831
+ /**
832
+ * [r] Path for the directory of source file.
833
+ */
834
+ char *dirName;
835
+ } CUpti_GetSassToSourceCorrelationParams;
836
+
837
+ #define CUpti_GetSassToSourceCorrelationParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetSassToSourceCorrelationParams, dirName)
838
+
839
+ /**
840
+ * \brief SASS to Source correlation.
841
+ *
842
+ * \param pParams A pointer to \ref CUpti_GetSassToSourceCorrelationParams
843
+ *
844
+ * It is expected from user to free allocated memory for fileName and dirName after use.
845
+ *
846
+ * \retval CUPTI_SUCCESS
847
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if either of the parameters cubin or functionName
848
+ * is NULL or cubinSize is zero or size field is not set correctly.
849
+ * \retval CUPTI_ERROR_INVALID_MODULE provided cubin is invalid.
850
+ * \retval CUPTI_ERROR_UNKNOWN an internal error occurred.
851
+ * This error code is also used for cases when the function is not present in the module.
852
+ * A better error code will be returned in the future release.
853
+ */
854
+ CUptiResult CUPTIAPI cuptiGetSassToSourceCorrelation(CUpti_GetSassToSourceCorrelationParams *pParams);
855
+
856
+ /**
857
+ * \brief Params for cuptiGetCubinCrc
858
+ */
859
+ typedef struct {
860
+ /**
861
+ * [w] Size of configuration structure.
862
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
863
+ * available in the structure. Used to preserve backward compatibility.
864
+ */
865
+ size_t size;
866
+ /**
867
+ * [w] Size of cubin binary.
868
+ */
869
+ size_t cubinSize;
870
+ /**
871
+ * [w] Pointer to cubin binary
872
+ */
873
+ const void* cubin;
874
+ /**
875
+ * [r] Computed CRC will be stored in it.
876
+ */
877
+ uint64_t cubinCrc;
878
+ } CUpti_GetCubinCrcParams;
879
+ #define CUpti_GetCubinCrcParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetCubinCrcParams, cubinCrc)
880
+
881
+ /**
882
+ * \brief Get the CRC of cubin.
883
+ *
884
+ * This function returns the CRC of provided cubin binary.
885
+ *
886
+ * \param pParams A pointer to \ref CUpti_GetCubinCrcParams
887
+ *
888
+ * \retval CUPTI_SUCCESS
889
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if parameter cubin is NULL or
890
+ * provided cubinSize is zero or size field is not set.
891
+ */
892
+ CUptiResult CUPTIAPI cuptiGetCubinCrc(CUpti_GetCubinCrcParams *pParams);
893
+
894
+ /**
895
+ * \brief Function type for callback used by CUPTI to request crc of
896
+ * loaded module.
897
+ *
898
+ * This callback function ask for crc of provided module in function.
899
+ * The provided crc will be stored in PC sampling records i.e. in the field 'cubinCrc' of the PC sampling
900
+ * struct CUpti_PCSamplingPCData. The CRC is uses during the offline source correlation to uniquely identify the module.
901
+ *
902
+ * \param cubin The pointer to cubin binary
903
+ * \param cubinSize The size of cubin binary.
904
+ * \param cubinCrc Returns the computed crc of cubin.
905
+ */
906
+ typedef void (CUPTIAPI *CUpti_ComputeCrcCallbackFunc)(
907
+ const void* cubin,
908
+ size_t cubinSize,
909
+ uint64_t *cubinCrc);
910
+
911
+ /**
912
+ * \brief Register callback function with CUPTI to use
913
+ * your own algorithm to compute cubin crc.
914
+ *
915
+ * This function registers a callback function and it gets called
916
+ * from CUPTI when a CUDA module is loaded.
917
+ *
918
+ * \param funcComputeCubinCrc callback is invoked when a CUDA module
919
+ * is loaded.
920
+ *
921
+ * \retval CUPTI_SUCCESS
922
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p funcComputeCubinCrc is NULL.
923
+ */
924
+ CUptiResult CUPTIAPI cuptiRegisterComputeCrcCallback(CUpti_ComputeCrcCallbackFunc funcComputeCubinCrc);
925
+
926
+ /** @} */ /* END CUPTI_PCSAMPLING_API */
927
+
928
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
929
+ #pragma GCC visibility pop
930
+ #endif
931
+
932
+ #if defined(__cplusplus)
933
+ }
934
+ #endif
935
+
936
+ #endif /*_CUPTI_PCSAMPLING_H_*/
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #if !defined(_CUPTI_PCSAMPLING_UTIL_H_)
2
+ #define _CUPTI_PCSAMPLING_UTIL_H_
3
+
4
+ #include <cupti_pcsampling.h>
5
+ #include <fstream>
6
+
7
+ #include <cupti_common.h>
8
+
9
+ #ifndef CUPTI_UTIL_STRUCT_SIZE
10
+ #define CUPTI_UTIL_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
11
+ #endif
12
+
13
+ #ifndef CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS
14
+ #define CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS(type, member, structSize) \
15
+ (offsetof(type, member) < structSize)
16
+ #endif
17
+
18
+ #if defined(__cplusplus)
19
+ extern "C" {
20
+ #endif
21
+
22
+ #if defined(__GNUC__)
23
+ #pragma GCC visibility push(default)
24
+ #endif
25
+
26
+ namespace CUPTI { namespace PcSamplingUtil {
27
+
28
+ /**
29
+ * \defgroup CUPTI_PCSAMPLING_UTILITY CUPTI PC Sampling Utility API
30
+ * Functions, types, and enums that implement the CUPTI PC Sampling Utility API.
31
+ * @{
32
+ */
33
+
34
+ /**
35
+ * \brief Header info will be stored in file.
36
+ */
37
+ typedef struct PACKED_ALIGNMENT {
38
+ /**
39
+ * Version of file format.
40
+ */
41
+ uint32_t version;
42
+ /**
43
+ * Total number of buffers present in the file.
44
+ */
45
+ uint32_t totalBuffers;
46
+ } Header;
47
+
48
+ /**
49
+ * \brief BufferInfo will be stored in the file for every buffer
50
+ * i.e for every call of UtilDumpPcSamplingBufferInFile() API.
51
+ */
52
+ typedef struct PACKED_ALIGNMENT {
53
+ /**
54
+ * Total number of PC records.
55
+ */
56
+ uint64_t recordCount;
57
+ /**
58
+ * Count of all stall reasons supported on the GPU
59
+ */
60
+ size_t numStallReasons;
61
+ /**
62
+ * Total number of stall reasons in single record.
63
+ */
64
+ uint64_t numSelectedStallReasons;
65
+ /**
66
+ * Buffer size in Bytes.
67
+ */
68
+ uint64_t bufferByteSize;
69
+ } BufferInfo;
70
+
71
+ /**
72
+ * \brief All available stall reasons name and respective indexes
73
+ * will be stored in it.
74
+ */
75
+ typedef struct PACKED_ALIGNMENT {
76
+ /**
77
+ * Number of all available stall reasons
78
+ */
79
+ size_t numStallReasons;
80
+ /**
81
+ * Stall reasons names of all available stall reasons
82
+ */
83
+ char **stallReasons;
84
+ /**
85
+ * Stall reason index of all available stall reasons
86
+ */
87
+ uint32_t *stallReasonIndex;
88
+ } PcSamplingStallReasons;
89
+
90
+ /**
91
+ * \brief CUPTI PC sampling buffer types.
92
+ *
93
+ */
94
+ typedef enum {
95
+ /**
96
+ * Invalid buffer type.
97
+ */
98
+ PC_SAMPLING_BUFFER_INVALID = 0,
99
+ /**
100
+ * Refers to CUpti_PCSamplingData buffer.
101
+ */
102
+ PC_SAMPLING_BUFFER_PC_TO_COUNTER_DATA = 1
103
+ } PcSamplingBufferType;
104
+
105
+ /**
106
+ * \brief CUPTI PC sampling utility API result codes.
107
+ *
108
+ * Error and result codes returned by CUPTI PC sampling utility API.
109
+ */
110
+ typedef enum {
111
+ /**
112
+ * No error
113
+ */
114
+ CUPTI_UTIL_SUCCESS = 0,
115
+ /**
116
+ * One or more of the parameters are invalid.
117
+ */
118
+ CUPTI_UTIL_ERROR_INVALID_PARAMETER = 1,
119
+ /**
120
+ * Unable to create a new file
121
+ */
122
+ CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE = 2,
123
+ /**
124
+ * Unable to open a file
125
+ */
126
+ CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE = 3,
127
+ /**
128
+ * Read or write operation failed
129
+ */
130
+ CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED = 4,
131
+ /**
132
+ * Provided file handle is corrupted.
133
+ */
134
+ CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED = 5,
135
+ /**
136
+ * seek operation failed.
137
+ */
138
+ CUPTI_UTIL_ERROR_SEEK_OPERATION_FAILED = 6,
139
+ /**
140
+ * Unable to allocate enough memory to perform the requested
141
+ * operation.
142
+ */
143
+ CUPTI_UTIL_ERROR_OUT_OF_MEMORY = 7,
144
+ /**
145
+ * An unknown internal error has occurred.
146
+ */
147
+ CUPTI_UTIL_ERROR_UNKNOWN = 999,
148
+ CUPTI_UTIL_ERROR_FORCE_INT = 0x7fffffff
149
+ } CUptiUtilResult;
150
+
151
+ /**
152
+ * \brief Params for \ref CuptiUtilPutPcSampData
153
+ */
154
+ typedef struct {
155
+ /**
156
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
157
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
158
+ * available in the structure. Used to preserve backward compatibility.
159
+ */
160
+ size_t size;
161
+ /**
162
+ * Type of buffer to store in file
163
+ */
164
+ PcSamplingBufferType bufferType;
165
+ /**
166
+ * PC sampling buffer.
167
+ */
168
+ void *pSamplingData;
169
+ /**
170
+ * Number of configured attributes
171
+ */
172
+ size_t numAttributes;
173
+ /**
174
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
175
+ * It is expected to provide configuration details of at least
176
+ * CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON attribute.
177
+ */
178
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
179
+ /**
180
+ * Refer \ref PcSamplingStallReasons.
181
+ */
182
+ PcSamplingStallReasons *pPcSamplingStallReasons;
183
+ /**
184
+ * File name to store buffer into it.
185
+ */
186
+ const char* fileName;
187
+ } CUptiUtil_PutPcSampDataParams;
188
+ #define CUptiUtil_PutPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_PutPcSampDataParams, fileName)
189
+
190
+ /**
191
+ * \brief Dump PC sampling data into the file.
192
+ *
193
+ * This API can be called multiple times.
194
+ * It will append buffer in the file.
195
+ * For every buffer it will store BufferInfo
196
+ * so that before retrieving data it will help to allocate buffer
197
+ * to store retrieved data.
198
+ * This API creates file if file does not present.
199
+ * If stallReasonIndex or stallReasons pointer of \ref CUptiUtil_PutPcSampDataParams is NULL
200
+ * then stall reasons data will not be stored in file.
201
+ * It is expected to store all available stall reason data at least once to refer it during
202
+ * offline correlation.
203
+ *
204
+ * \retval CUPTI_UTIL_SUCCESS
205
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
206
+ * or if either of pSamplingData, pParams pointer is NULL or stall reason configuration details not provided
207
+ * or filename is empty.
208
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE
209
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE
210
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
211
+ */
212
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilPutPcSampData(CUptiUtil_PutPcSampDataParams *pParams);
213
+
214
+ /**
215
+ * \brief Params for \ref CuptiUtilGetHeaderData
216
+ */
217
+ typedef struct {
218
+ /**
219
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
220
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
221
+ * available in the structure. Used to preserve backward compatibility.
222
+ */
223
+ size_t size;
224
+ /**
225
+ * File handle.
226
+ */
227
+ std::ifstream *fileHandler;
228
+ /**
229
+ * Header Info.
230
+ */
231
+ Header headerInfo;
232
+
233
+ } CUptiUtil_GetHeaderDataParams;
234
+ #define CUptiUtil_GetHeaderDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetHeaderDataParams, headerInfo)
235
+
236
+ /**
237
+ * \brief Get header data of file.
238
+ *
239
+ * This API must be called once initially while retrieving data from file.
240
+ * \ref Header structure, it gives info about total number
241
+ * of buffers present in the file.
242
+ *
243
+ * \retval CUPTI_UTIL_SUCCESS
244
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
245
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file
246
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
247
+ */
248
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetHeaderData(CUptiUtil_GetHeaderDataParams *pParams);
249
+
250
+ /**
251
+ * \brief Params for \ref CuptiUtilGetBufferInfo
252
+ */
253
+ typedef struct {
254
+ /**
255
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
256
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
257
+ * available in the structure. Used to preserve backward compatibility.
258
+ */
259
+ size_t size;
260
+ /**
261
+ * File handle.
262
+ */
263
+ std::ifstream *fileHandler;
264
+ /**
265
+ * Buffer Info.
266
+ */
267
+ BufferInfo bufferInfoData;
268
+ } CUptiUtil_GetBufferInfoParams;
269
+ #define CUptiUtil_GetBufferInfoParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetBufferInfoParams, bufferInfoData)
270
+
271
+ /**
272
+ * \brief Get buffer info data of file.
273
+ *
274
+ * This API must be called every time before calling CuptiUtilGetPcSampData API.
275
+ * \ref BufferInfo structure, it gives info about recordCount and stallReasonCount
276
+ * of every record in the buffer. This will help to allocate exact buffer to retrieve data into it.
277
+ *
278
+ * \retval CUPTI_UTIL_SUCCESS
279
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
280
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
281
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
282
+ */
283
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetBufferInfo(CUptiUtil_GetBufferInfoParams *pParams);
284
+
285
+ /**
286
+ * \brief Params for \ref CuptiUtilGetPcSampData
287
+ */
288
+ typedef struct {
289
+ /**
290
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
291
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
292
+ * available in the structure. Used to preserve backward compatibility.
293
+ */
294
+ size_t size;
295
+ /**
296
+ * File handle.
297
+ */
298
+ std::ifstream *fileHandler;
299
+ /**
300
+ * Type of buffer to store in file
301
+ */
302
+ PcSamplingBufferType bufferType;
303
+ /**
304
+ * Pointer to collected buffer info using \ref CuptiUtilGetBufferInfo
305
+ */
306
+ BufferInfo *pBufferInfoData;
307
+ /**
308
+ * Pointer to allocated memory to store retrieved data from file.
309
+ */
310
+ void *pSamplingData;
311
+ /**
312
+ * Number of configuration attributes
313
+ */
314
+ size_t numAttributes;
315
+ /**
316
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
317
+ */
318
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
319
+ /**
320
+ * Refer \ref PcSamplingStallReasons.
321
+ * For stallReasons field of \ref PcSamplingStallReasons it is expected to
322
+ * allocate memory for each string element of array.
323
+ */
324
+ PcSamplingStallReasons *pPcSamplingStallReasons;
325
+ } CUptiUtil_GetPcSampDataParams;
326
+ #define CUptiUtil_GetPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetPcSampDataParams, pPcSamplingStallReasons)
327
+
328
+ /**
329
+ * \brief Retrieve PC sampling data from file into allocated buffer.
330
+ *
331
+ * This API must be called after CuptiUtilGetBufferInfo API.
332
+ * It will retrieve data from file into allocated buffer.
333
+ *
334
+ * \retval CUPTI_UTIL_SUCCESS
335
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
336
+ * or if either of pSampData, pParams is NULL. If pPcSamplingStallReasons is not NULL then
337
+ * error out if either of stallReasonIndex, stallReasons or stallReasons array element pointer is NULL.
338
+ * or filename is empty.
339
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
340
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
341
+ */
342
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetPcSampData(CUptiUtil_GetPcSampDataParams *pParams);
343
+
344
+ /**
345
+ * \brief Params for \ref CuptiUtilMergePcSampData
346
+ */
347
+ typedef struct
348
+ {
349
+ /**
350
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
351
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
352
+ * available in the structure. Used to preserve backward compatibility.
353
+ */
354
+ size_t size;
355
+ /**
356
+ * Number of buffers to merge.
357
+ */
358
+ size_t numberOfBuffers;
359
+ /**
360
+ * Pointer to array of buffers to merge
361
+ */
362
+ CUpti_PCSamplingData *PcSampDataBuffer;
363
+ /**
364
+ * Pointer to array of merged buffers as per the range id.
365
+ */
366
+ CUpti_PCSamplingData **MergedPcSampDataBuffers;
367
+ /**
368
+ * Number of merged buffers.
369
+ */
370
+ size_t *numMergedBuffer;
371
+ } CUptiUtil_MergePcSampDataParams;
372
+ #define CUptiUtil_MergePcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_MergePcSampDataParams, numMergedBuffer)
373
+
374
+ /**
375
+ * \brief Merge PC sampling data range id wise.
376
+ *
377
+ * This API merge PC sampling data range id wise.
378
+ * It allocates memory for merged data and fill data in it
379
+ * and provide buffer pointer in MergedPcSampDataBuffers field.
380
+ * It is expected from user to free merge data buffers after use.
381
+ *
382
+ * \retval CUPTI_UTIL_SUCCESS
383
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if param struct size is invalid
384
+ * or count of buffers to merge is invalid i.e less than 1
385
+ * or either of PcSampDataBuffer, MergedPcSampDataBuffers, numMergedBuffer is NULL
386
+ * \retval CUPTI_UTIL_ERROR_OUT_OF_MEMORY Unable to allocate memory for merged buffer.
387
+ */
388
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilMergePcSampData(CUptiUtil_MergePcSampDataParams *pParams);
389
+
390
+ /** @} */ /* END CUPTI_PCSAMPLING_UTILITY */
391
+
392
+ } }
393
+
394
+ #if defined(__GNUC__)
395
+ #pragma GCC visibility pop
396
+ #endif
397
+
398
+ #if defined(__cplusplus)
399
+ }
400
+ #endif
401
+
402
+ #endif
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_pmsampling.h ADDED
@@ -0,0 +1,474 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2024 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PMSAMPLING_H_)
51
+ #define _CUPTI_PMSAMPLING_H_
52
+
53
+ #include <cuda.h>
54
+ #include <cupti_result.h>
55
+ #include <stddef.h>
56
+ #include <stdint.h>
57
+
58
+ #ifdef __cplusplus
59
+ extern "C" {
60
+ #endif
61
+
62
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
63
+ #pragma GCC visibility push(default)
64
+ #endif
65
+
66
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
67
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
68
+ #endif
69
+
70
+ /* CUPTI PM sampling APIs */
71
+ /**
72
+ * \defgroup CUPTI_PM_SAMPLING_API CUPTI PM Sampling API
73
+ * Functions to enable, disable, start, stop, and decode PM sampling.
74
+ * @{
75
+ */
76
+ typedef struct CUpti_PmSampling_Object CUpti_PmSampling_Object;
77
+
78
+ typedef enum CUpti_PmSampling_TriggerMode
79
+ {
80
+ /// The trigger is based off of the SYSCLK frequency, note SYS frequency by default is variable.
81
+ /// the sample interval (set in the struct CUpti_PmSampling_SetConfig_Params) is in terms of clocks.
82
+ CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_SYSCLK_INTERVAL = 0,
83
+ /// The trigger is based off of a fixed frequency source.
84
+ /// The sample interval (set in the struct CUpti_PmSampling_SetConfig_Params) is in terms of nanoseconds.
85
+ /// Note: This trigger mode is not supported on Turing GPU architecture and GA100 GPU.
86
+ /// It is supported on Ampere GA10x and later GPU architectures.
87
+ CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_TIME_INTERVAL = 1,
88
+ CUPTI_PM_SAMPLING_TRIGGER_MODE_COUNT
89
+ } CUpti_PmSampling_TriggerMode;
90
+
91
+ typedef enum CUpti_PmSampling_DecodeStopReason
92
+ {
93
+ CUPTI_PM_SAMPLING_DECODE_STOP_REASON_OTHER = 0,
94
+ /// Counter data image is full.
95
+ CUPTI_PM_SAMPLING_DECODE_STOP_REASON_COUNTER_DATA_FULL,
96
+ /// All the records in the hardware buffer is decoded.
97
+ CUPTI_PM_SAMPLING_DECODE_STOP_REASON_END_OF_RECORDS,
98
+ CUPTI_PM_SAMPLING_DECODE_STOP_REASON_COUNT
99
+ } CUpti_PmSampling_DecodeStopReason;
100
+
101
+ /**
102
+ * \brief Params for cuptiPmSamplingSetConfig
103
+ */
104
+ typedef struct CUpti_PmSampling_SetConfig_Params
105
+ {
106
+ /// [in] Size of the data structure.
107
+ size_t structSize;
108
+ /// [in] Set to NULL.
109
+ void* pPriv;
110
+ /// [in] PM sampling object.
111
+ CUpti_PmSampling_Object* pPmSamplingObject;
112
+ /// [in] Size of the config image.
113
+ size_t configSize;
114
+ /// [in] Config image.
115
+ const uint8_t* pConfig;
116
+ /// [in] The hardware buffer size in which raw PM sampling data
117
+ /// will be stored. These samples will be decoded to counter data
118
+ /// image with \ref cuptiPmSamplingDecodeData call.
119
+ size_t hardwareBufferSize;
120
+ /// [in] For the trigger mode `CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_SYSCLK_INTERVAL`, sampling interval
121
+ /// is the number of sys clock cycles. For the trigger mode `CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_TIME_INTERVAL`,
122
+ /// sampling interval is in nanoseconds.
123
+ uint64_t samplingInterval;
124
+ /// [in] Trigger mode.
125
+ /// Note: CUPTI_PM_SAMPLING_TRIGGER_MODE_GPU_TIME_INTERVAL is not supported in Turing and GA100.
126
+ /// Supported from GA10x onwards.
127
+ CUpti_PmSampling_TriggerMode triggerMode;
128
+ } CUpti_PmSampling_SetConfig_Params;
129
+
130
+ #define CUpti_PmSampling_SetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_SetConfig_Params, triggerMode)
131
+
132
+ /**
133
+ * \brief Set the configuration for PM sampling like sampling interval, maximum number of samples
134
+ * filled in HW buffer, trigger mode and the config image which has scheduling info for metric collection.
135
+ *
136
+ * \param pParams A pointer to \ref CUpti_PmSampling_SetConfig_Params
137
+ *
138
+ * \retval CUPTI_SUCCESS
139
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
140
+ * \retval CUPTI_ERROR_NOT_SUPPORTED for config image which require multiple passes for data collection
141
+ */
142
+ CUptiResult CUPTIAPI cuptiPmSamplingSetConfig(CUpti_PmSampling_SetConfig_Params* pParams);
143
+
144
+ /**
145
+ * \brief Params for cuptiPmSamplingEnable
146
+ */
147
+ typedef struct CUpti_PmSampling_Enable_Params
148
+ {
149
+ /// [in] Size of the data structure.
150
+ size_t structSize;
151
+ /// [in] Set to NULL.
152
+ void* pPriv;
153
+ /// [in] Device index.
154
+ size_t deviceIndex;
155
+ /// [out] PM sampling object.
156
+ CUpti_PmSampling_Object* pPmSamplingObject;
157
+ } CUpti_PmSampling_Enable_Params;
158
+
159
+ #define CUpti_PmSampling_Enable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_Enable_Params, pPmSamplingObject)
160
+
161
+ /**
162
+ * \brief Create a PM sampling object and enable PM sampling on the CUDA device.
163
+ *
164
+ * \param pParams A pointer to \ref CUpti_PmSampling_Enable_Params
165
+ *
166
+ * \retval CUPTI_SUCCESS
167
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
168
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if memory allocation fails while creating the PM sampling object
169
+ * \retval CUPTI_ERROR_INVALID_OPERATION if PM sampling is already enabled on the device
170
+ * \retval CUPTI_ERROR_INSUFFICIENT_PRIVILEGES if the user does not have sufficient privileges to perform the operation
171
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
172
+ */
173
+ CUptiResult CUPTIAPI cuptiPmSamplingEnable(CUpti_PmSampling_Enable_Params* pParams);
174
+
175
+ /**
176
+ * \brief Params for cuptiPmSamplingDisable
177
+ */
178
+ typedef struct CUpti_PmSampling_Disable_Params
179
+ {
180
+ /// [in] Size of the data structure.
181
+ size_t structSize;
182
+ /// [in] Set to NULL.
183
+ void* pPriv;
184
+ /// [in] PM sampling object.
185
+ CUpti_PmSampling_Object* pPmSamplingObject;
186
+ } CUpti_PmSampling_Disable_Params;
187
+
188
+ #define CUpti_PmSampling_Disable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_Disable_Params, pPmSamplingObject)
189
+
190
+ /**
191
+ * \brief Disable PM sampling on the CUDA device and destroy the PM sampling object.
192
+ *
193
+ * \param pParams A pointer to \ref CUpti_PmSampling_Disable_Params
194
+ *
195
+ * \retval CUPTI_SUCCESS
196
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
197
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
198
+ */
199
+ CUptiResult CUPTIAPI cuptiPmSamplingDisable(CUpti_PmSampling_Disable_Params* pParams);
200
+
201
+ /**
202
+ * \brief Params for cuptiPmSamplingStart
203
+ */
204
+ typedef struct CUpti_PmSampling_Start_Params
205
+ {
206
+ /// [in] Size of the data structure.
207
+ size_t structSize;
208
+ /// [in] Set to NULL.
209
+ void* pPriv;
210
+ /// [in] PM sampling object.
211
+ CUpti_PmSampling_Object* pPmSamplingObject;
212
+ } CUpti_PmSampling_Start_Params;
213
+
214
+ #define CUpti_PmSampling_Start_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_Start_Params, pPmSamplingObject)
215
+
216
+ /**
217
+ * \brief Start the PM sampling. The GPU will start collecting the metrics data
218
+ * periodically based on trigger type and sampling interval passed in CUpti_PmSampling_SetConfig_Params.
219
+ * The collected data will be stored in the hardware buffer.
220
+ *
221
+ * \param pParams A pointer to \ref CUpti_PmSampling_Start_Params
222
+ *
223
+ * \retval CUPTI_SUCCESS
224
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
225
+ * \retval CUPTI_ERROR_INVALID_OPERATION if PM sampling Start is called without enabling PM sampling,
226
+ * and PM sampling is already started
227
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
228
+ */
229
+ CUptiResult CUPTIAPI cuptiPmSamplingStart(CUpti_PmSampling_Start_Params* pParams);
230
+
231
+ /**
232
+ * \brief Params for cuptiPmSamplingStop
233
+ */
234
+ typedef struct CUpti_PmSampling_Stop_Params
235
+ {
236
+ /// [in] Size of the data structure.
237
+ size_t structSize;
238
+ /// [in] Set to NULL.
239
+ void* pPriv;
240
+ /// [in] PM sampling object.
241
+ CUpti_PmSampling_Object* pPmSamplingObject;
242
+ } CUpti_PmSampling_Stop_Params;
243
+
244
+ #define CUpti_PmSampling_Stop_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_Stop_Params, pPmSamplingObject)
245
+
246
+ /**
247
+ * \brief Stop the PM sampling. The GPU will stop collecting the metrics data.
248
+ *
249
+ * \param pParams A pointer to \ref CUpti_PmSampling_Stop_Params
250
+ *
251
+ * \retval CUPTI_SUCCESS
252
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
253
+ * \retval CUPTI_ERROR_INVALID_OPERATION if PM sampling Stop is called without enabling PM sampling,
254
+ * and PM sampling is already stopped
255
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
256
+ */
257
+ CUptiResult CUPTIAPI cuptiPmSamplingStop(CUpti_PmSampling_Stop_Params* pParams);
258
+
259
+ /**
260
+ * \brief Params for cuptiPmSamplingDecodeData
261
+ */
262
+ typedef struct CUpti_PmSampling_DecodeData_Params
263
+ {
264
+ /// [in] Size of the data structure.
265
+ size_t structSize;
266
+ /// [in] Set to NULL.
267
+ void* pPriv;
268
+ /// [in] PM sampling object.
269
+ CUpti_PmSampling_Object* pPmSamplingObject;
270
+ /// [in] Counter data image.
271
+ uint8_t* pCounterDataImage;
272
+ /// [in] Size of the counter data image.
273
+ size_t counterDataImageSize;
274
+ /// [out] decode stop reason
275
+ CUpti_PmSampling_DecodeStopReason decodeStopReason;
276
+ /// [out] overflow status for hardware buffer.
277
+ /// To avoid overflow, either increase the maxSamples values in
278
+ /// \ref CUpti_PmSampling_SetConfig_Params or reduce the sampling interval.
279
+ uint8_t overflow;
280
+ } CUpti_PmSampling_DecodeData_Params;
281
+
282
+ #define CUpti_PmSampling_DecodeData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_DecodeData_Params, overflow)
283
+
284
+ /**
285
+ * \brief Decode the metrics data stored in the hardware buffer to the counter data image.
286
+ *
287
+ *
288
+ * \param pParams A pointer to \ref CUpti_PmSampling_DecodeData_Params
289
+ *
290
+ * \retval CUPTI_SUCCESS
291
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
292
+ * \retval CUPTI_ERROR_INVALID_OPERATION if PM sampling DecodeData is called without enabling PM sampling
293
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if there is record overflow in the hardware buffer
294
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
295
+ */
296
+ CUptiResult CUPTIAPI cuptiPmSamplingDecodeData(CUpti_PmSampling_DecodeData_Params* pParams);
297
+
298
+ /**
299
+ * \brief Params for cuptiPmSamplingGetCounterData
300
+ */
301
+ typedef struct CUpti_PmSampling_GetCounterAvailability_Params
302
+ {
303
+ /// [in] Size of the data structure.
304
+ size_t structSize;
305
+ /// [in] Set to NULL.
306
+ void* pPriv;
307
+ /// [in] Device index.
308
+ size_t deviceIndex;
309
+ /// [inout] Size of the counter availability image. When pCounterAvailabilityImage is NULL,
310
+ /// this field is used to return the size of the counter availability image.
311
+ size_t counterAvailabilityImageSize;
312
+ /// [out] Counter availability image.
313
+ uint8_t* pCounterAvailabilityImage;
314
+ } CUpti_PmSampling_GetCounterAvailability_Params;
315
+ #define CUpti_PmSampling_GetCounterAvailability_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_GetCounterAvailability_Params, pCounterAvailabilityImage)
316
+
317
+ /**
318
+ * \brief Query counter availibility information in a buffer which can be used to filter unavailable raw metrics on host.
319
+ * Note: This API may fail, if any profiling or sampling session is active on the specified device.
320
+ *
321
+ * \param pParams A pointer to \ref CUpti_PmSampling_GetCounterAvailability_Params
322
+ *
323
+ * \retval CUPTI_SUCCESS
324
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
325
+ * \retval CUPTI_ERROR_INSUFFICIENT_PRIVILEGES if the user does not have sufficient privileges to perform the operation
326
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
327
+ */
328
+ CUptiResult CUPTIAPI cuptiPmSamplingGetCounterAvailability(CUpti_PmSampling_GetCounterAvailability_Params* pParams);
329
+
330
+ /**
331
+ * \brief Params for cuptiPmSamplingGetCounterDataSize
332
+ */
333
+ typedef struct CUpti_PmSampling_GetCounterDataSize_Params
334
+ {
335
+ /// [in] Size of the data structure.
336
+ size_t structSize;
337
+ /// [in] Set to NULL.
338
+ void* pPriv;
339
+ /// [in] PM sampling object.
340
+ CUpti_PmSampling_Object* pPmSamplingObject;
341
+ /// [in] Names of the metrics to be collected.
342
+ const char** pMetricNames;
343
+ /// [in] Number of metrics to be collected.
344
+ size_t numMetrics;
345
+ /// [in] Maximum number of samples to be stored in the counter data image.
346
+ uint32_t maxSamples;
347
+ /// [out] Size of the counter data image.
348
+ size_t counterDataSize;
349
+ } CUpti_PmSampling_GetCounterDataSize_Params;
350
+ #define CUpti_PmSampling_GetCounterDataSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_GetCounterDataSize_Params, counterDataSize)
351
+
352
+ /**
353
+ * \brief Query the size of the counter data image which will be used to store the metrics data.
354
+ * User need to allocate the memory for the counter data image based on the size returned by this API.
355
+ *
356
+ * \param pParams A pointer to \ref CUpti_PmSampling_GetCounterDataSize_Params
357
+ *
358
+ * \retval CUPTI_SUCCESS
359
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
360
+ * \retval CUPTI_ERROR_INVALID_OPERATION if PM sampling GetCounterDataSize is called without enabling PM sampling
361
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
362
+ */
363
+ CUptiResult CUPTIAPI cuptiPmSamplingGetCounterDataSize(CUpti_PmSampling_GetCounterDataSize_Params* pParams);
364
+
365
+ /**
366
+ * \brief Params for cuptiPmSamplingCounterDataImageInitialize
367
+ */
368
+ typedef struct CUpti_PmSampling_CounterDataImage_Initialize_Params
369
+ {
370
+ /// [in] Size of the data structure.
371
+ size_t structSize;
372
+ /// [in] Set to NULL.
373
+ void* pPriv;
374
+ /// [in] PM sampling object.
375
+ CUpti_PmSampling_Object* pPmSamplingObject;
376
+ /// [in] Size of the counter data image.
377
+ size_t counterDataSize;
378
+ /// [in] Counter data image.
379
+ uint8_t* pCounterData;
380
+ } CUpti_PmSampling_CounterDataImage_Initialize_Params;
381
+ #define CUpti_PmSampling_CounterDataImage_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_CounterDataImage_Initialize_Params, pCounterData)
382
+
383
+ /**
384
+ * \brief Initialize the counter data to CUPTI record format for storing the metric data.
385
+ *
386
+ * \param pParams A pointer to \ref CUpti_PmSampling_CounterDataImage_Initialize_Params
387
+ *
388
+ * \retval CUPTI_SUCCESS
389
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
390
+ * \retval CUPTI_ERROR_INVALID_OPERATION if PM sampling CounterDataInitialize is called without enabling PM sampling
391
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
392
+ */
393
+ CUptiResult CUPTIAPI cuptiPmSamplingCounterDataImageInitialize(CUpti_PmSampling_CounterDataImage_Initialize_Params* pParams);
394
+
395
+ /**
396
+ * \brief Params for cuptiPmSamplingGetCounterDataInfo
397
+ */
398
+ typedef struct CUpti_PmSampling_GetCounterDataInfo_Params
399
+ {
400
+ /// [in] Size of the data structure.
401
+ size_t structSize;
402
+ /// [in] Set to NULL.
403
+ void* pPriv;
404
+ /// [in] Counter data image.
405
+ const uint8_t* pCounterDataImage;
406
+ /// [in] Size of the counter data image.
407
+ size_t counterDataImageSize;
408
+ /// [out] Number of samples in the counter data image.
409
+ size_t numTotalSamples;
410
+ /// [out] Number of populated samples.
411
+ size_t numPopulatedSamples;
412
+ /// [out] Number of samples that have been completed.
413
+ size_t numCompletedSamples;
414
+ } CUpti_PmSampling_GetCounterDataInfo_Params;
415
+ #define CUpti_PmSampling_GetCounterDataInfo_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_GetCounterDataInfo_Params, numCompletedSamples)
416
+
417
+ /**
418
+ * \brief Get the counter data info like number of samples, number of populated
419
+ * samples and number of completed samples in a counter data image.
420
+ *
421
+ * \param pParams A pointer to \ref CUpti_PmSampling_GetCounterDataInfo_Params
422
+ *
423
+ * \retval CUPTI_SUCCESS
424
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
425
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
426
+ */
427
+ CUptiResult CUPTIAPI cuptiPmSamplingGetCounterDataInfo(CUpti_PmSampling_GetCounterDataInfo_Params* pParams);
428
+
429
+ /**
430
+ * \brief Params for cuptiPmSamplingCounterDataGetSampleInfo
431
+ */
432
+ typedef struct CUpti_PmSampling_CounterData_GetSampleInfo_Params
433
+ {
434
+ /// [in] Size of the data structure.
435
+ size_t structSize;
436
+ /// [in] Set to NULL.
437
+ void* pPriv;
438
+ /// [in] PM sampling object.
439
+ CUpti_PmSampling_Object* pPmSamplingObject;
440
+ /// [in] Counter data image.
441
+ const uint8_t* pCounterDataImage;
442
+ /// [in] Size of the counter data image.
443
+ size_t counterDataImageSize;
444
+ /// [in] Index of the sample.
445
+ size_t sampleIndex;
446
+ /// [out] Start time of the sample.
447
+ uint64_t startTimestamp;
448
+ /// [out] End time of the sample.
449
+ uint64_t endTimestamp;
450
+ } CUpti_PmSampling_CounterData_GetSampleInfo_Params;
451
+ #define CUpti_PmSampling_CounterData_GetSampleInfo_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_PmSampling_CounterData_GetSampleInfo_Params, endTimestamp)
452
+
453
+ /**
454
+ * \brief Get the sample info (start and end time stamp) for the given sample index.
455
+ * Each sample is distinguished by the start and end time stamp.
456
+ *
457
+ * \param pParams A pointer to \ref CUpti_PmSampling_CounterData_GetSampleInfo_Params
458
+ *
459
+ * \retval CUPTI_SUCCESS
460
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
461
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
462
+ */
463
+ CUptiResult CUPTIAPI cuptiPmSamplingCounterDataGetSampleInfo(CUpti_PmSampling_CounterData_GetSampleInfo_Params* pParams);
464
+
465
+ /** @} */ /* END CUPTI_PMSAMPLING_API */
466
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
467
+ #pragma GCC visibility pop
468
+ #endif
469
+
470
+ #ifdef __cplusplus
471
+ } /* extern "C" */
472
+ #endif
473
+
474
+ #endif // _CUPTI_PMSAMPLING_H_
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_profiler_host.h ADDED
@@ -0,0 +1,541 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2024 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PROFILER_HOST_H_)
51
+ #define _CUPTI_PROFILER_HOST_H_
52
+
53
+ /*
54
+ CUPTI profiler host API's
55
+ This file contains the CUPTI profiling host API's.
56
+ */
57
+ #include <cupti_result.h>
58
+ #include <stdint.h>
59
+ #include <stddef.h>
60
+ #include <string>
61
+
62
+ #ifdef __cplusplus
63
+ extern "C" {
64
+ #endif
65
+
66
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
67
+ #pragma GCC visibility push(default)
68
+ #endif
69
+
70
+ /**
71
+ * \defgroup CUPTI_PROFILER_HOST_API CUPTI Profiler Host API
72
+ * Functions, types, and enums that implement the CUPTI Profiler Host API.
73
+ * @{
74
+ */
75
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
76
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
77
+ #endif
78
+
79
+ typedef enum CUpti_MetricType
80
+ {
81
+ CUPTI_METRIC_TYPE_COUNTER = 0,
82
+ CUPTI_METRIC_TYPE_RATIO,
83
+ CUPTI_METRIC_TYPE_THROUGHPUT,
84
+ CUPTI_METRIC_TYPE__COUNT
85
+ } CUpti_MetricType;
86
+
87
+ typedef enum CUpti_ProfilerType
88
+ {
89
+ CUPTI_PROFILER_TYPE_RANGE_PROFILER,
90
+ CUPTI_PROFILER_TYPE_PM_SAMPLING,
91
+ CUPTI_PROFILER_TYPE_PROFILER_INVALID
92
+ } CUpti_ProfilerType;
93
+
94
+ typedef struct CUpti_Profiler_Host_Object CUpti_Profiler_Host_Object;
95
+
96
+ /**
97
+ * \brief Params for cuptiProfilerHostInitialize
98
+ */
99
+ typedef struct CUpti_Profiler_Host_Initialize_Params
100
+ {
101
+ /// [in] Size of the data structure.
102
+ /// CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
103
+ /// available in the structure. Used to preserve backward compatibility.
104
+ size_t structSize;
105
+ /// [in] Assign to NULL
106
+ void* pPriv;
107
+ /// [in] the profiler kind one from CUpti_ProfilerType
108
+ CUpti_ProfilerType profilerType;
109
+ /// [in] accepted for chips supported at the time-of-release.
110
+ const char* pChipName;
111
+ /// [in] buffer with counter availability image - required for future chip support
112
+ const uint8_t* pCounterAvailabilityImage;
113
+ /// [out] binary blob allocated by CUPTI and operations associated with this object.
114
+ CUpti_Profiler_Host_Object* pHostObject;
115
+ } CUpti_Profiler_Host_Initialize_Params;
116
+
117
+ #define CUpti_Profiler_Host_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_Initialize_Params, pHostObject)
118
+
119
+ /**
120
+ * \brief Create and initialize the profiler host object (CUpti_Profiler_Host_Object).
121
+ *
122
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_Initialize_Params
123
+ *
124
+ * \retval CUPTI_SUCCESS
125
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
126
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
127
+ */
128
+ CUptiResult CUPTIAPI cuptiProfilerHostInitialize(CUpti_Profiler_Host_Initialize_Params* pParams);
129
+
130
+ /**
131
+ * \brief Params for cuptiProfilerHostDeinitialize
132
+ */
133
+ typedef struct CUpti_Profiler_Host_Deinitialize_Params
134
+ {
135
+ /// [in] Size of the data structure.
136
+ /// CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
137
+ /// available in the structure. Used to preserve backward compatibility.
138
+ size_t structSize;
139
+ /// [in] Assign to NULL
140
+ void* pPriv;
141
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
142
+ struct CUpti_Profiler_Host_Object* pHostObject;
143
+ } CUpti_Profiler_Host_Deinitialize_Params;
144
+
145
+ #define CUpti_Profiler_Host_Deinitialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_Deinitialize_Params, pHostObject)
146
+
147
+ /**
148
+ * \brief Deinitialize and destroy the profiler host object (CUpti_Profiler_Host_Object).
149
+ *
150
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_Deinitialize_Params
151
+ *
152
+ * \retval CUPTI_SUCCESS
153
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
154
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
155
+ */
156
+ CUptiResult CUPTIAPI cuptiProfilerHostDeinitialize(CUpti_Profiler_Host_Deinitialize_Params* pParams);
157
+
158
+ /**
159
+ * \brief Params for cuptiProfilerHostGetSupportedChips
160
+ */
161
+ typedef struct CUpti_Profiler_Host_GetSupportedChips_Params
162
+ {
163
+ /// [in] Size of the data structure.
164
+ size_t structSize;
165
+ /// [in] Assign to NULL
166
+ void* pPriv;
167
+ /// [out] number of supported chips
168
+ size_t numChips;
169
+ /// [out] list of supported chips
170
+ const char* const* ppChipNames;
171
+ } CUpti_Profiler_Host_GetSupportedChips_Params;
172
+
173
+ #define CUpti_Profiler_Host_GetSupportedChips_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetSupportedChips_Params, ppChipNames)
174
+
175
+ /**
176
+ * \brief Get the list of supported chips.
177
+ *
178
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetSupportedChips_Params
179
+ *
180
+ * \retval CUPTI_SUCCESS
181
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
182
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
183
+ */
184
+ CUptiResult CUPTIAPI cuptiProfilerHostGetSupportedChips(CUpti_Profiler_Host_GetSupportedChips_Params* pParams);
185
+
186
+ /**
187
+ * \brief Params for cuptiProfilerHostGetSupportedMetrics
188
+ */
189
+ typedef struct CUpti_Profiler_Host_GetBaseMetrics_Params
190
+ {
191
+ /// [in] Size of the data structure.
192
+ size_t structSize;
193
+ /// [in] Assign to NULL
194
+ void* pPriv;
195
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
196
+ struct CUpti_Profiler_Host_Object* pHostObject;
197
+ /// [in] metric type (counter, ratio, throughput)
198
+ CUpti_MetricType metricType;
199
+ /// [out] list of base metrics supported of queried metric type for the chip
200
+ const char** ppMetricNames;
201
+ /// [out] number of metrics
202
+ size_t numMetrics;
203
+ } CUpti_Profiler_Host_GetBaseMetrics_Params;
204
+
205
+ #define CUpti_Profiler_Host_GetBaseMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetBaseMetrics_Params, numMetrics)
206
+
207
+ /**
208
+ * \brief Get the list of supported base metrics for the chip.
209
+ *
210
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetBaseMetrics_Params
211
+ *
212
+ * \retval CUPTI_SUCCESS
213
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
214
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
215
+ */
216
+ CUptiResult CUPTIAPI cuptiProfilerHostGetBaseMetrics(CUpti_Profiler_Host_GetBaseMetrics_Params* pParams);
217
+
218
+ /**
219
+ * \brief Params for cuptiProfilerHostGetSubMetrics
220
+ */
221
+ typedef struct CUpti_Profiler_Host_GetSubMetrics_Params
222
+ {
223
+ /// [in] Size of the data structure.
224
+ size_t structSize;
225
+ /// [in] Assign to NULL
226
+ void* pPriv;
227
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
228
+ CUpti_Profiler_Host_Object* pHostObject;
229
+ /// [in] the metric type for queried metric
230
+ CUpti_MetricType metricType;
231
+ /// [in] metric name for which sub-metric will be listed
232
+ const char* pMetricName;
233
+ /// [out] number of submetrics supported
234
+ size_t numOfSubmetrics;
235
+ /// [out] list of submetrics supported for the metric.
236
+ const char** ppSubMetrics;
237
+ } CUpti_Profiler_Host_GetSubMetrics_Params;
238
+
239
+ #define CUpti_Profiler_Host_GetSubMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetSubMetrics_Params, ppSubMetrics)
240
+
241
+ /**
242
+ * \brief Get the list of supported sub-metrics for the metric.
243
+ *
244
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetSubMetrics_Params
245
+ *
246
+ * \retval CUPTI_SUCCESS
247
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
248
+ * \retval CUPTI_ERROR_INVALID_METRIC_NAME if the metric name is not valid or not supported for the chip
249
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
250
+ */
251
+ CUptiResult CUPTIAPI cuptiProfilerHostGetSubMetrics(CUpti_Profiler_Host_GetSubMetrics_Params* pParams);
252
+
253
+ /**
254
+ * \brief Params for cuptiProfilerHostGetMetricProperties
255
+ */
256
+ typedef struct CUpti_Profiler_Host_GetMetricProperties_Params
257
+ {
258
+ /// [in] Size of the data structure.
259
+ size_t structSize;
260
+ /// [in] Assign to NULL
261
+ void* pPriv;
262
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
263
+ CUpti_Profiler_Host_Object* pHostObject;
264
+ /// [in] metric name for which its properties will be listed
265
+ const char* pMetricName;
266
+ /// [out] a short description about the metric
267
+ const char* pDescription;
268
+ /// [out] associated hw unit for the metric
269
+ const char* pHwUnit;
270
+ /// [out] the dimension of the metric values
271
+ const char* pDimUnit;
272
+ /// [out] the metric type (counter, ratio or throughput)
273
+ CUpti_MetricType metricType;
274
+ } CUpti_Profiler_Host_GetMetricProperties_Params;
275
+
276
+ #define CUpti_Profiler_Host_GetMetricProperties_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetMetricProperties_Params, metricType)
277
+
278
+ /**
279
+ * \brief Get the properties of the metric.
280
+ *
281
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetMetricProperties_Params
282
+ *
283
+ * \retval CUPTI_SUCCESS
284
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
285
+ * \retval CUPTI_ERROR_INVALID_METRIC_NAME if the metric name is not valid or not supported for the chip
286
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
287
+ */
288
+ CUptiResult CUPTIAPI cuptiProfilerHostGetMetricProperties(CUpti_Profiler_Host_GetMetricProperties_Params* pParams);
289
+
290
+ /**
291
+ * \brief Params for cuptiProfilerHostGetRangeName
292
+ */
293
+ typedef struct CUpti_Profiler_Host_GetRangeName_Params
294
+ {
295
+ /// [in] Size of the data structure.
296
+ size_t structSize;
297
+ /// [in] Assign to NULL
298
+ void* pPriv;
299
+ /// [in] the counter data image where profiling data has been decoded
300
+ const uint8_t* pCounterDataImage;
301
+ /// [in] size of counter data image
302
+ size_t counterDataImageSize;
303
+ /// [in] range index for which the range name will be queried
304
+ size_t rangeIndex;
305
+ /// [in] used in case of nested ranges, default="/". Range1<delimiter>Range2
306
+ const char* delimiter;
307
+ /// [out] the range name.
308
+ /// Note: that the CUPTI allocate the memory internal and
309
+ /// its user responsibility to free up the allocated memory
310
+ const char* pRangeName;
311
+ } CUpti_Profiler_Host_GetRangeName_Params;
312
+
313
+ #define CUpti_Profiler_Host_GetRangeName_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetRangeName_Params, pRangeName)
314
+
315
+ /**
316
+ * \brief Get the range name for the range index stored in the counter data.
317
+ * In Range profiler, for Auto range mode the range name will be numeric value
318
+ * assigned to the kernel based on execution order. For user range mode, the
319
+ * name of range will be based on the range name provided by the user using
320
+ * Push range API.
321
+ *
322
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetRangeName_Params
323
+ *
324
+ * \retval CUPTI_SUCCESS
325
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
326
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
327
+ */
328
+ CUptiResult CUPTIAPI cuptiProfilerHostGetRangeName(CUpti_Profiler_Host_GetRangeName_Params* pParams);
329
+
330
+ /**
331
+ * \brief Params for cuptiProfilerHostEvaluateToGpuValues
332
+ */
333
+ typedef struct CUpti_Profiler_Host_EvaluateToGpuValues_Params
334
+ {
335
+ /// [in] Size of the data structure.
336
+ size_t structSize;
337
+ /// [in] Assign to NULL
338
+ void* pPriv;
339
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
340
+ CUpti_Profiler_Host_Object* pHostObject;
341
+ /// [in] the counter data image where profiling data has been decoded
342
+ const uint8_t* pCounterDataImage;
343
+ /// [in] size of counter data image
344
+ size_t counterDataImageSize;
345
+ /// [in] range index for which the range name will be queried
346
+ size_t rangeIndex;
347
+ /// [in] the metrics for which GPU values will be evaluated for the range
348
+ const char** ppMetricNames;
349
+ /// [in] number of metrics
350
+ size_t numMetrics;
351
+ /// [out] output value for given metric and range index
352
+ double* pMetricValues;
353
+ } CUpti_Profiler_Host_EvaluateToGpuValues_Params;
354
+
355
+ #define CUpti_Profiler_Host_EvaluateToGpuValues_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_EvaluateToGpuValues_Params, pMetricValues)
356
+
357
+ /**
358
+ * \brief Evaluate the metric values for the range index stored in the counter data.
359
+ *
360
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_EvaluateToGpuValues_Params
361
+ *
362
+ * \retval CUPTI_SUCCESS
363
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
364
+ * \retval CUPTI_ERROR_INVALID_METRIC_NAME if the metric name is not valid or not supported for the chip
365
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
366
+ */
367
+ CUptiResult CUPTIAPI cuptiProfilerHostEvaluateToGpuValues(CUpti_Profiler_Host_EvaluateToGpuValues_Params* pParams);
368
+
369
+ /**
370
+ * \brief Params for cuptiProfilerHostConfigAddMetrics
371
+ */
372
+ typedef struct CUpti_Profiler_Host_ConfigAddMetrics_Params
373
+ {
374
+ /// [in] Size of the data structure.
375
+ size_t structSize;
376
+ /// [in] Assign to NULL
377
+ void* pPriv;
378
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
379
+ struct CUpti_Profiler_Host_Object* pHostObject;
380
+ /// [in] metric names for which config image will be generated
381
+ const char** ppMetricNames;
382
+ /// [in] number of metrics
383
+ size_t numMetrics;
384
+ } CUpti_Profiler_Host_ConfigAddMetrics_Params;
385
+
386
+ #define CUpti_Profiler_Host_ConfigAddMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_ConfigAddMetrics_Params, numMetrics)
387
+
388
+ /**
389
+ * \brief Add the metrics to the profiler host object for generating the config image.
390
+ * The config image will have the required information to schedule the metrics for
391
+ * collecting the profiling data.
392
+ * Note: PM sampling only supports single pass config image.
393
+ *
394
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_ConfigAddMetrics_Params
395
+ *
396
+ * \retval CUPTI_SUCCESS
397
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
398
+ * \retval CUPTI_ERROR_INVALID_METRIC_NAME if the metric name is not valid or not supported for the chip
399
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
400
+ */
401
+ CUptiResult CUPTIAPI cuptiProfilerHostConfigAddMetrics(CUpti_Profiler_Host_ConfigAddMetrics_Params* pParams);
402
+
403
+ /**
404
+ * \brief Params for cuptiProfilerHostGetConfigImageSize
405
+ */
406
+ typedef struct CUpti_Profiler_Host_GetConfigImageSize_Params
407
+ {
408
+ /// [in] Size of the data structure.
409
+ size_t structSize;
410
+ /// [in] Assign to NULL
411
+ void* pPriv;
412
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
413
+ CUpti_Profiler_Host_Object* pHostObject;
414
+ /// [out] the size of config image, users need to allocate the buffer for storing
415
+ size_t configImageSize;
416
+ } CUpti_Profiler_Host_GetConfigImageSize_Params;
417
+
418
+ #define CUpti_Profiler_Host_GetConfigImageSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetConfigImageSize_Params, configImageSize)
419
+
420
+ /**
421
+ * \brief Get the size of the config image for the metrics added to the profiler host object.
422
+ * Users need to allocate the buffer for storing the config image.
423
+ *
424
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetConfigImageSize_Params
425
+ *
426
+ * \retval CUPTI_SUCCESS
427
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
428
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
429
+ */
430
+ CUptiResult CUPTIAPI cuptiProfilerHostGetConfigImageSize(CUpti_Profiler_Host_GetConfigImageSize_Params* pParams);
431
+
432
+ /**
433
+ * \brief Params for cuptiProfilerHostGetConfigImage
434
+ */
435
+ typedef struct CUpti_Profiler_Host_GetConfigImage_Params
436
+ {
437
+ /// [in] Size of the data structure.
438
+ size_t structSize;
439
+ /// [in] Assign to NULL
440
+ void* pPriv;
441
+ /// [in] reference to the profiler host object allocated by CUPTI in cuptiProfilerHostInitialize
442
+ CUpti_Profiler_Host_Object* pHostObject;
443
+ /// [in] Number of bytes allocated for pBuffer
444
+ size_t configImageSize;
445
+ /// [out] Buffer receiving the config image
446
+ uint8_t* pConfigImage;
447
+ } CUpti_Profiler_Host_GetConfigImage_Params;
448
+
449
+ #define CUpti_Profiler_Host_GetConfigImage_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetConfigImage_Params, pConfigImage)
450
+
451
+ /**
452
+ * \brief Get the config image for the metrics added to the profiler host object.
453
+ * User will pass the allocated buffer to store the config image.
454
+ *
455
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetConfigImage_Params
456
+ *
457
+ * \retval CUPTI_SUCCESS
458
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
459
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
460
+ */
461
+ CUptiResult CUPTIAPI cuptiProfilerHostGetConfigImage(CUpti_Profiler_Host_GetConfigImage_Params* pParams);
462
+
463
+ /**
464
+ * \brief Params for cuptiProfilerHostGetNumOfPasses
465
+ */
466
+ typedef struct CUpti_Profiler_Host_GetNumOfPasses_Params
467
+ {
468
+ /// [in] Size of the data structure.
469
+ size_t structSize;
470
+ /// [in] Assign to NULL
471
+ void* pPriv;
472
+ /// [in] Number of bytes allocated for pConfigImage
473
+ size_t configImageSize;
474
+ /// [in] the config image buffer
475
+ uint8_t* pConfigImage;
476
+ /// [out] number of passes required for profiling scheduled metrics in the config image
477
+ size_t numOfPasses;
478
+ } CUpti_Profiler_Host_GetNumOfPasses_Params;
479
+
480
+ #define CUpti_Profiler_Host_GetNumOfPasses_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetNumOfPasses_Params, numOfPasses)
481
+
482
+ /**
483
+ * \brief Get the number of passes required for profiling the scheduled metrics in the config image.
484
+ *
485
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetNumOfPasses_Params
486
+ *
487
+ * \retval CUPTI_SUCCESS
488
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
489
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
490
+ */
491
+ CUptiResult CUPTIAPI cuptiProfilerHostGetNumOfPasses(CUpti_Profiler_Host_GetNumOfPasses_Params* pParams);
492
+
493
+ /**
494
+ * \brief Params for cuptiProfilerHostGetMaxNumHardwareMetricsPerPass
495
+ */
496
+ typedef struct CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params
497
+ {
498
+ /// [in] Size of the data structure.
499
+ size_t structSize;
500
+ /// [in] Assign to NULL
501
+ void* pPriv;
502
+ /// [in] the profiler kind one from CUpti_ProfilerType
503
+ CUpti_ProfilerType profilerType;
504
+ /// [in] accepted for chips supported at the time-of-release.
505
+ const char* pChipName;
506
+ /// [in] buffer with counter availability image - required for future chip support
507
+ uint8_t* pCounterAvailabilityImage;
508
+ /// [out] maximum number of metrics that can be scheduled in a pass
509
+ size_t maxMetricsPerPass;
510
+ } CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params;
511
+
512
+ #define CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params, maxMetricsPerPass)
513
+
514
+ /**
515
+ * \brief Get the maximum number of hardware metrics (metric names which doesn't include _sass_ keyword)
516
+ * that can be scheduled in a single pass for a chip. While this represents a theoretical upper limit,
517
+ * practical constraints may prevent reaching this threshold for a specific set of metrics. Furthermore,
518
+ * the maximum achievable value is contingent upon the characteristics and architecture of the chip in question.
519
+ *
520
+ * Use cuptiProfilerHostGetNumOfPasses API for getting the actual number of passes required for the
521
+ * for collecting the profiling data for the scheduled metrics in a config image.
522
+ *
523
+ * \param pParams A pointer to \ref CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params
524
+ *
525
+ * \retval CUPTI_SUCCESS
526
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
527
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
528
+ */
529
+ CUptiResult CUPTIAPI cuptiProfilerHostGetMaxNumHardwareMetricsPerPass(CUpti_Profiler_Host_GetMaxNumHardwareMetricsPerPass_Params* pParams);
530
+
531
+ /** @} */ /* END CUPTI_METRIC_API */
532
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
533
+ #pragma GCC visibility pop
534
+ #endif
535
+
536
+
537
+ #ifdef __cplusplus
538
+ } /* extern "C" */
539
+ #endif
540
+
541
+ #endif
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h ADDED
@@ -0,0 +1,602 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2011-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PROFILER_TARGET_H_)
51
+ #define _CUPTI_PROFILER_TARGET_H_
52
+
53
+ #include <cuda.h>
54
+ #include <cupti_result.h>
55
+ #include <stddef.h>
56
+ #include <stdint.h>
57
+
58
+ #ifdef __cplusplus
59
+ extern "C" {
60
+ #endif
61
+
62
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
63
+ #pragma GCC visibility push(default)
64
+ #endif
65
+
66
+ /**
67
+ * \defgroup CUPTI_PROFILER_API CUPTI Profiling API
68
+ * Functions, types, and enums that implement the CUPTI Profiling API.
69
+ * @{
70
+ */
71
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
72
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
73
+ #endif
74
+
75
+ /**
76
+ * \brief Profiler range attribute
77
+ *
78
+ * A metric enabled in the session's configuration is collected separately per unique range-stack in the pass.
79
+ * This is an attribute to collect metrics around each kernel in a profiling session or in an user defined range.
80
+ */
81
+ typedef enum
82
+ {
83
+ /**
84
+ * Invalid value
85
+ */
86
+ CUPTI_Range_INVALID,
87
+ /**
88
+ * Ranges are auto defined around each kernel in a profiling session
89
+ */
90
+ CUPTI_AutoRange,
91
+ /**
92
+ * A range in which metric data to be collected is defined by the user
93
+ */
94
+ CUPTI_UserRange,
95
+ /**
96
+ * Range count
97
+ */
98
+ CUPTI_Range_COUNT,
99
+ } CUpti_ProfilerRange;
100
+
101
+ /**
102
+ * \brief Profiler replay attribute
103
+ *
104
+ * For metrics which require multipass collection, a replay of the GPU kernel(s) is required.
105
+ * This is an attribute which specify how the replay of the kernel(s) to be measured is done.
106
+ */
107
+ typedef enum
108
+ {
109
+ /**
110
+ * Invalid Value
111
+ */
112
+ CUPTI_Replay_INVALID,
113
+ /**
114
+ * Replay is done by CUPTI user around the process
115
+ */
116
+ CUPTI_ApplicationReplay,
117
+ /**
118
+ * Replay is done around kernel implicitly by CUPTI
119
+ */
120
+ CUPTI_KernelReplay,
121
+ /**
122
+ * Replay is done by CUPTI user within a process
123
+ */
124
+ CUPTI_UserReplay,
125
+ /**
126
+ * Replay count
127
+ */
128
+ CUPTI_Replay_COUNT,
129
+ } CUpti_ProfilerReplayMode;
130
+
131
+ /**
132
+ * \brief Default parameter for cuptiProfilerInitialize
133
+ */
134
+ typedef struct CUpti_Profiler_Initialize_Params
135
+ {
136
+ size_t structSize; //!< [in] CUpti_Profiler_Initialize_Params_STRUCT_SIZE
137
+ void* pPriv; //!< [in] assign to NULL
138
+
139
+ } CUpti_Profiler_Initialize_Params;
140
+ #define CUpti_Profiler_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Initialize_Params, pPriv)
141
+
142
+ /**
143
+ * \brief Default parameter for cuptiProfilerDeInitialize
144
+ */
145
+ typedef struct CUpti_Profiler_DeInitialize_Params
146
+ {
147
+ size_t structSize; //!< [in] CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE
148
+ void* pPriv; //!< [in] assign to NULL
149
+
150
+ } CUpti_Profiler_DeInitialize_Params;
151
+ #define CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeInitialize_Params, pPriv)
152
+
153
+ /**
154
+ * \brief Initializes the profiler interface
155
+ *
156
+ * Loads the required libraries in the process address space.
157
+ * Sets up the hooks with the CUDA driver.
158
+ */
159
+ CUptiResult CUPTIAPI cuptiProfilerInitialize(CUpti_Profiler_Initialize_Params *pParams);
160
+
161
+ /**
162
+ * \brief DeInitializes the profiler interface
163
+ */
164
+ CUptiResult CUPTIAPI cuptiProfilerDeInitialize(CUpti_Profiler_DeInitialize_Params *pParams);
165
+
166
+ /**
167
+ * \brief Input parameter to define the counterDataImage
168
+ */
169
+ typedef struct CUpti_Profiler_CounterDataImageOptions
170
+ {
171
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImageOptions_Params_STRUCT_SIZE
172
+ void* pPriv; //!< [in] assign to NULL
173
+
174
+ const uint8_t* pCounterDataPrefix; /**< [in] Address of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix().
175
+ Must be align(8).*/
176
+ size_t counterDataPrefixSize; //!< [in] Size of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix().
177
+ uint32_t maxNumRanges; //!< [in] Maximum number of ranges that can be profiled
178
+ uint32_t maxNumRangeTreeNodes; //!< [in] Maximum number of RangeTree nodes; must be >= maxNumRanges
179
+ uint32_t maxRangeNameLength; //!< [in] Maximum string length of each RangeName, including the trailing NULL character
180
+ } CUpti_Profiler_CounterDataImageOptions;
181
+ #define CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImageOptions, maxRangeNameLength)
182
+
183
+ /**
184
+ * \brief Params for cuptiProfilerCounterDataImageCalculateSize
185
+ */
186
+ typedef struct CUpti_Profiler_CounterDataImage_CalculateSize_Params
187
+ {
188
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE
189
+ void* pPriv; //!< [in] assign to NULL
190
+
191
+ size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE
192
+ const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options
193
+ size_t counterDataImageSize; //!< [out]
194
+ } CUpti_Profiler_CounterDataImage_CalculateSize_Params;
195
+ #define CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateSize_Params, counterDataImageSize)
196
+
197
+ /**
198
+ * \brief Params for cuptiProfilerCounterDataImageInitialize
199
+ */
200
+ typedef struct CUpti_Profiler_CounterDataImage_Initialize_Params
201
+ {
202
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE
203
+ void* pPriv; //!< [in] assign to NULL
204
+
205
+ size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE
206
+ const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options
207
+ size_t counterDataImageSize; //!< [in] Size calculated from cuptiProfilerCounterDataImageCalculateSize
208
+ uint8_t* pCounterDataImage; //!< [in] The buffer to be initialized.
209
+ } CUpti_Profiler_CounterDataImage_Initialize_Params;
210
+ #define CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_Initialize_Params, pCounterDataImage)
211
+
212
+ /**
213
+ * \brief A CounterData image allocates space for values for each counter for each range.
214
+ *
215
+ * User borne the resposibility of managing the counterDataImage allocations.
216
+ * CounterDataPrefix contains meta data about the metrics that will be stored in counterDataImage.
217
+ * Use these APIs to calculate the allocation size and initialize counterData image.
218
+ */
219
+ CUptiResult CUPTIAPI cuptiProfilerCounterDataImageCalculateSize(CUpti_Profiler_CounterDataImage_CalculateSize_Params* pParams);
220
+ CUptiResult CUPTIAPI cuptiProfilerCounterDataImageInitialize(CUpti_Profiler_CounterDataImage_Initialize_Params* pParams);
221
+
222
+ /**
223
+ * \brief Params for cuptiProfilerCounterDataImageCalculateScratchBufferSize
224
+ */
225
+ typedef struct CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params
226
+ {
227
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE
228
+ void* pPriv; //!< [in] assign to NULL
229
+
230
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
231
+ uint8_t* pCounterDataImage; //!< [in]
232
+ size_t counterDataScratchBufferSize; //!< [out]
233
+ } CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params;
234
+ #define CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params, counterDataScratchBufferSize)
235
+
236
+ /**
237
+ * \brief Params for cuptiProfilerCounterDataImageInitializeScratchBuffer
238
+ */
239
+ typedef struct CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params
240
+ {
241
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE
242
+ void* pPriv; //!< [in] assign to NULL
243
+
244
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
245
+ uint8_t* pCounterDataImage; //!< [in]
246
+ size_t counterDataScratchBufferSize; //!< [in] size calculated using cuptiProfilerCounterDataImageCalculateScratchBufferSize
247
+ uint8_t* pCounterDataScratchBuffer; //!< [in] the scratch buffer to be initialized.
248
+ } CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params;
249
+ #define CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params, pCounterDataScratchBuffer)
250
+
251
+ /**
252
+ * \brief A temporary storage for CounterData image needed for internal operations
253
+ *
254
+ * Use these APIs to calculate the allocation size and initialize counterData image scratch buffer.
255
+ */
256
+ CUptiResult CUPTIAPI cuptiProfilerCounterDataImageCalculateScratchBufferSize(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params* pParams);
257
+ CUptiResult CUPTIAPI cuptiProfilerCounterDataImageInitializeScratchBuffer(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params* pParams);
258
+
259
+ /**
260
+ * \brief Params for cuptiProfilerBeginSession
261
+ */
262
+ typedef struct CUpti_Profiler_BeginSession_Params
263
+ {
264
+ size_t structSize; //!< [in] CUpti_Profiler_BeginSession_Params_STRUCT_SIZE
265
+ void* pPriv; //!< [in] assign to NULL
266
+
267
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
268
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
269
+ uint8_t* pCounterDataImage; //!< [in] address of CounterDataImage
270
+ size_t counterDataScratchBufferSize; //!< [in] size calculated from cuptiProfilerCounterDataImageInitializeScratchBuffer
271
+ uint8_t* pCounterDataScratchBuffer; //!< [in] address of CounterDataImage scratch buffer
272
+ uint8_t bDumpCounterDataInFile; //!< [in] [optional]
273
+ const char* pCounterDataFilePath; //!< [in] [optional]
274
+ CUpti_ProfilerRange range; //!< [in] CUpti_ProfilerRange
275
+ CUpti_ProfilerReplayMode replayMode; //!< [in] CUpti_ProfilerReplayMode
276
+ /* Replay options, required when replay is done by cupti user */
277
+ size_t maxRangesPerPass; //!< [in] Maximum number of ranges that can be recorded in a single pass.
278
+ size_t maxLaunchesPerPass; //!< [in] Maximum number of kernel launches that can be recorded in a single pass; must be >= maxRangesPerPass.
279
+
280
+ } CUpti_Profiler_BeginSession_Params;
281
+ #define CUpti_Profiler_BeginSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginSession_Params, maxLaunchesPerPass)
282
+ /**
283
+ * \brief Params for cuptiProfilerEndSession
284
+ */
285
+ typedef struct CUpti_Profiler_EndSession_Params
286
+ {
287
+ size_t structSize; //!< [in] CUpti_Profiler_EndSession_Params_STRUCT_SIZE
288
+ void* pPriv; //!< [in] assign to NULL
289
+
290
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
291
+ } CUpti_Profiler_EndSession_Params;
292
+ #define CUpti_Profiler_EndSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndSession_Params, ctx)
293
+
294
+ /**
295
+ * \brief Begin profiling session sets up the profiling on the device
296
+ *
297
+ * Although, it doesn't start the profiling but GPU resources needed for profiling are allocated.
298
+ * Outside of a session, the GPU will return to its normal operating state.
299
+ */
300
+ CUptiResult CUPTIAPI cuptiProfilerBeginSession(CUpti_Profiler_BeginSession_Params* pParams);
301
+ /**
302
+ * \brief Ends profiling session
303
+ *
304
+ * Frees up the GPU resources acquired for profiling.
305
+ * Outside of a session, the GPU will return to it's normal operating state.
306
+ */
307
+ CUptiResult CUPTIAPI cuptiProfilerEndSession(CUpti_Profiler_EndSession_Params* pParams);
308
+
309
+ /**
310
+ * \brief Params for cuptiProfilerSetConfig
311
+ */
312
+ typedef struct CUpti_Profiler_SetConfig_Params
313
+ {
314
+ size_t structSize; //!< [in] CUpti_Profiler_SetConfig_Params_STRUCT_SIZE
315
+ void* pPriv; //!< [in] assign to NULL
316
+
317
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
318
+ const uint8_t* pConfig; //!< [in] Config created by NVPW_RawMetricsConfig_GetConfigImage(). Must be align(8).
319
+ size_t configSize; //!< [in] size of config
320
+ uint16_t minNestingLevel; //!< [in] the lowest nesting level to be profiled; must be >= 1
321
+ uint16_t numNestingLevels; //!< [in] the number of nesting levels to profile; must be >= 1
322
+ size_t passIndex; //!< [in] Set this to zero for in-app replay; set this to the output of EndPass() for application replay
323
+ uint16_t targetNestingLevel; //!< [in] Set this to minNestingLevel for in-app replay; set this to the output of EndPass() for application
324
+ } CUpti_Profiler_SetConfig_Params;
325
+
326
+ #define CUpti_Profiler_SetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_SetConfig_Params, targetNestingLevel)
327
+
328
+ /**
329
+ * \brief Params for cuptiProfilerUnsetConfig
330
+ */
331
+ typedef struct CUpti_Profiler_UnsetConfig_Params
332
+ {
333
+ size_t structSize; //!< [in] CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE
334
+ void* pPriv; //!< [in] assign to NULL
335
+
336
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
337
+ } CUpti_Profiler_UnsetConfig_Params;
338
+ #define CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_UnsetConfig_Params, ctx)
339
+
340
+ /**
341
+ * \brief Set metrics configuration to be profiled
342
+ *
343
+ * Use these APIs to set the config to profile in a session. It can be used for advanced cases such as where multiple
344
+ * configurations are collected into a single CounterData Image on the need basis, without restarting the session.
345
+ */
346
+ CUptiResult CUPTIAPI cuptiProfilerSetConfig(CUpti_Profiler_SetConfig_Params* pParams);
347
+ /**
348
+ * \brief Unset metrics configuration profiled
349
+ *
350
+ */
351
+ CUptiResult CUPTIAPI cuptiProfilerUnsetConfig(CUpti_Profiler_UnsetConfig_Params* pParams);
352
+
353
+ /**
354
+ * \brief Params for cuptiProfilerBeginPass
355
+ */
356
+ typedef struct CUpti_Profiler_BeginPass_Params
357
+ {
358
+ size_t structSize; //!< [in] CUpti_Profiler_BeginPass_Params_STRUCT_SIZE
359
+ void* pPriv; //!< [in] assign to NULL
360
+
361
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
362
+ } CUpti_Profiler_BeginPass_Params;
363
+ #define CUpti_Profiler_BeginPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginPass_Params, ctx)
364
+
365
+ /**
366
+ * \brief Params for cuptiProfilerEndPass
367
+ */
368
+ typedef struct CUpti_Profiler_EndPass_Params
369
+ {
370
+ size_t structSize; //!< [in] CUpti_Profiler_EndPass_Params_STRUCT_SIZE
371
+ void* pPriv; //!< [in] assign to NULL
372
+
373
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
374
+ uint16_t targetNestingLevel; //! [out] The targetNestingLevel that will be collected by the *next* BeginPass.
375
+ size_t passIndex; //!< [out] The passIndex that will be collected by the *next* BeginPass
376
+ uint8_t allPassesSubmitted; //!< [out] becomes true when the last pass has been queued to the GPU
377
+ } CUpti_Profiler_EndPass_Params;
378
+ #define CUpti_Profiler_EndPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndPass_Params, allPassesSubmitted)
379
+
380
+ /**
381
+ * \brief Replay API: used for multipass collection.
382
+
383
+ * These APIs are used if user chooses to replay by itself \ref CUPTI_UserReplay or \ref CUPTI_ApplicationReplay
384
+ * for multipass collection of the metrics configurations.
385
+ * It's a no-op in case of \ref CUPTI_KernelReplay.
386
+ */
387
+ CUptiResult CUPTIAPI cuptiProfilerBeginPass(CUpti_Profiler_BeginPass_Params* pParams);
388
+
389
+ /**
390
+ * \brief Replay API: used for multipass collection.
391
+
392
+ * These APIs are used if user chooses to replay by itself \ref CUPTI_UserReplay or \ref CUPTI_ApplicationReplay
393
+ * for multipass collection of the metrics configurations.
394
+ * Its a no-op in case of \ref CUPTI_KernelReplay.
395
+ * Returns information for next pass.
396
+ */
397
+ CUptiResult CUPTIAPI cuptiProfilerEndPass(CUpti_Profiler_EndPass_Params* pParams);
398
+
399
+ /**
400
+ * \brief Params for cuptiProfilerEnableProfiling
401
+ */
402
+ typedef struct CUpti_Profiler_EnableProfiling_Params
403
+ {
404
+ size_t structSize; //!< [in] CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE
405
+ void* pPriv; //!< [in] assign to NULL
406
+
407
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
408
+ } CUpti_Profiler_EnableProfiling_Params;
409
+ #define CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EnableProfiling_Params, ctx)
410
+
411
+ /**
412
+ * \brief Params for cuptiProfilerDisableProfiling
413
+ */
414
+ typedef struct CUpti_Profiler_DisableProfiling_Params
415
+ {
416
+ size_t structSize; //!< [in] CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE
417
+ void* pPriv; //!< [in] assign to NULL
418
+
419
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
420
+ } CUpti_Profiler_DisableProfiling_Params;
421
+ #define CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DisableProfiling_Params, ctx)
422
+
423
+ /**
424
+ * \brief Enables Profiling
425
+ *
426
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
427
+ * a profiling session.
428
+ */
429
+ CUptiResult CUPTIAPI cuptiProfilerEnableProfiling(CUpti_Profiler_EnableProfiling_Params* pParams);
430
+
431
+ /**
432
+ * \brief Disable Profiling
433
+ *
434
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
435
+ * a profiling session.
436
+ */
437
+ CUptiResult CUPTIAPI cuptiProfilerDisableProfiling(CUpti_Profiler_DisableProfiling_Params* pParams);
438
+
439
+ /**
440
+ * \brief Params for cuptiProfilerIsPassCollected
441
+ */
442
+ typedef struct CUpti_Profiler_IsPassCollected_Params
443
+ {
444
+ size_t structSize; //!< [in] CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE
445
+ void* pPriv; //!< [in] assign to NULL
446
+
447
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
448
+ size_t numRangesDropped; //!< [out] number of ranges whose data was dropped in the processed pass
449
+ size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
450
+ uint8_t onePassCollected; //!< [out] true if a pass was successfully decoded
451
+ uint8_t allPassesCollected; //!< [out] becomes true when the last pass has been decoded
452
+ } CUpti_Profiler_IsPassCollected_Params;
453
+ #define CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_IsPassCollected_Params, allPassesCollected)
454
+
455
+ /**
456
+ * \brief Asynchronous call to query if the submitted pass to GPU is collected
457
+ *
458
+ */
459
+ CUptiResult CUPTIAPI cuptiProfilerIsPassCollected(CUpti_Profiler_IsPassCollected_Params* pParams);
460
+
461
+ /**
462
+ * \brief Params for cuptiProfilerFlushCounterData
463
+ */
464
+ typedef struct CUpti_Profiler_FlushCounterData_Params
465
+ {
466
+ size_t structSize; //!< [in] CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE
467
+ void* pPriv; //!< [in] assign to NULL
468
+
469
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
470
+ size_t numRangesDropped; //!< [out] number of ranges whose data was dropped in the processed passes
471
+ size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
472
+ } CUpti_Profiler_FlushCounterData_Params;
473
+ #define CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_FlushCounterData_Params, numTraceBytesDropped)
474
+
475
+ /**
476
+ * \brief Decode all the submitted passes
477
+ *
478
+ * Flush Counter data API to ensure every pass is decoded into the counterDataImage passed at beginSession.
479
+ * This will cause the CPU/GPU sync to collect all the undecoded pass.
480
+ */
481
+ CUptiResult CUPTIAPI cuptiProfilerFlushCounterData(CUpti_Profiler_FlushCounterData_Params* pParams);
482
+
483
+ typedef struct CUpti_Profiler_PushRange_Params
484
+ {
485
+ size_t structSize; //!< [in] CUpti_Profiler_PushRange_Params_STRUCT_SIZE
486
+ void* pPriv; //!< [in] assign to NULL
487
+
488
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
489
+ const char* pRangeName; //!< [in] specifies the range for subsequent launches; must not be NULL
490
+ size_t rangeNameLength; //!< [in] assign to strlen(pRangeName) if known; if set to zero, the library will call strlen()
491
+ } CUpti_Profiler_PushRange_Params;
492
+ #define CUpti_Profiler_PushRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PushRange_Params, rangeNameLength)
493
+
494
+ typedef struct CUpti_Profiler_PopRange_Params
495
+ {
496
+ size_t structSize; //!< [in] CUpti_Profiler_PopRange_Params_STRUCT_SIZE
497
+ void* pPriv; //!< [in] assign to NULL
498
+
499
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
500
+ } CUpti_Profiler_PopRange_Params;
501
+ #define CUpti_Profiler_PopRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PopRange_Params, ctx)
502
+
503
+
504
+ /**
505
+ * \brief Range API's : Push user range
506
+ *
507
+ * Counter data is collected per unique range-stack. Identified by a string label passsed by the user.
508
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
509
+ */
510
+ CUptiResult CUPTIAPI cuptiProfilerPushRange(CUpti_Profiler_PushRange_Params *pParams);
511
+
512
+ /**
513
+ * \brief Range API's : Pop user range
514
+ *
515
+ * Counter data is collected per unique range-stack. Identified by a string label passsed by the user.
516
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
517
+ */
518
+ CUptiResult CUPTIAPI cuptiProfilerPopRange(CUpti_Profiler_PopRange_Params *pParams);
519
+
520
+ /**
521
+ * \brief Params for cuptiProfilerGetCounterAvailability
522
+ */
523
+ typedef struct CUpti_Profiler_GetCounterAvailability_Params
524
+ {
525
+ size_t structSize; //!< [in] CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE
526
+ void* pPriv; //!< [in] assign to NULL
527
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
528
+ size_t counterAvailabilityImageSize; //!< [in/out] If `pCounterAvailabilityImage` is NULL, then the required size is returned in
529
+ //!< `counterAvailabilityImageSize`, otherwise `counterAvailabilityImageSize` should be set to the size of
530
+ //!< `pCounterAvailabilityImage`, and on return it would be overwritten with number of actual bytes copied
531
+ uint8_t* pCounterAvailabilityImage; //!< [in] buffer receiving counter availability image, may be NULL
532
+ } CUpti_Profiler_GetCounterAvailability_Params;
533
+ #define CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_GetCounterAvailability_Params, pCounterAvailabilityImage)
534
+
535
+ /**
536
+ * \brief Query counter availibility
537
+ *
538
+ * Use this API to query counter availability information in a buffer which can be used to filter unavailable raw metrics on host.
539
+ * Note: This API may fail, if any profiling or sampling session is active on the specified context or its device.
540
+ */
541
+ CUptiResult CUPTIAPI cuptiProfilerGetCounterAvailability(CUpti_Profiler_GetCounterAvailability_Params *pParams);
542
+
543
+ /// Generic support level enum for CUPTI
544
+ typedef enum
545
+ {
546
+ CUPTI_PROFILER_CONFIGURATION_UNKNOWN = 0, //!< Configuration support level unknown - either detection code errored out before setting this value, or unable to determine it
547
+ CUPTI_PROFILER_CONFIGURATION_UNSUPPORTED, //!< Profiling is unavailable. For specific feature fields, this means that the current configuration of this feature does not work with profiling. For instance, SLI-enabled devices do not support profiling, and this value would be returned for SLI on an SLI-enabled device.
548
+ CUPTI_PROFILER_CONFIGURATION_DISABLED, //!< Profiling would be available for this configuration, but was disabled by the system
549
+ CUPTI_PROFILER_CONFIGURATION_SUPPORTED //!< Profiling is supported. For specific feature fields, this means that the current configuration of this feature works with profiling. For instance, SLI-enabled devices do not support profiling, and this value would only be returned for devices which are not SLI-enabled.
550
+ } CUpti_Profiler_Support_Level;
551
+
552
+ /**
553
+ * \brief Profiler API types
554
+ */
555
+ typedef enum
556
+ {
557
+ CUPTI_PROFILER_RANGE_PROFILING = 0, //!< CUPTI APIs for range based profiling (cuptiProfiler*)
558
+ CUPTI_PROFILER_PC_SAMPLING, //!< CUPTI APIs collecting pc sampling data (cuptiPcSampling*)
559
+ CUPTI_PROFILER_SASS_METRICS, //!< CUPTI APIs collecting SASS metrics data (cuptiSassMetrics*)
560
+ CUPTI_PROFILER_PM_SAMPLING, //!< CUPTI APIs collecting PM Sampling data (cuptiPmSampling*)
561
+ CUPTI_PROFILER_UNKNOWN
562
+ } CUpti_Profiler_API;
563
+
564
+ /**
565
+ * \brief Params for cuptiProfilerDeviceSupported
566
+ */
567
+ typedef struct
568
+ {
569
+ size_t structSize; //!< [in] Must be CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE
570
+ void *pPriv; //!< [in] assign to NULL
571
+ CUdevice cuDevice; //!< [in] if NULL, the current CUcontext is used
572
+
573
+ CUpti_Profiler_Support_Level isSupported; //!< [out] overall SUPPORTED / UNSUPPORTED flag representing whether Profiling and PC Sampling APIs work on the given device and configuration. SUPPORTED if all following flags are SUPPORTED, UNSUPPORTED otherwise.
574
+
575
+ CUpti_Profiler_Support_Level architecture; //!< [out] SUPPORTED if the device architecture level supports the Profiling API (Compute Capability >= 7.0), UNSUPPORTED otherwise
576
+ CUpti_Profiler_Support_Level sli; //!< [out] SUPPORTED if SLI is not enabled, UNSUPPORTED otherwise
577
+ CUpti_Profiler_Support_Level vGpu; //!< [out] SUPPORTED if vGPU is supported and profiling is enabled, DISABLED if profiling is supported but not enabled, UNSUPPORTED otherwise
578
+ CUpti_Profiler_Support_Level confidentialCompute; //!< [out] SUPPORTED if confidential compute is not enabled, UNSUPPORTED otherwise
579
+ CUpti_Profiler_Support_Level cmp; //!< [out] SUPPORTED if not NVIDIA Crypto Mining Processors (CMP), UNSUPPORTED otherwise
580
+ CUpti_Profiler_Support_Level wsl; //!< [out] SUPPORTED if WSL supported, UNSUPPORTED otherwise
581
+ CUpti_Profiler_API api; //!< [in] the CUPTI API type for which device support will be checked
582
+ } CUpti_Profiler_DeviceSupported_Params;
583
+ #define CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeviceSupported_Params, api)
584
+
585
+ /**
586
+ * \brief Query device compatibility with Profiling API
587
+ *
588
+ * Use this call to determine whether a compute device and configuration are compatible with the Profiling API.
589
+ * If the configuration does not support profiling, one of several flags will indicate why.
590
+ */
591
+ CUptiResult CUPTIAPI cuptiProfilerDeviceSupported(CUpti_Profiler_DeviceSupported_Params *pParams);
592
+
593
+ /** @} */ /* END CUPTI_METRIC_API */
594
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
595
+ #pragma GCC visibility pop
596
+ #endif
597
+
598
+ #ifdef __cplusplus
599
+ } /* extern "C" */
600
+ #endif
601
+
602
+ #endif /*_CUPTI_PROFILER_TARGET_H_*/
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_range_profiler.h ADDED
@@ -0,0 +1,465 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2024 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_RANGE_PROFILER_H_)
51
+ #define _CUPTI_RANGE_PROFILER_H_
52
+
53
+ #include <cuda.h>
54
+ #include <cupti_result.h>
55
+ #include <cupti_profiler_target.h>
56
+ #include <stddef.h>
57
+ #include <stdint.h>
58
+
59
+ #ifdef __cplusplus
60
+ extern "C" {
61
+ #endif
62
+
63
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
64
+ #pragma GCC visibility push(default)
65
+ #endif
66
+
67
+ /**
68
+ * \defgroup CUPTI_RANGE_PROFILER_API CUPTI Range Profiling API
69
+ * Functions, types, and enums that implement the CUPTI Range Profiling API.
70
+ * @{
71
+ */
72
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
73
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
74
+ #endif
75
+
76
+
77
+ typedef struct CUpti_RangeProfiler_Object CUpti_RangeProfiler_Object;
78
+
79
+ /**
80
+ * \brief Params for cuptiRangeProfilerSetConfig
81
+ */
82
+ typedef struct CUpti_RangeProfiler_SetConfig_Params
83
+ {
84
+ /// [in] Size of the data structure.
85
+ size_t structSize;
86
+ /// [in] Set to NULL.
87
+ void* pPriv;
88
+ /// [in] Range Profiler Object.
89
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
90
+ /// [in] Size of the config image.
91
+ size_t configSize;
92
+ /// [in] Config image.
93
+ const uint8_t* pConfig;
94
+ /// [in] Size of the counter data image.
95
+ size_t counterDataImageSize;
96
+ /// [in] Counter data image.
97
+ uint8_t* pCounterDataImage;
98
+ /// [in] Profiling Range mode.
99
+ CUpti_ProfilerRange range;
100
+ /// [in] Replay mode.
101
+ CUpti_ProfilerReplayMode replayMode;
102
+ /// [in] Maximum number of ranges that can be profiled in a pass.
103
+ size_t maxRangesPerPass;
104
+ /// [in] number of nesting level to be profiled. For Auto range mode, this should be set to 1.
105
+ uint16_t numNestingLevels;
106
+ /// [in] minimum nesting level to be profiled.
107
+ uint16_t minNestingLevel;
108
+ /// [in] Pass index for the replay session.
109
+ size_t passIndex;
110
+ /// [in] Target nesting level for the replay session.
111
+ uint16_t targetNestingLevel;
112
+ } CUpti_RangeProfiler_SetConfig_Params;
113
+
114
+ #define CUpti_RangeProfiler_SetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_SetConfig_Params, targetNestingLevel)
115
+
116
+ /**
117
+ * \brief Set the configuration for range profiler like maximum number of ranges per pass, number of nesting levels,
118
+ * range and replay mode and the config image which has scheduling info for metric collection.
119
+ *
120
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_SetConfig_Params
121
+ *
122
+ * \retval CUPTI_SUCCESS
123
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
124
+ */
125
+ CUptiResult CUPTIAPI cuptiRangeProfilerSetConfig(CUpti_RangeProfiler_SetConfig_Params* pParams);
126
+
127
+ /**
128
+ * \brief Params for cuptiRangeProfilerEnable
129
+ */
130
+ typedef struct CUpti_RangeProfiler_Enable_Params
131
+ {
132
+ /// [in] Size of the data structure.
133
+ size_t structSize;
134
+ /// [in] Set to NULL.
135
+ void* pPriv;
136
+ /// [in] Context to be used for profiling.
137
+ CUcontext ctx;
138
+ /// [out] Range Profiler Object.
139
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
140
+ } CUpti_RangeProfiler_Enable_Params;
141
+ #define CUpti_RangeProfiler_Enable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_Enable_Params, pRangeProfilerObject)
142
+
143
+ /**
144
+ * \brief Create a range profiler object and enable range profiling on the CUDA context.
145
+ *
146
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_Enable_Params
147
+ *
148
+ * \retval CUPTI_SUCCESS
149
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
150
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if memory allocation fails while creating the PM sampling object
151
+ * \retval CUPTI_ERROR_INSUFFICIENT_PRIVILEGES if the user does not have sufficient privileges to perform the operation
152
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
153
+ */
154
+ CUptiResult CUPTIAPI cuptiRangeProfilerEnable(CUpti_RangeProfiler_Enable_Params* pParams);
155
+
156
+ /**
157
+ * \brief Params for cuptiRangeProfilerDisable
158
+ */
159
+ typedef struct CUpti_RangeProfiler_Disable_Params
160
+ {
161
+ /// [in] Size of the data structure.
162
+ size_t structSize;
163
+ /// [in] Set to NULL.
164
+ void* pPriv;
165
+ /// [in] Range Profiler Object.
166
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
167
+ } CUpti_RangeProfiler_Disable_Params;
168
+ #define CUpti_RangeProfiler_Disable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_Disable_Params, pRangeProfilerObject)
169
+
170
+ /**
171
+ * \brief Disable the range profiler on the CUDA context and destroy the range profiler object.
172
+ *
173
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_Disable_Params
174
+ *
175
+ * \retval CUPTI_SUCCESS
176
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
177
+ */
178
+ CUptiResult CUPTIAPI cuptiRangeProfilerDisable(CUpti_RangeProfiler_Disable_Params* pParams);
179
+
180
+ /**
181
+ * \brief Params for cuptiRangeProfilerStart
182
+ */
183
+ typedef struct CUpti_RangeProfiler_Start_Params
184
+ {
185
+ /// [in] Size of the data structure.
186
+ size_t structSize;
187
+ /// [in] Set to NULL.
188
+ void* pPriv;
189
+ /// [in] Range Profiler Object.
190
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
191
+ } CUpti_RangeProfiler_Start_Params;
192
+ #define CUpti_RangeProfiler_Start_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_Start_Params, pRangeProfilerObject)
193
+
194
+ /**
195
+ * \brief Start the range profiler.
196
+ *
197
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_Start_Params
198
+ *
199
+ * \retval CUPTI_SUCCESS
200
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
201
+ * \retval CUPTI_ERROR_INVALID_OPERATION if range profiler Start is called without enabling range profiler
202
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
203
+ */
204
+ CUptiResult CUPTIAPI cuptiRangeProfilerStart(CUpti_RangeProfiler_Start_Params* pParams);
205
+
206
+ /**
207
+ * \brief Params for cuptiRangeProfilerStop
208
+ */
209
+ typedef struct CUpti_RangeProfiler_Stop_Params
210
+ {
211
+ /// [in] Size of the data structure.
212
+ size_t structSize;
213
+ /// [in] Set to NULL.
214
+ void* pPriv;
215
+ /// [in] Range Profiler Object.
216
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
217
+ /// [out] pass index for the replay session.
218
+ size_t passIndex;
219
+ /// [out] target nesting level for the replay session.
220
+ size_t targetNestingLevel;
221
+ /// [out] 1 if all passes are submitted to GPU for collection, 0 otherwise.
222
+ uint8_t isAllPassSubmitted;
223
+ } CUpti_RangeProfiler_Stop_Params;
224
+ #define CUpti_RangeProfiler_Stop_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_Stop_Params, isAllPassSubmitted)
225
+
226
+ /**
227
+ * \brief Stop the range profiler.
228
+ *
229
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_Stop_Params
230
+ *
231
+ * \retval CUPTI_SUCCESS
232
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
233
+ * \retval CUPTI_ERROR_INVALID_OPERATION if range profiler Stop is called without enabling range profiler
234
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
235
+ */
236
+ CUptiResult CUPTIAPI cuptiRangeProfilerStop(CUpti_RangeProfiler_Stop_Params* pParams);
237
+
238
+ /**
239
+ * \brief Params for cuptiRangeProfilerPushRange
240
+ */
241
+ typedef struct CUpti_RangeProfiler_PushRange_Params
242
+ {
243
+ /// [in] Size of the data structure.
244
+ size_t structSize;
245
+ /// [in] Set to NULL.
246
+ void* pPriv;
247
+ /// [in] Range Profiler Object.
248
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
249
+ /// [in] Name of the range to be profiled (only valid for User range mode).
250
+ const char* pRangeName;
251
+ } CUpti_RangeProfiler_PushRange_Params;
252
+ #define CUpti_RangeProfiler_PushRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_PushRange_Params, pRangeName)
253
+
254
+ /**
255
+ * \brief Add a new range to the Range Profiler with a given range name.
256
+ * For nested ranges, this API should be called again for the innermost range. For profiling the nested
257
+ * range, users need to set the values for minNestingLevel and numNestingLevels in the SetConfig API.
258
+ *
259
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_PushRange_Params
260
+ *
261
+ * \retval CUPTI_SUCCESS
262
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
263
+ * \retval CUPTI_ERROR_INVALID_OPERATION if range profiler PushRange is called without enabling range profiler
264
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
265
+ */
266
+ CUptiResult CUPTIAPI cuptiRangeProfilerPushRange(CUpti_RangeProfiler_PushRange_Params* pParams);
267
+
268
+ /**
269
+ * \brief Params for cuptiRangeProfilerPopRange
270
+ */
271
+ typedef struct CUpti_RangeProfiler_PopRange_Params
272
+ {
273
+ /// [in] Size of the data structure.
274
+ size_t structSize;
275
+ /// [in] Set to NULL.
276
+ void* pPriv;
277
+ /// [in] Range Profiler Object.
278
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
279
+ } CUpti_RangeProfiler_PopRange_Params;
280
+ #define CUpti_RangeProfiler_PopRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_PopRange_Params, pRangeProfilerObject)
281
+
282
+ /**
283
+ * \brief pop the current range to the Range Profiler.
284
+ * The number of pop range API call should be same as number of push ranges in the same order.
285
+ *
286
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_PopRange_Params
287
+ *
288
+ * \retval CUPTI_SUCCESS
289
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
290
+ * \retval CUPTI_ERROR_INVALID_OPERATION if range profiler PopRange is called without enabling range profiler
291
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
292
+ */
293
+ CUptiResult CUPTIAPI cuptiRangeProfilerPopRange(CUpti_RangeProfiler_PopRange_Params* pParams);
294
+
295
+ /**
296
+ * \brief Params for cuptiRangeProfilerDecodeData
297
+ */
298
+ typedef struct CUpti_RangeProfiler_DecodeData_Params
299
+ {
300
+ /// [in] Size of the data structure.
301
+ size_t structSize;
302
+ /// [in] Set to NULL.
303
+ void* pPriv;
304
+ /// [in] Range Profiler Object.
305
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
306
+ /// [out] Number of ranges dropped in the processed passes.
307
+ size_t numOfRangeDropped;
308
+ } CUpti_RangeProfiler_DecodeData_Params;
309
+ #define CUpti_RangeProfiler_DecodeData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_DecodeData_Params, numOfRangeDropped)
310
+
311
+ /**
312
+ * \brief Decode the profiling data stored in the hardware to the counter data image passed in the
313
+ * SetConfig API. This API should be called after cuptiRangeProfilerStop. The counter data image
314
+ * will be updated with the profiling data for the ranges profiled.
315
+ *
316
+ * For the cases where the number of ranges counter data image can store is less than the number of ranges
317
+ * profiled (= maxRangesPerPass in SetConfig API), the counter data image will report dropped ranges.
318
+ *
319
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_DecodeData_Params
320
+ *
321
+ * \retval CUPTI_SUCCESS
322
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
323
+ * \retval CUPTI_ERROR_INVALID_OPERATION if range profiler DecodeData is called without enabling range profiler
324
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
325
+ */
326
+ CUptiResult CUPTIAPI cuptiRangeProfilerDecodeData(CUpti_RangeProfiler_DecodeData_Params* pParams);
327
+
328
+ /**
329
+ * \brief Params for cuptiRangeProfilerGetCounterDataSize
330
+ */
331
+ typedef struct CUpti_RangeProfiler_GetCounterDataSize_Params
332
+ {
333
+ /// [in] Size of the data structure.
334
+ size_t structSize;
335
+ /// [in] Set to NULL.
336
+ void* pPriv;
337
+ /// [in] Periodic sampler object.
338
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
339
+ /// [in] Names of the metrics to be collected.
340
+ const char** pMetricNames;
341
+ /// [in] Number of metrics to be collected.
342
+ size_t numMetrics;
343
+ /// [in] Maximum number of ranges to be stored in the counter data image.
344
+ size_t maxNumOfRanges;
345
+ /// [in] Maximum number of RangeTree nodes; must be >= maxNumOfRanges
346
+ uint32_t maxNumRangeTreeNodes;
347
+ /// [out] Size of the counter data image.
348
+ size_t counterDataSize;
349
+ } CUpti_RangeProfiler_GetCounterDataSize_Params;
350
+ #define CUpti_RangeProfiler_GetCounterDataSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_GetCounterDataSize_Params, counterDataSize)
351
+
352
+ /**
353
+ * \brief Get the size of the counter data image required to store the profiling data for the ranges profiled.
354
+ *
355
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_GetCounterDataSize_Params
356
+ *
357
+ * \retval CUPTI_SUCCESS
358
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
359
+ * \retval CUPTI_ERROR_INVALID_OPERATION if range profiler GetCounterDataSize is called without enabling range profiler
360
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
361
+ */
362
+ CUptiResult CUPTIAPI cuptiRangeProfilerGetCounterDataSize(CUpti_RangeProfiler_GetCounterDataSize_Params* pParams);
363
+
364
+ /**
365
+ * \brief Params for cuptiRangeProfilerCounterDataImageInitialize
366
+ */
367
+ typedef struct CUpti_RangeProfiler_CounterDataImage_Initialize_Params
368
+ {
369
+ /// [in] Size of the data structure.
370
+ size_t structSize;
371
+ /// [in] Set to NULL.
372
+ void* pPriv;
373
+ /// [in] Periodic sampler object.
374
+ CUpti_RangeProfiler_Object* pRangeProfilerObject;
375
+ /// [in] Size of the counter data image.
376
+ size_t counterDataSize;
377
+ /// [in] Counter data image.
378
+ uint8_t* pCounterData;
379
+ } CUpti_RangeProfiler_CounterDataImage_Initialize_Params;
380
+ #define CUpti_RangeProfiler_CounterDataImage_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_CounterDataImage_Initialize_Params, pCounterData)
381
+
382
+ /**
383
+ * \brief Initialize the counter data image with the profiling data for the ranges profiled.
384
+ *
385
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_CounterDataImage_Initialize_Params
386
+ *
387
+ * \retval CUPTI_SUCCESS
388
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
389
+ * \retval CUPTI_ERROR_INVALID_OPERATION if range profiler CounterDataImageInitialize is called without enabling range profiler
390
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
391
+ */
392
+ CUptiResult CUPTIAPI cuptiRangeProfilerCounterDataImageInitialize(CUpti_RangeProfiler_CounterDataImage_Initialize_Params* pParams);
393
+
394
+ /**
395
+ * \brief Params for cuptiRangeProfilerGetCounterDataInfo
396
+ */
397
+ typedef struct CUpti_RangeProfiler_GetCounterDataInfo_Params
398
+ {
399
+ /// [in] Size of the data structure.
400
+ size_t structSize;
401
+ /// [in] Set to NULL.
402
+ void* pPriv;
403
+ /// [in] Counter data image.
404
+ const uint8_t* pCounterDataImage;
405
+ /// [in] Size of the counter data image.
406
+ size_t counterDataImageSize;
407
+ /// [out] Number of ranges in the counter data image.
408
+ size_t numTotalRanges;
409
+ } CUpti_RangeProfiler_GetCounterDataInfo_Params;
410
+ #define CUpti_RangeProfiler_GetCounterDataInfo_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_GetCounterDataInfo_Params, numTotalRanges)
411
+
412
+ /**
413
+ * \brief Get the number of ranges stored in the counter data image.
414
+ *
415
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_GetCounterDataInfo_Params
416
+ *
417
+ * \retval CUPTI_SUCCESS
418
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
419
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
420
+ */
421
+ CUptiResult CUPTIAPI cuptiRangeProfilerGetCounterDataInfo(CUpti_RangeProfiler_GetCounterDataInfo_Params* pParams);
422
+
423
+ /**
424
+ * \brief Params for cuptiRangeProfilerCounterDataGetRangeInfo
425
+ */
426
+ typedef struct CUpti_RangeProfiler_CounterData_GetRangeInfo_Params
427
+ {
428
+ /// [in] Size of the data structure.
429
+ size_t structSize;
430
+ /// [in] Set to NULL.
431
+ void* pPriv;
432
+ /// [in] Counter data image.
433
+ const uint8_t* pCounterDataImage;
434
+ /// [in] Size of the counter data image.
435
+ size_t counterDataImageSize;
436
+ /// [in] Index of the sample.
437
+ size_t rangeIndex;
438
+ /// [in] range delimiter.
439
+ const char* rangeDelimiter;
440
+ /// [out] RangeName;
441
+ const char* rangeName;
442
+ } CUpti_RangeProfiler_CounterData_GetRangeInfo_Params;
443
+ #define CUpti_RangeProfiler_CounterData_GetRangeInfo_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_RangeProfiler_CounterData_GetRangeInfo_Params, rangeName)
444
+
445
+ /**
446
+ * \brief Get the range name for the given range index.
447
+ *
448
+ * \param pParams A pointer to \ref CUpti_RangeProfiler_CounterData_GetRangeInfo_Params
449
+ *
450
+ * \retval CUPTI_SUCCESS
451
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
452
+ * \retval CUPTI_ERROR_UNKNOWN for any internal error
453
+ */
454
+ CUptiResult CUPTIAPI cuptiRangeProfilerCounterDataGetRangeInfo(CUpti_RangeProfiler_CounterData_GetRangeInfo_Params* pParams);
455
+
456
+
457
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
458
+ #pragma GCC visibility pop
459
+ #endif
460
+
461
+ #ifdef __cplusplus
462
+ } /* extern "C" */
463
+ #endif
464
+
465
+ #endif /*_CUPTI_RANGE_PROFILER_H_*/
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_result.h ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_RESULT_H_)
51
+ #define _CUPTI_RESULT_H_
52
+
53
+ #ifndef CUPTIAPI
54
+ #ifdef _WIN32
55
+ #define CUPTIAPI __stdcall
56
+ #else
57
+ #define CUPTIAPI
58
+ #endif
59
+ #endif
60
+
61
+ #if defined(__cplusplus)
62
+ extern "C" {
63
+ #endif
64
+
65
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
66
+ #pragma GCC visibility push(default)
67
+ #endif
68
+
69
+ /**
70
+ * \defgroup CUPTI_RESULT_API CUPTI Result Codes
71
+ * Error and result codes returned by CUPTI functions.
72
+ * @{
73
+ */
74
+
75
+ /**
76
+ * \brief CUPTI result codes.
77
+ *
78
+ * Error and result codes returned by CUPTI functions.
79
+ */
80
+ typedef enum {
81
+ /**
82
+ * No error.
83
+ */
84
+ CUPTI_SUCCESS = 0,
85
+ /**
86
+ * One or more of the parameters is invalid.
87
+ */
88
+ CUPTI_ERROR_INVALID_PARAMETER = 1,
89
+ /**
90
+ * The device does not correspond to a valid CUDA device.
91
+ */
92
+ CUPTI_ERROR_INVALID_DEVICE = 2,
93
+ /**
94
+ * The context is NULL or not valid.
95
+ */
96
+ CUPTI_ERROR_INVALID_CONTEXT = 3,
97
+ /**
98
+ * The event domain id is invalid.
99
+ */
100
+ CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID = 4,
101
+ /**
102
+ * The event id is invalid.
103
+ */
104
+ CUPTI_ERROR_INVALID_EVENT_ID = 5,
105
+ /**
106
+ * The event name is invalid.
107
+ */
108
+ CUPTI_ERROR_INVALID_EVENT_NAME = 6,
109
+ /**
110
+ * The current operation cannot be performed due to dependency on
111
+ * other factors.
112
+ */
113
+ CUPTI_ERROR_INVALID_OPERATION = 7,
114
+ /**
115
+ * Unable to allocate enough memory to perform the requested
116
+ * operation.
117
+ */
118
+ CUPTI_ERROR_OUT_OF_MEMORY = 8,
119
+ /**
120
+ * An error occurred on the performance monitoring hardware.
121
+ */
122
+ CUPTI_ERROR_HARDWARE = 9,
123
+ /**
124
+ * The output buffer size is not sufficient to return all
125
+ * requested data.
126
+ */
127
+ CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT = 10,
128
+ /**
129
+ * API is not implemented.
130
+ */
131
+ CUPTI_ERROR_API_NOT_IMPLEMENTED = 11,
132
+ /**
133
+ * The maximum limit is reached.
134
+ */
135
+ CUPTI_ERROR_MAX_LIMIT_REACHED = 12,
136
+ /**
137
+ * The object is not yet ready to perform the requested operation.
138
+ */
139
+ CUPTI_ERROR_NOT_READY = 13,
140
+ /**
141
+ * The current operation is not compatible with the current state
142
+ * of the object
143
+ */
144
+ CUPTI_ERROR_NOT_COMPATIBLE = 14,
145
+ /**
146
+ * CUPTI is unable to initialize its connection to the CUDA
147
+ * driver.
148
+ */
149
+ CUPTI_ERROR_NOT_INITIALIZED = 15,
150
+ /**
151
+ * The metric id is invalid.
152
+ */
153
+ CUPTI_ERROR_INVALID_METRIC_ID = 16,
154
+ /**
155
+ * The metric name is invalid.
156
+ */
157
+ CUPTI_ERROR_INVALID_METRIC_NAME = 17,
158
+ /**
159
+ * The queue is empty.
160
+ */
161
+ CUPTI_ERROR_QUEUE_EMPTY = 18,
162
+ /**
163
+ * Invalid handle (internal?).
164
+ */
165
+ CUPTI_ERROR_INVALID_HANDLE = 19,
166
+ /**
167
+ * Invalid stream.
168
+ */
169
+ CUPTI_ERROR_INVALID_STREAM = 20,
170
+ /**
171
+ * Invalid kind.
172
+ */
173
+ CUPTI_ERROR_INVALID_KIND = 21,
174
+ /**
175
+ * Invalid event value.
176
+ */
177
+ CUPTI_ERROR_INVALID_EVENT_VALUE = 22,
178
+ /**
179
+ * CUPTI is disabled due to conflicts with other enabled profilers
180
+ */
181
+ CUPTI_ERROR_DISABLED = 23,
182
+ /**
183
+ * Invalid module.
184
+ */
185
+ CUPTI_ERROR_INVALID_MODULE = 24,
186
+ /**
187
+ * Invalid metric value.
188
+ */
189
+ CUPTI_ERROR_INVALID_METRIC_VALUE = 25,
190
+ /**
191
+ * The performance monitoring hardware is in use by other client.
192
+ */
193
+ CUPTI_ERROR_HARDWARE_BUSY = 26,
194
+ /**
195
+ * The attempted operation is not supported on the current
196
+ * system or device.
197
+ */
198
+ CUPTI_ERROR_NOT_SUPPORTED = 27,
199
+ /**
200
+ * Unified memory profiling is not supported on the system.
201
+ * Potential reason could be unsupported OS or architecture.
202
+ */
203
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED = 28,
204
+ /**
205
+ * Unified memory profiling is not supported on the device
206
+ */
207
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE = 29,
208
+ /**
209
+ * Unified memory profiling is not supported on a multi-GPU
210
+ * configuration without P2P support between any pair of devices
211
+ */
212
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES = 30,
213
+ /**
214
+ * Unified memory profiling is not supported under the
215
+ * Multi-Process Service (MPS) environment. CUDA 7.5 removes this
216
+ * restriction.
217
+ */
218
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_WITH_MPS = 31,
219
+ /**
220
+ * In CUDA 9.0, devices with compute capability 7.0 don't
221
+ * support CDP tracing
222
+ */
223
+ CUPTI_ERROR_CDP_TRACING_NOT_SUPPORTED = 32,
224
+ /**
225
+ * Profiling on virtualized GPU is not supported.
226
+ */
227
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 33,
228
+ /**
229
+ * Profiling results might be incorrect for CUDA applications
230
+ * compiled with nvcc version older than 9.0 for devices with
231
+ * compute capability 6.0 and 6.1.
232
+ * Profiling session will continue and CUPTI will notify it using this error code.
233
+ * User is advised to recompile the application code with nvcc version 9.0 or later.
234
+ * Ignore this warning if code is already compiled with the recommended nvcc version.
235
+ */
236
+ CUPTI_ERROR_CUDA_COMPILER_NOT_COMPATIBLE = 34,
237
+ /**
238
+ * User doesn't have sufficient privileges which are required to
239
+ * start the profiling session.
240
+ * One possible reason for this may be that the NVIDIA driver or your system
241
+ * administrator may have restricted access to the NVIDIA GPU performance counters.
242
+ * To learn how to resolve this issue and find more information, please visit
243
+ * https://developer.nvidia.com/CUPTI_ERROR_INSUFFICIENT_PRIVILEGES
244
+ */
245
+ CUPTI_ERROR_INSUFFICIENT_PRIVILEGES = 35,
246
+ /**
247
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
248
+ * metric API from the header cupti_metrics.h are not compatible with the
249
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
250
+ * in the headers nvperf_host.h and nvperf_target.h.
251
+ */
252
+ CUPTI_ERROR_OLD_PROFILER_API_INITIALIZED = 36,
253
+ /**
254
+ * Missing definition of the OpenACC API routine in the linked OpenACC library.
255
+ *
256
+ * One possible reason is that OpenACC library is linked statically in the
257
+ * user application, which might not have the definition of all the OpenACC
258
+ * API routines needed for the OpenACC profiling, as compiler might ignore
259
+ * definitions for the functions not used in the application. This issue
260
+ * can be mitigated by linking the OpenACC library dynamically.
261
+ */
262
+ CUPTI_ERROR_OPENACC_UNDEFINED_ROUTINE = 37,
263
+ /**
264
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
265
+ * metric API from the header cupti_metrics.h are not supported on devices with
266
+ * compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
267
+ * These API will be deprecated in a future CUDA release. These are replaced by
268
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
269
+ * in the headers nvperf_host.h and nvperf_target.h.
270
+ */
271
+ CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED = 38,
272
+ /**
273
+ * CUPTI doesn't allow multiple callback subscribers. Only a single subscriber
274
+ * can be registered at a time.
275
+ * Same error code is used when application is launched using NVIDIA tools
276
+ * like nvprof, Visual Profiler, Nsight Systems, Nsight Compute, cuda-gdb and
277
+ * cuda-memcheck.
278
+ */
279
+ CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED = 39,
280
+ /**
281
+ * Profiling on virtualized GPU is not allowed by hypervisor.
282
+ */
283
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_INSUFFICIENT_PRIVILEGES = 40,
284
+ /**
285
+ * Profiling and tracing are not allowed when confidential computing mode
286
+ * is enabled.
287
+ */
288
+ CUPTI_ERROR_CONFIDENTIAL_COMPUTING_NOT_SUPPORTED = 41,
289
+ /**
290
+ * CUPTI does not support NVIDIA Crypto Mining Processors (CMP).
291
+ * For more information, please visit https://developer.nvidia.com/ERR_NVCMPGPU
292
+ */
293
+ CUPTI_ERROR_CMP_DEVICE_NOT_SUPPORTED = 42,
294
+ /**
295
+ * An unknown internal error has occurred.
296
+ */
297
+ CUPTI_ERROR_UNKNOWN = 999,
298
+ CUPTI_ERROR_FORCE_INT = 0x7fffffff
299
+ } CUptiResult;
300
+
301
+ /**
302
+ * \brief Get the descriptive string for a CUptiResult.
303
+ *
304
+ * Return the descriptive string for a CUptiResult in \p *str.
305
+ * \note \b Thread-safety: this function is thread safe.
306
+ *
307
+ * \param result The result to get the string for
308
+ * \param str Returns the string
309
+ *
310
+ * \retval CUPTI_SUCCESS on success
311
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p str is NULL or \p
312
+ * result is not a valid CUptiResult
313
+ */
314
+ CUptiResult CUPTIAPI cuptiGetResultString(CUptiResult result, const char **str);
315
+
316
+ /**
317
+ * @brief Get the descriptive message corresponding to error codes returned
318
+ * by CUPTI.
319
+ *
320
+ * Return the descriptive error message for a CUptiResult in \p *str.
321
+ * \note \b Thread-safety: this function is thread safe.
322
+ *
323
+ * \param result The result to get the descriptive error message for
324
+ * \param str Returns the error message string
325
+ *
326
+ * \retval CUPTI_SUCCESS on success
327
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p str is NULL or \p
328
+ * result is not a valid CUptiResult
329
+ *
330
+ */
331
+
332
+ CUptiResult CUPTIAPI cuptiGetErrorMessage(CUptiResult result, const char **str);
333
+
334
+ /** @} */ /* END CUPTI_RESULT_API */
335
+
336
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
337
+ #pragma GCC visibility pop
338
+ #endif
339
+
340
+ #if defined(__cplusplus)
341
+ }
342
+ #endif
343
+
344
+ #endif /*_CUPTI_RESULT_H_*/
345
+
346
+
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_runtime_cbid.h ADDED
@@ -0,0 +1,497 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ #if !defined(_CUPTI_RUNTIME_CBID_H)
10
+ #define _CUPTI_RUNTIME_CBID_H
11
+
12
+ typedef enum CUpti_runtime_api_trace_cbid_enum {
13
+ CUPTI_RUNTIME_TRACE_CBID_INVALID = 0,
14
+ CUPTI_RUNTIME_TRACE_CBID_cudaDriverGetVersion_v3020 = 1,
15
+ CUPTI_RUNTIME_TRACE_CBID_cudaRuntimeGetVersion_v3020 = 2,
16
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceCount_v3020 = 3,
17
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v3020 = 4,
18
+ CUPTI_RUNTIME_TRACE_CBID_cudaChooseDevice_v3020 = 5,
19
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetChannelDesc_v3020 = 6,
20
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateChannelDesc_v3020 = 7,
21
+ CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020 = 8,
22
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetupArgument_v3020 = 9,
23
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetLastError_v3020 = 10,
24
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeekAtLastError_v3020 = 11,
25
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorString_v3020 = 12,
26
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 = 13,
27
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetCacheConfig_v3020 = 14,
28
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetAttributes_v3020 = 15,
29
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020 = 16,
30
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDevice_v3020 = 17,
31
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetValidDevices_v3020 = 18,
32
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDeviceFlags_v3020 = 19,
33
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020 = 20,
34
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocPitch_v3020 = 21,
35
+ CUPTI_RUNTIME_TRACE_CBID_cudaFree_v3020 = 22,
36
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocArray_v3020 = 23,
37
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeArray_v3020 = 24,
38
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocHost_v3020 = 25,
39
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeHost_v3020 = 26,
40
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostAlloc_v3020 = 27,
41
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostGetDevicePointer_v3020 = 28,
42
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostGetFlags_v3020 = 29,
43
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemGetInfo_v3020 = 30,
44
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020 = 31,
45
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_v3020 = 32,
46
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_v3020 = 33,
47
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_v3020 = 34,
48
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_v3020 = 35,
49
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_v3020 = 36,
50
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_v3020 = 37,
51
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_v3020 = 38,
52
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_v3020 = 39,
53
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_v3020 = 40,
54
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020 = 41,
55
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_v3020 = 42,
56
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_v3020 = 43,
57
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_v3020 = 44,
58
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_v3020 = 45,
59
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_v3020 = 46,
60
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_v3020 = 47,
61
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_v3020 = 48,
62
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset_v3020 = 49,
63
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_v3020 = 50,
64
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_v3020 = 51,
65
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_v3020 = 52,
66
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolAddress_v3020 = 53,
67
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolSize_v3020 = 54,
68
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture_v3020 = 55,
69
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture2D_v3020 = 56,
70
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToArray_v3020 = 57,
71
+ CUPTI_RUNTIME_TRACE_CBID_cudaUnbindTexture_v3020 = 58,
72
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureAlignmentOffset_v3020 = 59,
73
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureReference_v3020 = 60,
74
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindSurfaceToArray_v3020 = 61,
75
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceReference_v3020 = 62,
76
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLSetGLDevice_v3020 = 63,
77
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLRegisterBufferObject_v3020 = 64,
78
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObject_v3020 = 65,
79
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObject_v3020 = 66,
80
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnregisterBufferObject_v3020 = 67,
81
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLSetBufferObjectMapFlags_v3020 = 68,
82
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObjectAsync_v3020 = 69,
83
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObjectAsync_v3020 = 70,
84
+ CUPTI_RUNTIME_TRACE_CBID_cudaWGLGetDevice_v3020 = 71,
85
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterImage_v3020 = 72,
86
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterBuffer_v3020 = 73,
87
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnregisterResource_v3020 = 74,
88
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceSetMapFlags_v3020 = 75,
89
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsMapResources_v3020 = 76,
90
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnmapResources_v3020 = 77,
91
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedPointer_v3020 = 78,
92
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsSubResourceGetMappedArray_v3020 = 79,
93
+ CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUGetDevice_v3020 = 80,
94
+ CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUSetVDPAUDevice_v3020 = 81,
95
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterVideoSurface_v3020 = 82,
96
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterOutputSurface_v3020 = 83,
97
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevice_v3020 = 84,
98
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevices_v3020 = 85,
99
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11SetDirect3DDevice_v3020 = 86,
100
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D11RegisterResource_v3020 = 87,
101
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevice_v3020 = 88,
102
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevices_v3020 = 89,
103
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10SetDirect3DDevice_v3020 = 90,
104
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D10RegisterResource_v3020 = 91,
105
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10RegisterResource_v3020 = 92,
106
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnregisterResource_v3020 = 93,
107
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10MapResources_v3020 = 94,
108
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnmapResources_v3020 = 95,
109
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceSetMapFlags_v3020 = 96,
110
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetSurfaceDimensions_v3020 = 97,
111
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedArray_v3020 = 98,
112
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPointer_v3020 = 99,
113
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedSize_v3020 = 100,
114
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPitch_v3020 = 101,
115
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevice_v3020 = 102,
116
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevices_v3020 = 103,
117
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9SetDirect3DDevice_v3020 = 104,
118
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDirect3DDevice_v3020 = 105,
119
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D9RegisterResource_v3020 = 106,
120
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterResource_v3020 = 107,
121
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterResource_v3020 = 108,
122
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapResources_v3020 = 109,
123
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapResources_v3020 = 110,
124
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceSetMapFlags_v3020 = 111,
125
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetSurfaceDimensions_v3020 = 112,
126
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedArray_v3020 = 113,
127
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPointer_v3020 = 114,
128
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedSize_v3020 = 115,
129
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPitch_v3020 = 116,
130
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9Begin_v3020 = 117,
131
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9End_v3020 = 118,
132
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterVertexBuffer_v3020 = 119,
133
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterVertexBuffer_v3020 = 120,
134
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapVertexBuffer_v3020 = 121,
135
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapVertexBuffer_v3020 = 122,
136
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadExit_v3020 = 123,
137
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForDevice_v3020 = 124,
138
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForHost_v3020 = 125,
139
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSynchronize_v3020 = 126,
140
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetLimit_v3020 = 127,
141
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetLimit_v3020 = 128,
142
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreate_v3020 = 129,
143
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v3020 = 130,
144
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_v3020 = 131,
145
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_v3020 = 132,
146
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreate_v3020 = 133,
147
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateWithFlags_v3020 = 134,
148
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_v3020 = 135,
149
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventDestroy_v3020 = 136,
150
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventSynchronize_v3020 = 137,
151
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventQuery_v3020 = 138,
152
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v3020 = 139,
153
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3D_v3020 = 140,
154
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3DArray_v3020 = 141,
155
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_v3020 = 142,
156
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_v3020 = 143,
157
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_v3020 = 144,
158
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_v3020 = 145,
159
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetCacheConfig_v3020 = 146,
160
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_v3020 = 147,
161
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDirect3DDevice_v3020 = 148,
162
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDirect3DDevice_v3020 = 149,
163
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetCacheConfig_v3020 = 150,
164
+ CUPTI_RUNTIME_TRACE_CBID_cudaPointerGetAttributes_v4000 = 151,
165
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostRegister_v4000 = 152,
166
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostUnregister_v4000 = 153,
167
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceCanAccessPeer_v4000 = 154,
168
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceEnablePeerAccess_v4000 = 155,
169
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceDisablePeerAccess_v4000 = 156,
170
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerRegister_v4000 = 157,
171
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerUnregister_v4000 = 158,
172
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerGetDevicePointer_v4000 = 159,
173
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeer_v4000 = 160,
174
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeerAsync_v4000 = 161,
175
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_v4000 = 162,
176
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_v4000 = 163,
177
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceReset_v3020 = 164,
178
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020 = 165,
179
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetLimit_v3020 = 166,
180
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetLimit_v3020 = 167,
181
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetCacheConfig_v3020 = 168,
182
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetCacheConfig_v3020 = 169,
183
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerInitialize_v4000 = 170,
184
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStart_v4000 = 171,
185
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStop_v4000 = 172,
186
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetByPCIBusId_v4010 = 173,
187
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetPCIBusId_v4010 = 174,
188
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLGetDevices_v4010 = 175,
189
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetEventHandle_v4010 = 176,
190
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenEventHandle_v4010 = 177,
191
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetMemHandle_v4010 = 178,
192
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenMemHandle_v4010 = 179,
193
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcCloseMemHandle_v4010 = 180,
194
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetInfo_v4010 = 181,
195
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetSharedMemConfig_v4020 = 182,
196
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetSharedMemConfig_v4020 = 183,
197
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetSharedMemConfig_v4020 = 184,
198
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v5000 = 185,
199
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyTextureObject_v5000 = 186,
200
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceDesc_v5000 = 187,
201
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v5000 = 188,
202
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateSurfaceObject_v5000 = 189,
203
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroySurfaceObject_v5000 = 190,
204
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceObjectResourceDesc_v5000 = 191,
205
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocMipmappedArray_v5000 = 192,
206
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetMipmappedArrayLevel_v5000 = 193,
207
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeMipmappedArray_v5000 = 194,
208
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToMipmappedArray_v5000 = 195,
209
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedMipmappedArray_v5000 = 196,
210
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_v5000 = 197,
211
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithFlags_v5000 = 198,
212
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceViewDesc_v5000 = 199,
213
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetAttribute_v5000 = 200,
214
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v5050 = 201,
215
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithPriority_v5050 = 202,
216
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_v5050 = 203,
217
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_v5050 = 204,
218
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetStreamPriorityRange_v5050 = 205,
219
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocManaged_v6000 = 206,
220
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000 = 207,
221
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_v6000 = 208,
222
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorName_v6050 = 209,
223
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050 = 210,
224
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000 = 211,
225
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceFlags_v7000 = 212,
226
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_ptsz_v7000 = 213,
227
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_ptsz_v7000 = 214,
228
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_ptds_v7000 = 215,
229
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_ptds_v7000 = 216,
230
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_ptds_v7000 = 217,
231
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_ptds_v7000 = 218,
232
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_ptds_v7000 = 219,
233
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_ptds_v7000 = 220,
234
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_ptds_v7000 = 221,
235
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_ptds_v7000 = 222,
236
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_ptds_v7000 = 223,
237
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_ptds_v7000 = 224,
238
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_ptsz_v7000 = 225,
239
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_ptsz_v7000 = 226,
240
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_ptsz_v7000 = 227,
241
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_ptsz_v7000 = 228,
242
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_ptsz_v7000 = 229,
243
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_ptsz_v7000 = 230,
244
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_ptsz_v7000 = 231,
245
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_ptsz_v7000 = 232,
246
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset_ptds_v7000 = 233,
247
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_ptds_v7000 = 234,
248
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_ptsz_v7000 = 235,
249
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_ptsz_v7000 = 236,
250
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_ptsz_v7000 = 237,
251
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_ptsz_v7000 = 238,
252
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_ptsz_v7000 = 239,
253
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_ptsz_v7000 = 240,
254
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_ptsz_v7000 = 241,
255
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_ptsz_v7000 = 242,
256
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_ptds_v7000 = 243,
257
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_ptsz_v7000 = 244,
258
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_ptds_v7000 = 245,
259
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_ptsz_v7000 = 246,
260
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_ptsz_v7000 = 247,
261
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_ptsz_v7000 = 248,
262
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_ptds_v7000 = 249,
263
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_ptsz_v7000 = 250,
264
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000 = 251,
265
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v8000 = 252,
266
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_ptsz_v8000 = 253,
267
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v8000 = 254,
268
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetP2PAttribute_v8000 = 255,
269
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsEGLRegisterImage_v7000 = 256,
270
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnect_v7000 = 257,
271
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerDisconnect_v7000 = 258,
272
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerAcquireFrame_v7000 = 259,
273
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerReleaseFrame_v7000 = 260,
274
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerConnect_v7000 = 261,
275
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerDisconnect_v7000 = 262,
276
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerPresentFrame_v7000 = 263,
277
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerReturnFrame_v7000 = 264,
278
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedEglFrame_v7000 = 265,
279
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttribute_v8000 = 266,
280
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttributes_v8000 = 267,
281
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnectWithFlags_v7000 = 268,
282
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_v9000 = 269,
283
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_ptsz_v9000 = 270,
284
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateFromEGLSync_v9000 = 271,
285
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernelMultiDevice_v9000 = 272,
286
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetAttribute_v9000 = 273,
287
+ CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalMemory_v10000 = 274,
288
+ CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedBuffer_v10000 = 275,
289
+ CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedMipmappedArray_v10000 = 276,
290
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalMemory_v10000 = 277,
291
+ CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalSemaphore_v10000 = 278,
292
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v10000 = 279,
293
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_ptsz_v10000 = 280,
294
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v10000 = 281,
295
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_ptsz_v10000 = 282,
296
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalSemaphore_v10000 = 283,
297
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_v10000 = 284,
298
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_ptsz_v10000 = 285,
299
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphCreate_v10000 = 286,
300
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetParams_v10000 = 287,
301
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetParams_v10000 = 288,
302
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddKernelNode_v10000 = 289,
303
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode_v10000 = 290,
304
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeGetParams_v10000 = 291,
305
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams_v10000 = 292,
306
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemsetNode_v10000 = 293,
307
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeGetParams_v10000 = 294,
308
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeSetParams_v10000 = 295,
309
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddHostNode_v10000 = 296,
310
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeGetParams_v10000 = 297,
311
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddChildGraphNode_v10000 = 298,
312
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphChildGraphNodeGetGraph_v10000 = 299,
313
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEmptyNode_v10000 = 300,
314
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphClone_v10000 = 301,
315
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeFindInClone_v10000 = 302,
316
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetType_v10000 = 303,
317
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetRootNodes_v10000 = 304,
318
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v10000 = 305,
319
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v10000 = 306,
320
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v10000 = 307,
321
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v10000 = 308,
322
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroyNode_v10000 = 309,
323
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v10000 = 310,
324
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_v10000 = 311,
325
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_ptsz_v10000 = 312,
326
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecDestroy_v10000 = 313,
327
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroy_v10000 = 314,
328
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_v10000 = 315,
329
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_ptsz_v10000 = 316,
330
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_v10000 = 317,
331
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_ptsz_v10000 = 318,
332
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_v10000 = 319,
333
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_ptsz_v10000 = 320,
334
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeSetParams_v10000 = 321,
335
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetNodes_v10000 = 322,
336
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v10000 = 323,
337
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v10010 = 324,
338
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_ptsz_v10010 = 325,
339
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecKernelNodeSetParams_v10010 = 326,
340
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadExchangeStreamCaptureMode_v10010 = 327,
341
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetNvSciSyncAttributes_v10020 = 328,
342
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyAvailableDynamicSMemPerBlock_v10200 = 329,
343
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_v10200 = 330,
344
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_ptsz_v10200 = 331,
345
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams_v10020 = 332,
346
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemsetNodeSetParams_v10020 = 333,
347
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecHostNodeSetParams_v10020 = 334,
348
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecUpdate_v10020 = 335,
349
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetFuncBySymbol_v11000 = 336,
350
+ CUPTI_RUNTIME_TRACE_CBID_cudaCtxResetPersistingL2Cache_v11000 = 337,
351
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeCopyAttributes_v11000 = 338,
352
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetAttribute_v11000 = 339,
353
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetAttribute_v11000 = 340,
354
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_v11000 = 341,
355
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_ptsz_v11000 = 342,
356
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_v11000 = 343,
357
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_ptsz_v11000 = 344,
358
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_v11000 = 345,
359
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_ptsz_v11000 = 346,
360
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetTexture1DLinearMaxWidth_v11010 = 347,
361
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_v10000 = 348,
362
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_ptsz_v10000 = 349,
363
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeToSymbol_v11010 = 350,
364
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeFromSymbol_v11010 = 351,
365
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode1D_v11010 = 352,
366
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsToSymbol_v11010 = 353,
367
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010 = 354,
368
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams1D_v11010 = 355,
369
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010 = 356,
370
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010 = 357,
371
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams1D_v11010 = 358,
372
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetSparseProperties_v11010 = 359,
373
+ CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetSparseProperties_v11010 = 360,
374
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecChildGraphNodeSetParams_v11010 = 361,
375
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventRecordNode_v11010 = 362,
376
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeGetEvent_v11010 = 363,
377
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeSetEvent_v11010 = 364,
378
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventWaitNode_v11010 = 365,
379
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeGetEvent_v11010 = 366,
380
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeSetEvent_v11010 = 367,
381
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventRecordNodeSetEvent_v11010 = 368,
382
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventWaitNodeSetEvent_v11010 = 369,
383
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_v11010 = 370,
384
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_ptsz_v11010 = 371,
385
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetDefaultMemPool_v11020 = 372,
386
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_v11020 = 373,
387
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_ptsz_v11020 = 374,
388
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_v11020 = 375,
389
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_ptsz_v11020 = 376,
390
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolTrimTo_v11020 = 377,
391
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAttribute_v11020 = 378,
392
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAttribute_v11020 = 379,
393
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAccess_v11020 = 380,
394
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetPlane_v11020 = 381,
395
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAccess_v11020 = 382,
396
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolCreate_v11020 = 383,
397
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolDestroy_v11020 = 384,
398
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetMemPool_v11020 = 385,
399
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetMemPool_v11020 = 386,
400
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportToShareableHandle_v11020 = 387,
401
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportFromShareableHandle_v11020 = 388,
402
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportPointer_v11020 = 389,
403
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportPointer_v11020 = 390,
404
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_v11020 = 391,
405
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_ptsz_v11020 = 392,
406
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_v11020 = 393,
407
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020 = 394,
408
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_v11020 = 395,
409
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020 = 396,
410
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresSignalNode_v11020 = 397,
411
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020 = 398,
412
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020 = 399,
413
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresWaitNode_v11020 = 400,
414
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020 = 401,
415
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020 = 402,
416
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020 = 403,
417
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020 = 404,
418
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceFlushGPUDirectRDMAWrites_v11030 = 405,
419
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_v11030 = 406,
420
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_ptsz_v11030 = 407,
421
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDebugDotPrint_v11030 = 408,
422
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_v11030 = 409,
423
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_ptsz_v11030 = 410,
424
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v11030 = 411,
425
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_ptsz_v11030 = 412,
426
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectCreate_v11030 = 413,
427
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRetain_v11030 = 414,
428
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRelease_v11030 = 415,
429
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRetainUserObject_v11030 = 416,
430
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphReleaseUserObject_v11030 = 417,
431
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithFlags_v11040 = 418,
432
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemAllocNode_v11040 = 419,
433
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemAllocNodeGetParams_v11040 = 420,
434
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemFreeNode_v11040 = 421,
435
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemFreeNodeGetParams_v11040 = 422,
436
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGraphMemTrim_v11040 = 423,
437
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetGraphMemAttribute_v11040 = 424,
438
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetGraphMemAttribute_v11040 = 425,
439
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetEnabled_v11060 = 426,
440
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetEnabled_v11060 = 427,
441
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetMemoryRequirements_v11060 = 428,
442
+ CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetMemoryRequirements_v11060 = 429,
443
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_v11060 = 430,
444
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_ptsz_v11060 = 431,
445
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxPotentialClusterSize_v11070 = 432,
446
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveClusters_v11070 = 433,
447
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v2_v11080 = 434,
448
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v2_v11080 = 435,
449
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_v12000 = 436,
450
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_ptsz_v12000 = 437,
451
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecGetFlags_v12000 = 438,
452
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetKernel_v12000 = 439,
453
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v2_v12000 = 440,
454
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_v12000 = 441,
455
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_ptsz_v12000 = 442,
456
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v12000 = 443,
457
+ CUPTI_RUNTIME_TRACE_CBID_cudaInitDevice_v12000 = 444,
458
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v12020 = 445,
459
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetParams_v12020 = 446,
460
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecNodeSetParams_v12020 = 447,
461
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v2_v12020 = 448,
462
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_v12020 = 449,
463
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_ptsz_v12020 = 450,
464
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetName_v12030 = 451,
465
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_v12030 = 452,
466
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_ptsz_v12030 = 453,
467
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphConditionalHandleCreate_v12030 = 454,
468
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v2_v12030 = 455,
469
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v2_v12030 = 456,
470
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v2_v12030 = 457,
471
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v2_v12030 = 458,
472
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v2_v12030 = 459,
473
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v2_v12030 = 460,
474
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_v12030 = 461,
475
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_ptsz_v12030 = 462,
476
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_v12030 = 463,
477
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030 = 464,
478
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceRegisterAsyncNotification_v12040 = 465,
479
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceUnregisterAsyncNotification_v12040 = 466,
480
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetParamInfo_v12040 = 467,
481
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPointByVersion_v12050 = 468,
482
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPointByVersion_ptsz_v12050 = 469,
483
+ CUPTI_RUNTIME_TRACE_CBID_cuda470_v12060 = 470,
484
+ CUPTI_RUNTIME_TRACE_CBID_cuda471_v12060 = 471,
485
+ CUPTI_RUNTIME_TRACE_CBID_cuda472_v12060 = 472,
486
+ CUPTI_RUNTIME_TRACE_CBID_cuda473_v12060 = 473,
487
+ CUPTI_RUNTIME_TRACE_CBID_cuda474_v12060 = 474,
488
+ CUPTI_RUNTIME_TRACE_CBID_cuda475_v12060 = 475,
489
+ CUPTI_RUNTIME_TRACE_CBID_cuda476_v12060 = 476,
490
+ CUPTI_RUNTIME_TRACE_CBID_cuda477_v12060 = 477,
491
+ CUPTI_RUNTIME_TRACE_CBID_cuda478_v12060 = 478,
492
+ CUPTI_RUNTIME_TRACE_CBID_cuda479_v12060 = 479,
493
+ CUPTI_RUNTIME_TRACE_CBID_SIZE = 480,
494
+ CUPTI_RUNTIME_TRACE_CBID_FORCE_INT = 0x7fffffff
495
+ } CUpti_runtime_api_trace_cbid;
496
+
497
+ #endif
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (187 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/include/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (195 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/include/nvrtc.h ADDED
@@ -0,0 +1,876 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //
2
+ // NVIDIA_COPYRIGHT_BEGIN
3
+ //
4
+ // Copyright (c) 2014-2024, NVIDIA CORPORATION. All rights reserved.
5
+ //
6
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
7
+ // and proprietary rights in and to this software, related documentation
8
+ // and any modifications thereto. Any use, reproduction, disclosure or
9
+ // distribution of this software and related documentation without an express
10
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
11
+ //
12
+ // NVIDIA_COPYRIGHT_END
13
+ //
14
+
15
+ #ifndef __NVRTC_H__
16
+ #define __NVRTC_H__
17
+
18
+ #ifdef __cplusplus
19
+ extern "C" {
20
+ #endif /* __cplusplus */
21
+
22
+ #include <stdlib.h>
23
+
24
+
25
+ /*************************************************************************//**
26
+ *
27
+ * \defgroup error Error Handling
28
+ *
29
+ * NVRTC defines the following enumeration type and function for API call
30
+ * error handling.
31
+ *
32
+ ****************************************************************************/
33
+
34
+
35
+ /**
36
+ * \ingroup error
37
+ * \brief The enumerated type nvrtcResult defines API call result codes.
38
+ * NVRTC API functions return nvrtcResult to indicate the call
39
+ * result.
40
+ */
41
+ typedef enum {
42
+ NVRTC_SUCCESS = 0,
43
+ NVRTC_ERROR_OUT_OF_MEMORY = 1,
44
+ NVRTC_ERROR_PROGRAM_CREATION_FAILURE = 2,
45
+ NVRTC_ERROR_INVALID_INPUT = 3,
46
+ NVRTC_ERROR_INVALID_PROGRAM = 4,
47
+ NVRTC_ERROR_INVALID_OPTION = 5,
48
+ NVRTC_ERROR_COMPILATION = 6,
49
+ NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = 7,
50
+ NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = 8,
51
+ NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = 9,
52
+ NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = 10,
53
+ NVRTC_ERROR_INTERNAL_ERROR = 11,
54
+ NVRTC_ERROR_TIME_FILE_WRITE_FAILED = 12
55
+ } nvrtcResult;
56
+
57
+
58
+ /**
59
+ * \ingroup error
60
+ * \brief nvrtcGetErrorString is a helper function that returns a string
61
+ * describing the given nvrtcResult code, e.g., NVRTC_SUCCESS to
62
+ * \c "NVRTC_SUCCESS".
63
+ * For unrecognized enumeration values, it returns
64
+ * \c "NVRTC_ERROR unknown".
65
+ *
66
+ * \param [in] result CUDA Runtime Compilation API result code.
67
+ * \return Message string for the given #nvrtcResult code.
68
+ */
69
+ const char *nvrtcGetErrorString(nvrtcResult result);
70
+
71
+
72
+ /*************************************************************************//**
73
+ *
74
+ * \defgroup query General Information Query
75
+ *
76
+ * NVRTC defines the following function for general information query.
77
+ *
78
+ ****************************************************************************/
79
+
80
+
81
+ /**
82
+ * \ingroup query
83
+ * \brief nvrtcVersion sets the output parameters \p major and \p minor
84
+ * with the CUDA Runtime Compilation version number.
85
+ *
86
+ * \param [out] major CUDA Runtime Compilation major version number.
87
+ * \param [out] minor CUDA Runtime Compilation minor version number.
88
+ * \return
89
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
90
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
91
+ *
92
+ */
93
+ nvrtcResult nvrtcVersion(int *major, int *minor);
94
+
95
+
96
+ /**
97
+ * \ingroup query
98
+ * \brief nvrtcGetNumSupportedArchs sets the output parameter \p numArchs
99
+ * with the number of architectures supported by NVRTC. This can
100
+ * then be used to pass an array to ::nvrtcGetSupportedArchs to
101
+ * get the supported architectures.
102
+ *
103
+ * \param [out] numArchs number of supported architectures.
104
+ * \return
105
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
106
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
107
+ *
108
+ * see ::nvrtcGetSupportedArchs
109
+ */
110
+ nvrtcResult nvrtcGetNumSupportedArchs(int* numArchs);
111
+
112
+
113
+ /**
114
+ * \ingroup query
115
+ * \brief nvrtcGetSupportedArchs populates the array passed via the output parameter
116
+ * \p supportedArchs with the architectures supported by NVRTC. The array is
117
+ * sorted in the ascending order. The size of the array to be passed can be
118
+ * determined using ::nvrtcGetNumSupportedArchs.
119
+ *
120
+ * \param [out] supportedArchs sorted array of supported architectures.
121
+ * \return
122
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
123
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
124
+ *
125
+ * see ::nvrtcGetNumSupportedArchs
126
+ */
127
+ nvrtcResult nvrtcGetSupportedArchs(int* supportedArchs);
128
+
129
+
130
+ /*************************************************************************//**
131
+ *
132
+ * \defgroup compilation Compilation
133
+ *
134
+ * NVRTC defines the following type and functions for actual compilation.
135
+ *
136
+ ****************************************************************************/
137
+
138
+
139
+ /**
140
+ * \ingroup compilation
141
+ * \brief nvrtcProgram is the unit of compilation, and an opaque handle for
142
+ * a program.
143
+ *
144
+ * To compile a CUDA program string, an instance of nvrtcProgram must be
145
+ * created first with ::nvrtcCreateProgram, then compiled with
146
+ * ::nvrtcCompileProgram.
147
+ */
148
+ typedef struct _nvrtcProgram *nvrtcProgram;
149
+
150
+
151
+ /**
152
+ * \ingroup compilation
153
+ * \brief nvrtcCreateProgram creates an instance of nvrtcProgram with the
154
+ * given input parameters, and sets the output parameter \p prog with
155
+ * it.
156
+ *
157
+ * \param [out] prog CUDA Runtime Compilation program.
158
+ * \param [in] src CUDA program source.
159
+ * \param [in] name CUDA program name.\n
160
+ * \p name can be \c NULL; \c "default_program" is
161
+ * used when \p name is \c NULL or "".
162
+ * \param [in] numHeaders Number of headers used.\n
163
+ * \p numHeaders must be greater than or equal to 0.
164
+ * \param [in] headers Sources of the headers.\n
165
+ * \p headers can be \c NULL when \p numHeaders is
166
+ * 0.
167
+ * \param [in] includeNames Name of each header by which they can be
168
+ * included in the CUDA program source.\n
169
+ * \p includeNames can be \c NULL when \p numHeaders
170
+ * is 0. These headers must be included with the exact
171
+ * names specified here.
172
+ * \return
173
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
174
+ * - \link #nvrtcResult NVRTC_ERROR_OUT_OF_MEMORY \endlink
175
+ * - \link #nvrtcResult NVRTC_ERROR_PROGRAM_CREATION_FAILURE \endlink
176
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
177
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
178
+ *
179
+ * \see ::nvrtcDestroyProgram
180
+ */
181
+ nvrtcResult nvrtcCreateProgram(nvrtcProgram *prog,
182
+ const char *src,
183
+ const char *name,
184
+ int numHeaders,
185
+ const char * const *headers,
186
+ const char * const *includeNames);
187
+
188
+
189
+ /**
190
+ * \ingroup compilation
191
+ * \brief nvrtcDestroyProgram destroys the given program.
192
+ *
193
+ * \param [in] prog CUDA Runtime Compilation program.
194
+ * \return
195
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
196
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
197
+ *
198
+ * \see ::nvrtcCreateProgram
199
+ */
200
+ nvrtcResult nvrtcDestroyProgram(nvrtcProgram *prog);
201
+
202
+
203
+ /**
204
+ * \ingroup compilation
205
+ * \brief nvrtcCompileProgram compiles the given program.
206
+ *
207
+ * \param [in] prog CUDA Runtime Compilation program.
208
+ * \param [in] numOptions Number of compiler options passed.
209
+ * \param [in] options Compiler options in the form of C string array.\n
210
+ * \p options can be \c NULL when \p numOptions is 0.
211
+ *
212
+ * \return
213
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
214
+ * - \link #nvrtcResult NVRTC_ERROR_OUT_OF_MEMORY \endlink
215
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
216
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
217
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_OPTION \endlink
218
+ * - \link #nvrtcResult NVRTC_ERROR_COMPILATION \endlink
219
+ * - \link #nvrtcResult NVRTC_ERROR_BUILTIN_OPERATION_FAILURE \endlink
220
+ * - \link #nvrtcResult NVRTC_ERROR_TIME_FILE_WRITE_FAILED \endlink
221
+ *
222
+ * It supports compile options listed in \ref options.
223
+ */
224
+ nvrtcResult nvrtcCompileProgram(nvrtcProgram prog,
225
+ int numOptions, const char * const *options);
226
+
227
+
228
+ /**
229
+ * \ingroup compilation
230
+ * \brief nvrtcGetPTXSize sets the value of \p ptxSizeRet with the size of the PTX
231
+ * generated by the previous compilation of \p prog (including the
232
+ * trailing \c NULL).
233
+ *
234
+ * \param [in] prog CUDA Runtime Compilation program.
235
+ * \param [out] ptxSizeRet Size of the generated PTX (including the trailing
236
+ * \c NULL).
237
+ * \return
238
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
239
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
240
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
241
+ *
242
+ * \see ::nvrtcGetPTX
243
+ */
244
+ nvrtcResult nvrtcGetPTXSize(nvrtcProgram prog, size_t *ptxSizeRet);
245
+
246
+
247
+ /**
248
+ * \ingroup compilation
249
+ * \brief nvrtcGetPTX stores the PTX generated by the previous compilation
250
+ * of \p prog in the memory pointed by \p ptx.
251
+ *
252
+ * \param [in] prog CUDA Runtime Compilation program.
253
+ * \param [out] ptx Compiled result.
254
+ * \return
255
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
256
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
257
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
258
+ *
259
+ * \see ::nvrtcGetPTXSize
260
+ */
261
+ nvrtcResult nvrtcGetPTX(nvrtcProgram prog, char *ptx);
262
+
263
+
264
+ /**
265
+ * \ingroup compilation
266
+ * \brief nvrtcGetCUBINSize sets the value of \p cubinSizeRet with the size of the cubin
267
+ * generated by the previous compilation of \p prog. The value of
268
+ * cubinSizeRet is set to 0 if the value specified to \c -arch is a
269
+ * virtual architecture instead of an actual architecture.
270
+ *
271
+ * \param [in] prog CUDA Runtime Compilation program.
272
+ * \param [out] cubinSizeRet Size of the generated cubin.
273
+ * \return
274
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
275
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
276
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
277
+ *
278
+ * \see ::nvrtcGetCUBIN
279
+ */
280
+ nvrtcResult nvrtcGetCUBINSize(nvrtcProgram prog, size_t *cubinSizeRet);
281
+
282
+
283
+ /**
284
+ * \ingroup compilation
285
+ * \brief nvrtcGetCUBIN stores the cubin generated by the previous compilation
286
+ * of \p prog in the memory pointed by \p cubin. No cubin is available
287
+ * if the value specified to \c -arch is a virtual architecture instead
288
+ * of an actual architecture.
289
+ *
290
+ * \param [in] prog CUDA Runtime Compilation program.
291
+ * \param [out] cubin Compiled and assembled result.
292
+ * \return
293
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
294
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
295
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
296
+ *
297
+ * \see ::nvrtcGetCUBINSize
298
+ */
299
+ nvrtcResult nvrtcGetCUBIN(nvrtcProgram prog, char *cubin);
300
+
301
+
302
+ #if defined(_WIN32)
303
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
304
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
305
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
306
+ #elif (defined(__GNUC__))
307
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
308
+ #else
309
+ # define __DEPRECATED__(msg)
310
+ #endif
311
+
312
+ /**
313
+ * \ingroup compilation
314
+ * \brief
315
+ * DEPRECATION NOTICE: This function will be removed in a future release. Please use
316
+ * nvrtcGetLTOIRSize (and nvrtcGetLTOIR) instead.
317
+ */
318
+ __DEPRECATED__("This function will be removed in a future release. Please use nvrtcGetLTOIRSize instead")
319
+ nvrtcResult nvrtcGetNVVMSize(nvrtcProgram prog, size_t *nvvmSizeRet);
320
+
321
+ /**
322
+ * \ingroup compilation
323
+ * \brief
324
+ * DEPRECATION NOTICE: This function will be removed in a future release. Please use
325
+ * nvrtcGetLTOIR (and nvrtcGetLTOIRSize) instead.
326
+ */
327
+ __DEPRECATED__("This function will be removed in a future release. Please use nvrtcGetLTOIR instead")
328
+ nvrtcResult nvrtcGetNVVM(nvrtcProgram prog, char *nvvm);
329
+
330
+ #undef __DEPRECATED__
331
+
332
+ /**
333
+ * \ingroup compilation
334
+ * \brief nvrtcGetLTOIRSize sets the value of \p LTOIRSizeRet with the size of the LTO IR
335
+ * generated by the previous compilation of \p prog. The value of
336
+ * LTOIRSizeRet is set to 0 if the program was not compiled with
337
+ * \c -dlto.
338
+ *
339
+ * \param [in] prog CUDA Runtime Compilation program.
340
+ * \param [out] LTOIRSizeRet Size of the generated LTO IR.
341
+ * \return
342
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
343
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
344
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
345
+ *
346
+ * \see ::nvrtcGetLTOIR
347
+ */
348
+ nvrtcResult nvrtcGetLTOIRSize(nvrtcProgram prog, size_t *LTOIRSizeRet);
349
+
350
+
351
+ /**
352
+ * \ingroup compilation
353
+ * \brief nvrtcGetLTOIR stores the LTO IR generated by the previous compilation
354
+ * of \p prog in the memory pointed by \p LTOIR. No LTO IR is available
355
+ * if the program was compiled without \c -dlto.
356
+ *
357
+ * \param [in] prog CUDA Runtime Compilation program.
358
+ * \param [out] LTOIR Compiled result.
359
+ * \return
360
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
361
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
362
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
363
+ *
364
+ * \see ::nvrtcGetLTOIRSize
365
+ */
366
+ nvrtcResult nvrtcGetLTOIR(nvrtcProgram prog, char *LTOIR);
367
+
368
+
369
+ /**
370
+ * \ingroup compilation
371
+ * \brief nvrtcGetOptiXIRSize sets the value of \p optixirSizeRet with the size of the OptiX IR
372
+ * generated by the previous compilation of \p prog. The value of
373
+ * nvrtcGetOptiXIRSize is set to 0 if the program was compiled with
374
+ * options incompatible with OptiX IR generation.
375
+ *
376
+ * \param [in] prog CUDA Runtime Compilation program.
377
+ * \param [out] optixirSizeRet Size of the generated LTO IR.
378
+ * \return
379
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
380
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
381
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
382
+ *
383
+ * \see ::nvrtcGetOptiXIR
384
+ */
385
+ nvrtcResult nvrtcGetOptiXIRSize(nvrtcProgram prog, size_t *optixirSizeRet);
386
+
387
+
388
+ /**
389
+ * \ingroup compilation
390
+ * \brief nvrtcGetOptiXIR stores the OptiX IR generated by the previous compilation
391
+ * of \p prog in the memory pointed by \p optixir. No OptiX IR is available
392
+ * if the program was compiled with options incompatible with OptiX IR generation.
393
+ *
394
+ * \param [in] prog CUDA Runtime Compilation program.
395
+ * \param [out] Optix IR Compiled result.
396
+ * \return
397
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
398
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
399
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
400
+ *
401
+ * \see ::nvrtcGetOptiXIRSize
402
+ */
403
+ nvrtcResult nvrtcGetOptiXIR(nvrtcProgram prog, char *optixir);
404
+
405
+ /**
406
+ * \ingroup compilation
407
+ * \brief nvrtcGetProgramLogSize sets \p logSizeRet with the size of the
408
+ * log generated by the previous compilation of \p prog (including the
409
+ * trailing \c NULL).
410
+ *
411
+ * Note that compilation log may be generated with warnings and informative
412
+ * messages, even when the compilation of \p prog succeeds.
413
+ *
414
+ * \param [in] prog CUDA Runtime Compilation program.
415
+ * \param [out] logSizeRet Size of the compilation log
416
+ * (including the trailing \c NULL).
417
+ * \return
418
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
419
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
420
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
421
+ *
422
+ * \see ::nvrtcGetProgramLog
423
+ */
424
+ nvrtcResult nvrtcGetProgramLogSize(nvrtcProgram prog, size_t *logSizeRet);
425
+
426
+
427
+ /**
428
+ * \ingroup compilation
429
+ * \brief nvrtcGetProgramLog stores the log generated by the previous
430
+ * compilation of \p prog in the memory pointed by \p log.
431
+ *
432
+ * \param [in] prog CUDA Runtime Compilation program.
433
+ * \param [out] log Compilation log.
434
+ * \return
435
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
436
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
437
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
438
+ *
439
+ * \see ::nvrtcGetProgramLogSize
440
+ */
441
+ nvrtcResult nvrtcGetProgramLog(nvrtcProgram prog, char *log);
442
+
443
+
444
+ /**
445
+ * \ingroup compilation
446
+ * \brief nvrtcAddNameExpression notes the given name expression
447
+ * denoting the address of a __global__ function
448
+ * or __device__/__constant__ variable.
449
+ *
450
+ * The identical name expression string must be provided on a subsequent
451
+ * call to nvrtcGetLoweredName to extract the lowered name.
452
+ * \param [in] prog CUDA Runtime Compilation program.
453
+ * \param [in] name_expression constant expression denoting the address of
454
+ * a __global__ function or __device__/__constant__ variable.
455
+ * \return
456
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
457
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
458
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
459
+ * - \link #nvrtcResult NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION \endlink
460
+ *
461
+ * \see ::nvrtcGetLoweredName
462
+ */
463
+ nvrtcResult nvrtcAddNameExpression(nvrtcProgram prog,
464
+ const char * const name_expression);
465
+
466
+ /**
467
+ * \ingroup compilation
468
+ * \brief nvrtcGetLoweredName extracts the lowered (mangled) name
469
+ * for a __global__ function or __device__/__constant__ variable,
470
+ * and updates *lowered_name to point to it. The memory containing
471
+ * the name is released when the NVRTC program is destroyed by
472
+ * nvrtcDestroyProgram.
473
+ * The identical name expression must have been previously
474
+ * provided to nvrtcAddNameExpression.
475
+ *
476
+ * \param [in] prog CUDA Runtime Compilation program.
477
+ * \param [in] name_expression constant expression denoting the address of
478
+ * a __global__ function or __device__/__constant__ variable.
479
+ * \param [out] lowered_name initialized by the function to point to a
480
+ * C string containing the lowered (mangled)
481
+ * name corresponding to the provided name expression.
482
+ * \return
483
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
484
+ * - \link #nvrtcResult NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION \endlink
485
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
486
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
487
+ * - \link #nvrtcResult NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID \endlink
488
+ *
489
+ * \see ::nvrtcAddNameExpression
490
+ */
491
+ nvrtcResult nvrtcGetLoweredName(nvrtcProgram prog,
492
+ const char *const name_expression,
493
+ const char** lowered_name);
494
+
495
+
496
+ /**
497
+ * \defgroup options Supported Compile Options
498
+ *
499
+ * NVRTC supports the compile options below.
500
+ * Option names with two preceding dashs (\c --) are long option names and
501
+ * option names with one preceding dash (\c -) are short option names.
502
+ * Short option names can be used instead of long option names.
503
+ * When a compile option takes an argument, an assignment operator (\c =)
504
+ * is used to separate the compile option argument from the compile option
505
+ * name, e.g., \c "--gpu-architecture=compute_60".
506
+ * Alternatively, the compile option name and the argument can be specified in
507
+ * separate strings without an assignment operator, .e.g,
508
+ * \c "--gpu-architecture" \c "compute_60".
509
+ * Single-character short option names, such as \c -D, \c -U, and \c -I, do
510
+ * not require an assignment operator, and the compile option name and the
511
+ * argument can be present in the same string with or without spaces between
512
+ * them.
513
+ * For instance, \c "-D=<def>", \c "-D<def>", and \c "-D <def>" are all
514
+ * supported.
515
+ *
516
+ * The valid compiler options are:
517
+ *
518
+ * - Compilation targets
519
+ * - \c --gpu-architecture=\<arch\> (\c -arch)\n
520
+ * Specify the name of the class of GPU architectures for which the
521
+ * input must be compiled.\n
522
+ * - Valid <c>\<arch\></c>s:
523
+ * - \c compute_50
524
+ * - \c compute_52
525
+ * - \c compute_53
526
+ * - \c compute_60
527
+ * - \c compute_61
528
+ * - \c compute_62
529
+ * - \c compute_70
530
+ * - \c compute_72
531
+ * - \c compute_75
532
+ * - \c compute_80
533
+ * - \c compute_87
534
+ * - \c compute_89
535
+ * - \c compute_90
536
+ * - \c compute_90a
537
+ * - \c sm_50
538
+ * - \c sm_52
539
+ * - \c sm_53
540
+ * - \c sm_60
541
+ * - \c sm_61
542
+ * - \c sm_62
543
+ * - \c sm_70
544
+ * - \c sm_72
545
+ * - \c sm_75
546
+ * - \c sm_80
547
+ * - \c sm_87
548
+ * - \c sm_89
549
+ * - \c sm_90
550
+ * - \c sm_90a
551
+ * - Default: \c compute_52
552
+ * - Separate compilation / whole-program compilation
553
+ * - \c --device-c (\c -dc)\n
554
+ * Generate relocatable code that can be linked with other relocatable
555
+ * device code. It is equivalent to --relocatable-device-code=true.
556
+ * - \c --device-w (\c -dw)\n
557
+ * Generate non-relocatable code. It is equivalent to
558
+ * \c --relocatable-device-code=false.
559
+ * - \c --relocatable-device-code={true|false} (\c -rdc)\n
560
+ * Enable (disable) the generation of relocatable device code.
561
+ * - Default: \c false
562
+ * - \c --extensible-whole-program (\c -ewp)\n
563
+ * Do extensible whole program compilation of device code.
564
+ * - Default: \c false
565
+ * - Debugging support
566
+ * - \c --device-debug (\c -G)\n
567
+ * Generate debug information. If --dopt is not specified,
568
+ * then turns off all optimizations.
569
+ * - \c --generate-line-info (\c -lineinfo)\n
570
+ * Generate line-number information.
571
+ * - Code generation
572
+ * - \c --dopt on (\c -dopt)\n
573
+ * - \c --dopt=on \n
574
+ * Enable device code optimization. When specified along with '-G', enables
575
+ * limited debug information generation for optimized device code (currently,
576
+ * only line number information).
577
+ * When '-G' is not specified, '-dopt=on' is implicit.
578
+ * - \c --ptxas-options \<options\> (\c -Xptxas)\n
579
+ * - \c --ptxas-options=\<options\> \n
580
+ * Specify options directly to ptxas, the PTX optimizing assembler.
581
+ * - \c --maxrregcount=\<N\> (\c -maxrregcount)\n
582
+ * Specify the maximum amount of registers that GPU functions can use.
583
+ * Until a function-specific limit, a higher value will generally
584
+ * increase the performance of individual GPU threads that execute this
585
+ * function. However, because thread registers are allocated from a
586
+ * global register pool on each GPU, a higher value of this option will
587
+ * also reduce the maximum thread block size, thereby reducing the amount
588
+ * of thread parallelism. Hence, a good maxrregcount value is the result
589
+ * of a trade-off. If this option is not specified, then no maximum is
590
+ * assumed. Value less than the minimum registers required by ABI will
591
+ * be bumped up by the compiler to ABI minimum limit.
592
+ * - \c --ftz={true|false} (\c -ftz)\n
593
+ * When performing single-precision floating-point operations, flush
594
+ * denormal values to zero or preserve denormal values.
595
+ * \c --use_fast_math implies \c --ftz=true.
596
+ * - Default: \c false
597
+ * - \c --prec-sqrt={true|false} (\c -prec-sqrt)\n
598
+ * For single-precision floating-point square root, use IEEE
599
+ * round-to-nearest mode or use a faster approximation.
600
+ * \c --use_fast_math implies \c --prec-sqrt=false.
601
+ * - Default: \c true
602
+ * - \c --prec-div={true|false} (\c -prec-div)\n
603
+ * For single-precision floating-point division and reciprocals, use IEEE
604
+ * round-to-nearest mode or use a faster approximation.
605
+ * \c --use_fast_math implies \c --prec-div=false.
606
+ * - Default: \c true
607
+ * - \c --fmad={true|false} (\c -fmad)\n
608
+ * Enables (disables) the contraction of floating-point multiplies and
609
+ * adds/subtracts into floating-point multiply-add operations (FMAD,
610
+ * FFMA, or DFMA). \c --use_fast_math implies \c --fmad=true.
611
+ * - Default: \c true
612
+ * - \c --use_fast_math (\c -use_fast_math)\n
613
+ * Make use of fast math operations.
614
+ * \c --use_fast_math implies \c --ftz=true \c --prec-div=false
615
+ * \c --prec-sqrt=false \c --fmad=true.
616
+ * - \c --extra-device-vectorization (\c -extra-device-vectorization)\n
617
+ * Enables more aggressive device code vectorization in the NVVM optimizer.
618
+ * - \c --modify-stack-limit={true|false} (\c -modify-stack-limit)\n
619
+ * On Linux, during compilation, use \c setrlimit() to increase stack size
620
+ * to maximum allowed. The limit is reset to the previous value at the
621
+ * end of compilation.
622
+ * Note: \c setrlimit() changes the value for the entire process.
623
+ * - Default: \c true
624
+ * - \c --dlink-time-opt (\c -dlto)\n
625
+ * Generate intermediate code for later link-time optimization.
626
+ * It implies \c -rdc=true.
627
+ * Note: when this option is used the nvrtcGetLTOIR API should be used,
628
+ * as PTX or Cubin will not be generated.
629
+ * - \c --gen-opt-lto (\c -gen-opt-lto)\n
630
+ * Run the optimizer passes before generating the LTO IR.
631
+ * - \c --optix-ir (\c -optix-ir)\n
632
+ * Generate OptiX IR. The Optix IR is only intended for consumption by OptiX
633
+ * through appropriate APIs. This feature is not supported with
634
+ * link-time-optimization (\c -dlto)\n.
635
+ * Note: when this option is used the nvrtcGetOptiX API should be used,
636
+ * as PTX or Cubin will not be generated.
637
+ * - \c --jump-table-density=[0-101] (\c -jtd)\n
638
+ * Specify the case density percentage in switch statements, and use it as
639
+ * a minimal threshold to determine whether jump table(brx.idx instruction)
640
+ * will be used to implement a switch statement. Default value is 101. The
641
+ * percentage ranges from 0 to 101 inclusively.
642
+ * - \c --device-stack-protector={true|false} (\c -device-stack-protector)\n
643
+ * Enable (disable) the generation of stack canaries in device code.\n
644
+ * - Default: \c false
645
+ * - Preprocessing
646
+ * - \c --define-macro=\<def\> (\c -D)\n
647
+ * \c \<def\> can be either \c \<name\> or \c \<name=definitions\>.
648
+ * - \c \<name\> \n
649
+ * Predefine \c \<name\> as a macro with definition \c 1.
650
+ * - \c \<name\>=\<definition\> \n
651
+ * The contents of \c \<definition\> are tokenized and preprocessed
652
+ * as if they appeared during translation phase three in a \c \#define
653
+ * directive. In particular, the definition will be truncated by
654
+ * embedded new line characters.
655
+ * - \c --undefine-macro=\<def\> (\c -U)\n
656
+ * Cancel any previous definition of \c \<def\>.
657
+ * - \c --include-path=\<dir\> (\c -I)\n
658
+ * Add the directory \c \<dir\> to the list of directories to be
659
+ * searched for headers. These paths are searched after the list of
660
+ * headers given to ::nvrtcCreateProgram.
661
+ * - \c --pre-include=\<header\> (\c -include)\n
662
+ * Preinclude \c \<header\> during preprocessing.
663
+ * - \c --no-source-include (\c -no-source-include)
664
+ * The preprocessor by default adds the directory of each input sources
665
+ * to the include path. This option disables this feature and only
666
+ * considers the path specified explicitly.
667
+ * - Language Dialect
668
+ * - \c --std={c++03|c++11|c++14|c++17|c++20}
669
+ * (\c -std={c++11|c++14|c++17|c++20})\n
670
+ * Set language dialect to C++03, C++11, C++14, C++17 or C++20
671
+ * - Default: \c c++17
672
+ * - \c --builtin-move-forward={true|false} (\c -builtin-move-forward)\n
673
+ * Provide builtin definitions of \c std::move and \c std::forward,
674
+ * when C++11 or later language dialect is selected.
675
+ * - Default: \c true
676
+ * - \c --builtin-initializer-list={true|false}
677
+ * (\c -builtin-initializer-list)\n
678
+ * Provide builtin definitions of \c std::initializer_list class and
679
+ * member functions when C++11 or later language dialect is selected.
680
+ * - Default: \c true
681
+ * - Misc.
682
+ * - \c --disable-warnings (\c -w)\n
683
+ * Inhibit all warning messages.
684
+ * - \c --restrict (\c -restrict)\n
685
+ * Programmer assertion that all kernel pointer parameters are restrict
686
+ * pointers.
687
+ * - \c --device-as-default-execution-space
688
+ * (\c -default-device)\n
689
+ * Treat entities with no execution space annotation as \c __device__
690
+ * entities.
691
+ * - \c --device-int128 (\c -device-int128)\n
692
+ * Allow the \c __int128 type in device code. Also causes the macro \c __CUDACC_RTC_INT128__
693
+ * to be defined.
694
+ * - \c --optimization-info=\<kind\> (\c -opt-info)\n
695
+ * Provide optimization reports for the specified kind of optimization.
696
+ * The following kind tags are supported:
697
+ * - \c inline : emit a remark when a function is inlined.
698
+ * - \c --display-error-number (\c -err-no)\n
699
+ * Display diagnostic number for warning messages. (Default)
700
+ * - \c --no-display-error-number (\c -no-err-no)\n
701
+ * Disables the display of a diagnostic number for warning messages.
702
+ * - \c --diag-error=<error-number>,... (\c -diag-error)\n
703
+ * Emit error for specified diagnostic message number(s). Message numbers can be separated by comma.
704
+ * - \c --diag-suppress=<error-number>,... (\c -diag-suppress)\n
705
+ * Suppress specified diagnostic message number(s). Message numbers can be separated by comma.
706
+ * - \c --diag-warn=<error-number>,... (\c -diag-warn)\n
707
+ * Emit warning for specified diagnostic message number(s). Message numbers can be separated by comma.
708
+ * - \c --brief-diagnostics={true|false} (\c -brief-diag)\n
709
+ * This option disables or enables showing source line and column info
710
+ * in a diagnostic.
711
+ * The --brief-diagnostics=true will not show the source line and column info.
712
+ * - Default: \c false
713
+ * - \c --time=<file-name> (\c -time)\n
714
+ * Generate a comma separated value table with the time taken by each compilation
715
+ * phase, and append it at the end of the file given as the option argument.
716
+ * If the file does not exist, the column headings are generated in the first row
717
+ * of the table. If the file name is '-', the timing data is written to the compilation log.
718
+ * - \c --split-compile=<number of threads> (\c -split-compile=<number of threads>)\n
719
+ * Perform compiler optimizations in parallel.
720
+ * Split compilation attempts to reduce compile time by enabling the compiler to run certain
721
+ * optimization passes concurrently. This option accepts a numerical value that specifies the
722
+ * maximum number of threads the compiler can use. One can also allow the compiler to use the maximum
723
+ * threads available on the system by setting --split-compile=0.
724
+ * Setting --split-compile=1 will cause this option to be ignored.
725
+ * - \c --fdevice-syntax-only (\c -fdevice-syntax-only)\n
726
+ * Ends device compilation after front-end syntax checking. This option does not generate valid
727
+ * device code.
728
+ * - \c --minimal (\c -minimal)\n
729
+ * Omit certain language features to reduce compile time for small programs.
730
+ * In particular, the following are omitted:
731
+ * - Texture and surface functions and associated types, e.g., \c cudaTextureObject_t.
732
+ * - CUDA Runtime Functions that are provided by the cudadevrt device code library,
733
+ * typically named with prefix "cuda", e.g., \c cudaMalloc.
734
+ * - Kernel launch from device code.
735
+ * - Types and macros associated with CUDA Runtime and Driver APIs,
736
+ * provided by cuda/tools/cudart/driver_types.h, typically named with prefix "cuda", e.g., \c cudaError_t.
737
+ * - \c --device-stack-protector (\c -device-stack-protector)\n
738
+ * Enable stack canaries in device code.
739
+ * Stack canaries make it more difficult to exploit certain types of memory safety bugs involving stack-local variables.
740
+ * The compiler uses heuristics to assess the risk of such a bug in each function. Only those functions which are deemed high-risk make use of a stack canary.
741
+ *
742
+ */
743
+
744
+ #ifdef __cplusplus
745
+ }
746
+ #endif /* __cplusplus */
747
+
748
+
749
+ /* The utility function 'nvrtcGetTypeName' is not available by default. Define
750
+ the macro 'NVRTC_GET_TYPE_NAME' to a non-zero value to make it available.
751
+ */
752
+
753
+ #if NVRTC_GET_TYPE_NAME || __DOXYGEN_ONLY__
754
+
755
+ #if NVRTC_USE_CXXABI || __clang__ || __GNUC__ || __DOXYGEN_ONLY__
756
+ #include <cxxabi.h>
757
+ #include <cstdlib>
758
+
759
+ #elif defined(_WIN32)
760
+ #include <Windows.h>
761
+ #include <DbgHelp.h>
762
+ #endif /* NVRTC_USE_CXXABI || __clang__ || __GNUC__ */
763
+
764
+
765
+ #include <string>
766
+ #include <typeinfo>
767
+
768
+ template <typename T> struct __nvrtcGetTypeName_helper_t { };
769
+
770
+ /*************************************************************************//**
771
+ *
772
+ * \defgroup hosthelper Host Helper
773
+ *
774
+ * NVRTC defines the following functions for easier interaction with host code.
775
+ *
776
+ ****************************************************************************/
777
+
778
+ /**
779
+ * \ingroup hosthelper
780
+ * \brief nvrtcGetTypeName stores the source level name of a type in the given
781
+ * std::string location.
782
+ *
783
+ * This function is only provided when the macro NVRTC_GET_TYPE_NAME is
784
+ * defined with a non-zero value. It uses abi::__cxa_demangle or UnDecorateSymbolName
785
+ * function calls to extract the type name, when using gcc/clang or cl.exe compilers,
786
+ * respectively. If the name extraction fails, it will return NVRTC_INTERNAL_ERROR,
787
+ * otherwise *result is initialized with the extracted name.
788
+ *
789
+ * Windows-specific notes:
790
+ * - nvrtcGetTypeName() is not multi-thread safe because it calls UnDecorateSymbolName(),
791
+ * which is not multi-thread safe.
792
+ * - The returned string may contain Microsoft-specific keywords such as __ptr64 and __cdecl.
793
+ *
794
+ * \param [in] tinfo: reference to object of type std::type_info for a given type.
795
+ * \param [in] result: pointer to std::string in which to store the type name.
796
+ * \return
797
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
798
+ * - \link #nvrtcResult NVRTC_ERROR_INTERNAL_ERROR \endlink
799
+ *
800
+ */
801
+ inline nvrtcResult nvrtcGetTypeName(const std::type_info &tinfo, std::string *result)
802
+ {
803
+ #if USE_CXXABI || __clang__ || __GNUC__
804
+ const char *name = tinfo.name();
805
+ int status;
806
+ char *undecorated_name = abi::__cxa_demangle(name, 0, 0, &status);
807
+ if (status == 0) {
808
+ *result = undecorated_name;
809
+ free(undecorated_name);
810
+ return NVRTC_SUCCESS;
811
+ }
812
+ #elif defined(_WIN32)
813
+ const char *name = tinfo.raw_name();
814
+ if (!name || *name != '.') {
815
+ return NVRTC_ERROR_INTERNAL_ERROR;
816
+ }
817
+ char undecorated_name[4096];
818
+ //name+1 skips over the '.' prefix
819
+ if(UnDecorateSymbolName(name+1, undecorated_name,
820
+ sizeof(undecorated_name) / sizeof(*undecorated_name),
821
+ //note: doesn't seem to work correctly without UNDNAME_NO_ARGUMENTS.
822
+ UNDNAME_NO_ARGUMENTS | UNDNAME_NAME_ONLY ) ) {
823
+ *result = undecorated_name;
824
+ return NVRTC_SUCCESS;
825
+ }
826
+ #endif /* USE_CXXABI || __clang__ || __GNUC__ */
827
+
828
+ return NVRTC_ERROR_INTERNAL_ERROR;
829
+ }
830
+
831
+ /**
832
+ * \ingroup hosthelper
833
+ * \brief nvrtcGetTypeName stores the source level name of the template type argument
834
+ * T in the given std::string location.
835
+ *
836
+ * This function is only provided when the macro NVRTC_GET_TYPE_NAME is
837
+ * defined with a non-zero value. It uses abi::__cxa_demangle or UnDecorateSymbolName
838
+ * function calls to extract the type name, when using gcc/clang or cl.exe compilers,
839
+ * respectively. If the name extraction fails, it will return NVRTC_INTERNAL_ERROR,
840
+ * otherwise *result is initialized with the extracted name.
841
+ *
842
+ * Windows-specific notes:
843
+ * - nvrtcGetTypeName() is not multi-thread safe because it calls UnDecorateSymbolName(),
844
+ * which is not multi-thread safe.
845
+ * - The returned string may contain Microsoft-specific keywords such as __ptr64 and __cdecl.
846
+ *
847
+ * \param [in] result: pointer to std::string in which to store the type name.
848
+ * \return
849
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
850
+ * - \link #nvrtcResult NVRTC_ERROR_INTERNAL_ERROR \endlink
851
+ *
852
+ */
853
+
854
+ template <typename T>
855
+ nvrtcResult nvrtcGetTypeName(std::string *result)
856
+ {
857
+ nvrtcResult res = nvrtcGetTypeName(typeid(__nvrtcGetTypeName_helper_t<T>),
858
+ result);
859
+ if (res != NVRTC_SUCCESS)
860
+ return res;
861
+
862
+ std::string repr = *result;
863
+ std::size_t idx = repr.find("__nvrtcGetTypeName_helper_t");
864
+ idx = (idx != std::string::npos) ? repr.find("<", idx) : idx;
865
+ std::size_t last_idx = repr.find_last_of('>');
866
+ if (idx == std::string::npos || last_idx == std::string::npos) {
867
+ return NVRTC_ERROR_INTERNAL_ERROR;
868
+ }
869
+ ++idx;
870
+ *result = repr.substr(idx, last_idx - idx);
871
+ return NVRTC_SUCCESS;
872
+ }
873
+
874
+ #endif /* NVRTC_GET_TYPE_NAME */
875
+
876
+ #endif /* __NVRTC_H__ */
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (191 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.6 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1d5fce3e3ef04a7b5e6604c0bb17f40d39f9965a3f6d0b2b7a2bc308e189ed0
3
+ size 5322632
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ced830bff406b5555298cc1eacc8670b6bb6b9378e6e5783fac53ddcf7cbbf5b
3
+ size 58726320
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (189 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (197 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/builtin_types.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*******************************************************************************
51
+ * *
52
+ * *
53
+ * *
54
+ *******************************************************************************/
55
+
56
+ #include "device_types.h"
57
+ #if !defined(__CUDACC_RTC__)
58
+ #define EXCLUDE_FROM_RTC
59
+ #include "driver_types.h"
60
+ #undef EXCLUDE_FROM_RTC
61
+ #endif /* !__CUDACC_RTC__ */
62
+ #include "surface_types.h"
63
+ #include "texture_types.h"
64
+ #include "vector_types.h"
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h ADDED
@@ -0,0 +1,588 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CHANNEL_DESCRIPTOR_H__)
51
+ #define __CHANNEL_DESCRIPTOR_H__
52
+
53
+ #if defined(__cplusplus)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #include "cuda_runtime_api.h"
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ /**
70
+ * \addtogroup CUDART_HIGHLEVEL
71
+ *
72
+ * @{
73
+ */
74
+
75
+ /**
76
+ * \brief \hl Returns a channel descriptor using the specified format
77
+ *
78
+ * Returns a channel descriptor with format \p f and number of bits of each
79
+ * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is
80
+ * defined as:
81
+ * \code
82
+ struct cudaChannelFormatDesc {
83
+ int x, y, z, w;
84
+ enum cudaChannelFormatKind f;
85
+ };
86
+ * \endcode
87
+ *
88
+ * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,
89
+ * ::cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat,
90
+ * ::cudaChannelFormatKindSignedNormalized8X1, ::cudaChannelFormatKindSignedNormalized8X2,
91
+ * ::cudaChannelFormatKindSignedNormalized8X4,
92
+ * ::cudaChannelFormatKindUnsignedNormalized8X1, ::cudaChannelFormatKindUnsignedNormalized8X2,
93
+ * ::cudaChannelFormatKindUnsignedNormalized8X4,
94
+ * ::cudaChannelFormatKindSignedNormalized16X1, ::cudaChannelFormatKindSignedNormalized16X2,
95
+ * ::cudaChannelFormatKindSignedNormalized16X4,
96
+ * ::cudaChannelFormatKindUnsignedNormalized16X1, ::cudaChannelFormatKindUnsignedNormalized16X2,
97
+ * ::cudaChannelFormatKindUnsignedNormalized16X4
98
+ * or ::cudaChannelFormatKindNV12.
99
+ *
100
+ * The format is specified by the template specialization.
101
+ *
102
+ * The template function specializes for the following scalar types:
103
+ * char, signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, and float.
104
+ * The template function specializes for the following vector types:
105
+ * char{1|2|4}, uchar{1|2|4}, short{1|2|4}, ushort{1|2|4}, int{1|2|4}, uint{1|2|4}, long{1|2|4}, ulong{1|2|4}, float{1|2|4}.
106
+ * The template function specializes for following cudaChannelFormatKind enum values:
107
+ * ::cudaChannelFormatKind{Uns|S}ignedNormalized{8|16}X{1|2|4}, and ::cudaChannelFormatKindNV12.
108
+ *
109
+ * Invoking the function on a type without a specialization defaults to creating a channel format of kind ::cudaChannelFormatKindNone
110
+ *
111
+ * \return
112
+ * Channel descriptor with format \p f
113
+ *
114
+ * \sa \ref ::cudaCreateChannelDesc(int,int,int,int,cudaChannelFormatKind) "cudaCreateChannelDesc (Low level)",
115
+ * ::cudaGetChannelDesc,
116
+ */
117
+ template<class T> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
118
+ {
119
+ return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
120
+ }
121
+
122
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf(void)
123
+ {
124
+ int e = (int)sizeof(unsigned short) * 8;
125
+
126
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
127
+ }
128
+
129
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf1(void)
130
+ {
131
+ int e = (int)sizeof(unsigned short) * 8;
132
+
133
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
134
+ }
135
+
136
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf2(void)
137
+ {
138
+ int e = (int)sizeof(unsigned short) * 8;
139
+
140
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
141
+ }
142
+
143
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf4(void)
144
+ {
145
+ int e = (int)sizeof(unsigned short) * 8;
146
+
147
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
148
+ }
149
+
150
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char>(void)
151
+ {
152
+ int e = (int)sizeof(char) * 8;
153
+
154
+ #if defined(_CHAR_UNSIGNED) || defined(__CHAR_UNSIGNED__)
155
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
156
+ #else /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
157
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
158
+ #endif /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
159
+ }
160
+
161
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<signed char>(void)
162
+ {
163
+ int e = (int)sizeof(signed char) * 8;
164
+
165
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
166
+ }
167
+
168
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned char>(void)
169
+ {
170
+ int e = (int)sizeof(unsigned char) * 8;
171
+
172
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
173
+ }
174
+
175
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char1>(void)
176
+ {
177
+ int e = (int)sizeof(signed char) * 8;
178
+
179
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
180
+ }
181
+
182
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar1>(void)
183
+ {
184
+ int e = (int)sizeof(unsigned char) * 8;
185
+
186
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
187
+ }
188
+
189
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char2>(void)
190
+ {
191
+ int e = (int)sizeof(signed char) * 8;
192
+
193
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
194
+ }
195
+
196
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar2>(void)
197
+ {
198
+ int e = (int)sizeof(unsigned char) * 8;
199
+
200
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
201
+ }
202
+
203
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char4>(void)
204
+ {
205
+ int e = (int)sizeof(signed char) * 8;
206
+
207
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
208
+ }
209
+
210
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar4>(void)
211
+ {
212
+ int e = (int)sizeof(unsigned char) * 8;
213
+
214
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
215
+ }
216
+
217
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short>(void)
218
+ {
219
+ int e = (int)sizeof(short) * 8;
220
+
221
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
222
+ }
223
+
224
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned short>(void)
225
+ {
226
+ int e = (int)sizeof(unsigned short) * 8;
227
+
228
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
229
+ }
230
+
231
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short1>(void)
232
+ {
233
+ int e = (int)sizeof(short) * 8;
234
+
235
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
236
+ }
237
+
238
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort1>(void)
239
+ {
240
+ int e = (int)sizeof(unsigned short) * 8;
241
+
242
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
243
+ }
244
+
245
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short2>(void)
246
+ {
247
+ int e = (int)sizeof(short) * 8;
248
+
249
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
250
+ }
251
+
252
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort2>(void)
253
+ {
254
+ int e = (int)sizeof(unsigned short) * 8;
255
+
256
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
257
+ }
258
+
259
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short4>(void)
260
+ {
261
+ int e = (int)sizeof(short) * 8;
262
+
263
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
264
+ }
265
+
266
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort4>(void)
267
+ {
268
+ int e = (int)sizeof(unsigned short) * 8;
269
+
270
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
271
+ }
272
+
273
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int>(void)
274
+ {
275
+ int e = (int)sizeof(int) * 8;
276
+
277
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
278
+ }
279
+
280
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned int>(void)
281
+ {
282
+ int e = (int)sizeof(unsigned int) * 8;
283
+
284
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
285
+ }
286
+
287
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int1>(void)
288
+ {
289
+ int e = (int)sizeof(int) * 8;
290
+
291
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
292
+ }
293
+
294
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint1>(void)
295
+ {
296
+ int e = (int)sizeof(unsigned int) * 8;
297
+
298
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
299
+ }
300
+
301
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int2>(void)
302
+ {
303
+ int e = (int)sizeof(int) * 8;
304
+
305
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
306
+ }
307
+
308
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint2>(void)
309
+ {
310
+ int e = (int)sizeof(unsigned int) * 8;
311
+
312
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
313
+ }
314
+
315
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int4>(void)
316
+ {
317
+ int e = (int)sizeof(int) * 8;
318
+
319
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
320
+ }
321
+
322
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint4>(void)
323
+ {
324
+ int e = (int)sizeof(unsigned int) * 8;
325
+
326
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
327
+ }
328
+
329
+ #if !defined(__LP64__)
330
+
331
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long>(void)
332
+ {
333
+ int e = (int)sizeof(long) * 8;
334
+
335
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
336
+ }
337
+
338
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned long>(void)
339
+ {
340
+ int e = (int)sizeof(unsigned long) * 8;
341
+
342
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
343
+ }
344
+
345
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long1>(void)
346
+ {
347
+ int e = (int)sizeof(long) * 8;
348
+
349
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
350
+ }
351
+
352
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong1>(void)
353
+ {
354
+ int e = (int)sizeof(unsigned long) * 8;
355
+
356
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
357
+ }
358
+
359
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long2>(void)
360
+ {
361
+ int e = (int)sizeof(long) * 8;
362
+
363
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
364
+ }
365
+
366
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong2>(void)
367
+ {
368
+ int e = (int)sizeof(unsigned long) * 8;
369
+
370
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
371
+ }
372
+
373
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long4>(void)
374
+ {
375
+ int e = (int)sizeof(long) * 8;
376
+
377
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
378
+ }
379
+
380
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong4>(void)
381
+ {
382
+ int e = (int)sizeof(unsigned long) * 8;
383
+
384
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
385
+ }
386
+
387
+ #endif /* !__LP64__ */
388
+
389
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float>(void)
390
+ {
391
+ int e = (int)sizeof(float) * 8;
392
+
393
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
394
+ }
395
+
396
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float1>(void)
397
+ {
398
+ int e = (int)sizeof(float) * 8;
399
+
400
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
401
+ }
402
+
403
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float2>(void)
404
+ {
405
+ int e = (int)sizeof(float) * 8;
406
+
407
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
408
+ }
409
+
410
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float4>(void)
411
+ {
412
+ int e = (int)sizeof(float) * 8;
413
+
414
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
415
+ }
416
+
417
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescNV12(void)
418
+ {
419
+ int e = (int)sizeof(char) * 8;
420
+
421
+ return cudaCreateChannelDesc(e, e, e, 0, cudaChannelFormatKindNV12);
422
+ }
423
+
424
+ template<cudaChannelFormatKind> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
425
+ {
426
+ return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
427
+ }
428
+
429
+ /* Signed 8-bit normalized integer formats */
430
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X1>(void)
431
+ {
432
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedNormalized8X1);
433
+ }
434
+
435
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X2>(void)
436
+ {
437
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedNormalized8X2);
438
+ }
439
+
440
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X4>(void)
441
+ {
442
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSignedNormalized8X4);
443
+ }
444
+
445
+ /* Unsigned 8-bit normalized integer formats */
446
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X1>(void)
447
+ {
448
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized8X1);
449
+ }
450
+
451
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X2>(void)
452
+ {
453
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedNormalized8X2);
454
+ }
455
+
456
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X4>(void)
457
+ {
458
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedNormalized8X4);
459
+ }
460
+
461
+ /* Signed 16-bit normalized integer formats */
462
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X1>(void)
463
+ {
464
+ return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindSignedNormalized16X1);
465
+ }
466
+
467
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X2>(void)
468
+ {
469
+ return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindSignedNormalized16X2);
470
+ }
471
+
472
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X4>(void)
473
+ {
474
+ return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindSignedNormalized16X4);
475
+ }
476
+
477
+ /* Unsigned 16-bit normalized integer formats */
478
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X1>(void)
479
+ {
480
+ return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized16X1);
481
+ }
482
+
483
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X2>(void)
484
+ {
485
+ return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsignedNormalized16X2);
486
+ }
487
+
488
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X4>(void)
489
+ {
490
+ return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindUnsignedNormalized16X4);
491
+ }
492
+
493
+ /* NV12 format */
494
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindNV12>(void)
495
+ {
496
+ return cudaCreateChannelDesc(8, 8, 8, 0, cudaChannelFormatKindNV12);
497
+ }
498
+
499
+ /* BC1 format */
500
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1>(void)
501
+ {
502
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1);
503
+ }
504
+
505
+ /* BC1sRGB format */
506
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1SRGB>(void)
507
+ {
508
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1SRGB);
509
+ }
510
+
511
+ /* BC2 format */
512
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2>(void)
513
+ {
514
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2);
515
+ }
516
+
517
+ /* BC2sRGB format */
518
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2SRGB>(void)
519
+ {
520
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2SRGB);
521
+ }
522
+
523
+ /* BC3 format */
524
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3>(void)
525
+ {
526
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3);
527
+ }
528
+
529
+ /* BC3sRGB format */
530
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3SRGB>(void)
531
+ {
532
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3SRGB);
533
+ }
534
+
535
+ /* BC4 unsigned format */
536
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed4>(void)
537
+ {
538
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed4);
539
+ }
540
+
541
+ /* BC4 signed format */
542
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed4>(void)
543
+ {
544
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedBlockCompressed4);
545
+ }
546
+
547
+ /* BC5 unsigned format */
548
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed5>(void)
549
+ {
550
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed5);
551
+ }
552
+
553
+ /* BC5 signed format */
554
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed5>(void)
555
+ {
556
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedBlockCompressed5);
557
+ }
558
+
559
+ /* BC6H unsigned format */
560
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed6H>(void)
561
+ {
562
+ return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindUnsignedBlockCompressed6H);
563
+ }
564
+
565
+ /* BC6H signed format */
566
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed6H>(void)
567
+ {
568
+ return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindSignedBlockCompressed6H);
569
+ }
570
+
571
+ /* BC7 format */
572
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7>(void)
573
+ {
574
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7);
575
+ }
576
+
577
+ /* BC7sRGB format */
578
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7SRGB>(void)
579
+ {
580
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7SRGB);
581
+ }
582
+
583
+ #endif /* __cplusplus */
584
+
585
+ /** @} */
586
+ /** @} */ /* END CUDART_TEXTURE_HL */
587
+
588
+ #endif /* !__CHANNEL_DESCRIPTOR_H__ */
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/common_functions.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/common_functions.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
65
+ #endif
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h ADDED
@@ -0,0 +1,1743 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _COOPERATIVE_GROUPS_H_
51
+ #define _COOPERATIVE_GROUPS_H_
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ #include "cooperative_groups/details/info.h"
56
+ #include "cooperative_groups/details/driver_abi.h"
57
+ #include "cooperative_groups/details/helpers.h"
58
+ #include "cooperative_groups/details/memory.h"
59
+
60
+ #if defined(_CG_HAS_STL_ATOMICS)
61
+ #include <cuda/atomic>
62
+ #define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope;
63
+ #else
64
+ #define _CG_THREAD_SCOPE(scope)
65
+ #endif
66
+
67
+ _CG_BEGIN_NAMESPACE
68
+
69
+ namespace details {
70
+ _CG_CONST_DECL unsigned int coalesced_group_id = 1;
71
+ _CG_CONST_DECL unsigned int multi_grid_group_id = 2;
72
+ _CG_CONST_DECL unsigned int grid_group_id = 3;
73
+ _CG_CONST_DECL unsigned int thread_block_id = 4;
74
+ _CG_CONST_DECL unsigned int multi_tile_group_id = 5;
75
+ _CG_CONST_DECL unsigned int cluster_group_id = 6;
76
+ }
77
+
78
+ /**
79
+ * class thread_group;
80
+ *
81
+ * Generic thread group type, into which all groups are convertible.
82
+ * It acts as a container for all storage necessary for the derived groups,
83
+ * and will dispatch the API calls to the correct derived group. This means
84
+ * that all derived groups must implement the same interface as thread_group.
85
+ */
86
+ class thread_group
87
+ {
88
+ protected:
89
+ struct group_data {
90
+ unsigned int _unused : 1;
91
+ unsigned int type : 7, : 0;
92
+ };
93
+
94
+ struct gg_data {
95
+ details::grid_workspace *gridWs;
96
+ };
97
+
98
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
99
+ struct mg_data {
100
+ unsigned long long _unused : 1;
101
+ unsigned long long type : 7;
102
+ unsigned long long handle : 56;
103
+ const details::multi_grid::multi_grid_functions *functions;
104
+ };
105
+ #endif
106
+
107
+ struct tg_data {
108
+ unsigned int is_tiled : 1;
109
+ unsigned int type : 7;
110
+ unsigned int size : 24;
111
+ // packed to 4b
112
+ unsigned int metaGroupSize : 16;
113
+ unsigned int metaGroupRank : 16;
114
+ // packed to 8b
115
+ unsigned int mask;
116
+ // packed to 12b
117
+ unsigned int _res;
118
+ };
119
+
120
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
121
+ friend class thread_block;
122
+
123
+ union __align__(8) {
124
+ group_data group;
125
+ tg_data coalesced;
126
+ gg_data grid;
127
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
128
+ mg_data multi_grid;
129
+ #endif
130
+ } _data;
131
+
132
+ _CG_QUALIFIER thread_group operator=(const thread_group& src);
133
+
134
+ _CG_QUALIFIER thread_group(unsigned int type) {
135
+ _data.group.type = type;
136
+ _data.group._unused = false;
137
+ }
138
+
139
+ #ifdef _CG_CPP11_FEATURES
140
+ static_assert(sizeof(tg_data) <= 16, "Failed size check");
141
+ static_assert(sizeof(gg_data) <= 16, "Failed size check");
142
+ # ifdef _CG_ABI_EXPERIMENTAL
143
+ static_assert(sizeof(mg_data) <= 16, "Failed size check");
144
+ # endif
145
+ #endif
146
+
147
+ public:
148
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
149
+
150
+ _CG_QUALIFIER unsigned long long size() const;
151
+ _CG_QUALIFIER unsigned long long num_threads() const;
152
+ _CG_QUALIFIER unsigned long long thread_rank() const;
153
+ _CG_QUALIFIER void sync() const;
154
+ _CG_QUALIFIER unsigned int get_type() const {
155
+ return _data.group.type;
156
+ }
157
+
158
+ };
159
+
160
+ template <unsigned int TyId>
161
+ struct thread_group_base : public thread_group {
162
+ _CG_QUALIFIER thread_group_base() : thread_group(TyId) {}
163
+ _CG_STATIC_CONST_DECL unsigned int id = TyId;
164
+ };
165
+
166
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
167
+
168
+ /**
169
+ * class multi_grid_group;
170
+ *
171
+ * Threads within this this group are guaranteed to be co-resident on the
172
+ * same system, on multiple devices within the same launched kernels.
173
+ * To use this group, the kernel must have been launched with
174
+ * cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent),
175
+ * and the device must support it (queryable device attribute).
176
+ *
177
+ * Constructed via this_multi_grid();
178
+ */
179
+
180
+
181
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
182
+ class multi_grid_group;
183
+
184
+ // Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls
185
+ template <typename = void>
186
+ __device__ _CG_DEPRECATED multi_grid_group this_multi_grid();
187
+
188
+ class multi_grid_group : public thread_group_base<details::multi_grid_group_id>
189
+ {
190
+ private:
191
+ template <typename = void>
192
+ _CG_QUALIFIER multi_grid_group() {
193
+ _data.multi_grid.functions = details::multi_grid::load_grid_intrinsics();
194
+ _data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle();
195
+ }
196
+
197
+ friend multi_grid_group this_multi_grid<void>();
198
+
199
+ public:
200
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
201
+
202
+ _CG_QUALIFIER bool is_valid() const {
203
+ return (_data.multi_grid.handle != 0);
204
+ }
205
+
206
+ _CG_QUALIFIER void sync() const {
207
+ if (!is_valid()) {
208
+ _CG_ABORT();
209
+ }
210
+ _data.multi_grid.functions->sync(_data.multi_grid.handle);
211
+ }
212
+
213
+ _CG_QUALIFIER unsigned long long num_threads() const {
214
+ _CG_ASSERT(is_valid());
215
+ return _data.multi_grid.functions->size(_data.multi_grid.handle);
216
+ }
217
+
218
+ _CG_QUALIFIER unsigned long long size() const {
219
+ return num_threads();
220
+ }
221
+
222
+ _CG_QUALIFIER unsigned long long thread_rank() const {
223
+ _CG_ASSERT(is_valid());
224
+ return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle);
225
+ }
226
+
227
+ _CG_QUALIFIER unsigned int grid_rank() const {
228
+ _CG_ASSERT(is_valid());
229
+ return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle));
230
+ }
231
+
232
+ _CG_QUALIFIER unsigned int num_grids() const {
233
+ _CG_ASSERT(is_valid());
234
+ return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle));
235
+ }
236
+ };
237
+ # else
238
+ class multi_grid_group
239
+ {
240
+ private:
241
+ unsigned long long _handle;
242
+ unsigned int _size;
243
+ unsigned int _rank;
244
+
245
+ friend _CG_QUALIFIER multi_grid_group this_multi_grid();
246
+
247
+ _CG_QUALIFIER multi_grid_group() {
248
+ _handle = details::multi_grid::get_intrinsic_handle();
249
+ _size = details::multi_grid::size(_handle);
250
+ _rank = details::multi_grid::thread_rank(_handle);
251
+ }
252
+
253
+ public:
254
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
255
+
256
+ _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const {
257
+ return (_handle != 0);
258
+ }
259
+
260
+ _CG_QUALIFIER _CG_DEPRECATED void sync() const {
261
+ if (!is_valid()) {
262
+ _CG_ABORT();
263
+ }
264
+ details::multi_grid::sync(_handle);
265
+ }
266
+
267
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const {
268
+ _CG_ASSERT(is_valid());
269
+ return _size;
270
+ }
271
+
272
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const {
273
+ return num_threads();
274
+ }
275
+
276
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const {
277
+ _CG_ASSERT(is_valid());
278
+ return _rank;
279
+ }
280
+
281
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const {
282
+ _CG_ASSERT(is_valid());
283
+ return (details::multi_grid::grid_rank(_handle));
284
+ }
285
+
286
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const {
287
+ _CG_ASSERT(is_valid());
288
+ return (details::multi_grid::num_grids(_handle));
289
+ }
290
+ };
291
+ # endif
292
+
293
+ /**
294
+ * multi_grid_group this_multi_grid()
295
+ *
296
+ * Constructs a multi_grid_group
297
+ */
298
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
299
+ template <typename>
300
+ __device__
301
+ #else
302
+ _CG_QUALIFIER
303
+ # endif
304
+ _CG_DEPRECATED
305
+ multi_grid_group this_multi_grid()
306
+ {
307
+ return multi_grid_group();
308
+ }
309
+ #endif
310
+
311
+ /**
312
+ * class grid_group;
313
+ *
314
+ * Threads within this this group are guaranteed to be co-resident on the
315
+ * same device within the same launched kernel. To use this group, the kernel
316
+ * must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent),
317
+ * and the device must support it (queryable device attribute).
318
+ *
319
+ * Constructed via this_grid();
320
+ */
321
+ class grid_group : public thread_group_base<details::grid_group_id>
322
+ {
323
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id;
324
+ friend _CG_QUALIFIER grid_group this_grid();
325
+
326
+ private:
327
+ _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) {
328
+ _data.grid.gridWs = gridWs;
329
+ }
330
+
331
+ public:
332
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
333
+
334
+ _CG_QUALIFIER bool is_valid() const {
335
+ return (_data.grid.gridWs != NULL);
336
+ }
337
+
338
+ _CG_QUALIFIER void sync() const {
339
+ if (!is_valid()) {
340
+ _CG_ABORT();
341
+ }
342
+ details::grid::sync(&_data.grid.gridWs->barrier);
343
+ }
344
+
345
+ #if defined(_CG_CPP11_FEATURES)
346
+ using arrival_token = unsigned int;
347
+
348
+ _CG_QUALIFIER arrival_token barrier_arrive() const {
349
+ if (!is_valid()) {
350
+ _CG_ABORT();
351
+ }
352
+ return details::grid::barrier_arrive(&_data.grid.gridWs->barrier);
353
+ }
354
+
355
+ _CG_QUALIFIER void barrier_wait(arrival_token&& token) const {
356
+ details::grid::barrier_wait(token, &_data.grid.gridWs->barrier);
357
+ }
358
+ #endif
359
+
360
+ _CG_STATIC_QUALIFIER unsigned long long size() {
361
+ return details::grid::size();
362
+ }
363
+
364
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
365
+ return details::grid::grid_dim();
366
+ }
367
+
368
+ _CG_STATIC_QUALIFIER dim3 dim_threads() {
369
+ return details::grid::dim_threads();
370
+ }
371
+
372
+ _CG_STATIC_QUALIFIER unsigned long long num_threads() {
373
+ return details::grid::num_threads();
374
+ }
375
+
376
+ _CG_STATIC_QUALIFIER dim3 thread_index() {
377
+ return details::grid::thread_index();
378
+ }
379
+
380
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank() {
381
+ return details::grid::thread_rank();
382
+ }
383
+
384
+ _CG_STATIC_QUALIFIER dim3 dim_blocks() {
385
+ return details::grid::dim_blocks();
386
+ }
387
+
388
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks() {
389
+ return details::grid::num_blocks();
390
+ }
391
+
392
+ _CG_STATIC_QUALIFIER dim3 block_index() {
393
+ return details::grid::block_index();
394
+ }
395
+
396
+ _CG_STATIC_QUALIFIER unsigned long long block_rank() {
397
+ return details::grid::block_rank();
398
+ }
399
+
400
+ # if defined(_CG_HAS_CLUSTER_GROUP)
401
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
402
+ return details::grid::dim_clusters();
403
+ }
404
+
405
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
406
+ return details::grid::num_clusters();
407
+ }
408
+
409
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
410
+ return details::grid::cluster_index();
411
+ }
412
+
413
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
414
+ return details::grid::cluster_rank();
415
+ }
416
+ # endif
417
+ };
418
+
419
+ _CG_QUALIFIER grid_group this_grid() {
420
+ // Load a workspace from the driver
421
+ grid_group gg(details::get_grid_workspace());
422
+ #ifdef _CG_DEBUG
423
+ // *all* threads must be available to synchronize
424
+ gg.sync();
425
+ #endif // _CG_DEBUG
426
+ return gg;
427
+ }
428
+
429
+ #if defined(_CG_HAS_CLUSTER_GROUP)
430
+ /**
431
+ * class cluster_group
432
+ *
433
+ * Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly
434
+ * divided along all dimensions to form groups of blocks, each group of which is
435
+ * a block cluster. Clustered grids are subject to various restrictions and
436
+ * limitations. Primarily, a cluster consists of at most 8 blocks by default
437
+ * (although the user is allowed to opt-in to non-standard sizes,) and clustered
438
+ * grids are subject to additional occupancy limitations due to per-cluster
439
+ * hardware resource consumption. In exchange, a block cluster is guaranteed to
440
+ * be a cooperative group, with access to all cooperative group capabilities, as
441
+ * well as cluster specific capabilities and accelerations. A cluster_group
442
+ * represents a block cluster.
443
+ *
444
+ * Constructed via this_cluster_group();
445
+ */
446
+ class cluster_group : public thread_group_base<details::cluster_group_id>
447
+ {
448
+ // Friends
449
+ friend _CG_QUALIFIER cluster_group this_cluster();
450
+
451
+ // Disable constructor
452
+ _CG_QUALIFIER cluster_group()
453
+ {
454
+ }
455
+
456
+ public:
457
+ //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster)
458
+
459
+ using arrival_token = struct {};
460
+
461
+ // Functionality exposed by the group
462
+ _CG_STATIC_QUALIFIER void sync()
463
+ {
464
+ return details::cluster::sync();
465
+ }
466
+
467
+ _CG_STATIC_QUALIFIER arrival_token barrier_arrive()
468
+ {
469
+ details::cluster::barrier_arrive();
470
+ return arrival_token();
471
+ }
472
+
473
+ _CG_STATIC_QUALIFIER void barrier_wait()
474
+ {
475
+ return details::cluster::barrier_wait();
476
+ }
477
+
478
+ _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&)
479
+ {
480
+ return details::cluster::barrier_wait();
481
+ }
482
+
483
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
484
+ {
485
+ return details::cluster::query_shared_rank(addr);
486
+ }
487
+
488
+ template <typename T>
489
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
490
+ {
491
+ return details::cluster::map_shared_rank(addr, rank);
492
+ }
493
+
494
+ _CG_STATIC_QUALIFIER dim3 block_index()
495
+ {
496
+ return details::cluster::block_index();
497
+ }
498
+
499
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
500
+ {
501
+ return details::cluster::block_rank();
502
+ }
503
+
504
+ _CG_STATIC_QUALIFIER dim3 thread_index()
505
+ {
506
+ return details::cluster::thread_index();
507
+ }
508
+
509
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
510
+ {
511
+ return details::cluster::thread_rank();
512
+ }
513
+
514
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
515
+ {
516
+ return details::cluster::dim_blocks();
517
+ }
518
+
519
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
520
+ {
521
+ return details::cluster::num_blocks();
522
+ }
523
+
524
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
525
+ {
526
+ return details::cluster::dim_threads();
527
+ }
528
+
529
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
530
+ {
531
+ return details::cluster::num_threads();
532
+ }
533
+
534
+ // Legacy aliases
535
+ _CG_STATIC_QUALIFIER unsigned int size()
536
+ {
537
+ return num_threads();
538
+ }
539
+ };
540
+
541
+ /*
542
+ * cluster_group this_cluster()
543
+ *
544
+ * Constructs a cluster_group
545
+ */
546
+ _CG_QUALIFIER cluster_group this_cluster()
547
+ {
548
+ cluster_group cg;
549
+ #ifdef _CG_DEBUG
550
+ cg.sync();
551
+ #endif
552
+ return cg;
553
+ }
554
+ #endif
555
+
556
+ #if defined(_CG_CPP11_FEATURES)
557
+ class thread_block;
558
+ template <unsigned int MaxBlockSize>
559
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
560
+ #endif
561
+
562
+ /**
563
+ * class thread_block
564
+ *
565
+ * Every GPU kernel is executed by a grid of thread blocks, and threads within
566
+ * each block are guaranteed to reside on the same streaming multiprocessor.
567
+ * A thread_block represents a thread block whose dimensions are not known until runtime.
568
+ *
569
+ * Constructed via this_thread_block();
570
+ */
571
+ class thread_block : public thread_group_base<details::thread_block_id>
572
+ {
573
+ // Friends
574
+ friend _CG_QUALIFIER thread_block this_thread_block();
575
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
576
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz);
577
+
578
+ #if defined(_CG_CPP11_FEATURES)
579
+ template <unsigned int MaxBlockSize>
580
+ friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
581
+ template <unsigned int Size>
582
+ friend class __static_size_multi_warp_tile_base;
583
+
584
+ details::multi_warp_scratch* const tile_memory;
585
+
586
+ template <unsigned int MaxBlockSize>
587
+ _CG_QUALIFIER thread_block(block_tile_memory<MaxBlockSize>& scratch) :
588
+ tile_memory(details::get_scratch_ptr(&scratch)) {
589
+ #ifdef _CG_DEBUG
590
+ if (num_threads() > MaxBlockSize) {
591
+ details::abort();
592
+ }
593
+ #endif
594
+
595
+
596
+ #if defined(_CG_USER_PROVIDED_SHARED_MEMORY)
597
+ #define _CG_SKIP_BARRIER_INIT_TARGET NV_NO_TARGET
598
+ #else
599
+ #define _CG_SKIP_BARRIER_INIT_TARGET NV_PROVIDES_SM_80
600
+ #endif
601
+ NV_IF_ELSE_TARGET(
602
+ _CG_SKIP_BARRIER_INIT_TARGET,
603
+ // skip if clause
604
+ ,
605
+ (tile_memory->init_barriers(thread_rank());
606
+ sync();)
607
+ )
608
+ }
609
+ #endif
610
+ #undef _CG_SKIP_BARRIER_INIT_TARGET
611
+
612
+ // Disable constructor
613
+ _CG_QUALIFIER thread_block()
614
+ #if defined(_CG_CPP11_FEATURES)
615
+ : tile_memory(details::get_scratch_ptr(NULL))
616
+ #endif
617
+ { }
618
+
619
+ // Internal Use
620
+ _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const {
621
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
622
+
623
+ // Invalid, immediately fail
624
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
625
+ details::abort();
626
+ return (thread_block());
627
+ }
628
+
629
+ unsigned int mask;
630
+ unsigned int base_offset = thread_rank() & (~(tilesz - 1));
631
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
632
+
633
+ mask = (unsigned int)(-1) >> (32 - masklength);
634
+ mask <<= (details::laneid() & ~(tilesz - 1));
635
+ thread_group tile = thread_group(details::coalesced_group_id);
636
+ tile._data.coalesced.mask = mask;
637
+ tile._data.coalesced.size = __popc(mask);
638
+ tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz;
639
+ tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz;
640
+ tile._data.coalesced.is_tiled = true;
641
+ return (tile);
642
+ }
643
+
644
+ public:
645
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id;
646
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
647
+
648
+ _CG_STATIC_QUALIFIER void sync() {
649
+ details::cta::sync();
650
+ }
651
+
652
+ #if defined(_CG_CPP11_FEATURES)
653
+ struct arrival_token {};
654
+
655
+ _CG_QUALIFIER arrival_token barrier_arrive() const {
656
+ return arrival_token();
657
+ }
658
+
659
+ _CG_QUALIFIER void barrier_wait(arrival_token&&) const {
660
+ details::cta::sync();
661
+ }
662
+ #endif
663
+
664
+ _CG_STATIC_QUALIFIER unsigned int size() {
665
+ return details::cta::size();
666
+ }
667
+
668
+ _CG_STATIC_QUALIFIER unsigned int thread_rank() {
669
+ return details::cta::thread_rank();
670
+ }
671
+
672
+ // Additional functionality exposed by the group
673
+ _CG_STATIC_QUALIFIER dim3 group_index() {
674
+ return details::cta::group_index();
675
+ }
676
+
677
+ _CG_STATIC_QUALIFIER dim3 thread_index() {
678
+ return details::cta::thread_index();
679
+ }
680
+
681
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
682
+ return details::cta::block_dim();
683
+ }
684
+
685
+ _CG_STATIC_QUALIFIER dim3 dim_threads() {
686
+ return details::cta::dim_threads();
687
+ }
688
+
689
+ _CG_STATIC_QUALIFIER unsigned int num_threads() {
690
+ return details::cta::num_threads();
691
+ }
692
+
693
+ };
694
+
695
+ /**
696
+ * thread_block this_thread_block()
697
+ *
698
+ * Constructs a thread_block group
699
+ */
700
+ _CG_QUALIFIER thread_block this_thread_block()
701
+ {
702
+ return (thread_block());
703
+ }
704
+
705
+ #if defined(_CG_CPP11_FEATURES)
706
+ template <unsigned int MaxBlockSize>
707
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch) {
708
+ return (thread_block(scratch));
709
+ }
710
+ #endif
711
+
712
+ /**
713
+ * class coalesced_group
714
+ *
715
+ * A group representing the current set of converged threads in a warp.
716
+ * The size of the group is not guaranteed and it may return a group of
717
+ * only one thread (itself).
718
+ *
719
+ * This group exposes warp-synchronous builtins.
720
+ * Constructed via coalesced_threads();
721
+ */
722
+ class coalesced_group : public thread_group_base<details::coalesced_group_id>
723
+ {
724
+ private:
725
+ friend _CG_QUALIFIER coalesced_group coalesced_threads();
726
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
727
+ friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz);
728
+ friend class details::_coalesced_group_data_access;
729
+
730
+ _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const {
731
+ unsigned int member_pack = 0;
732
+ unsigned int member_rank = 0;
733
+ for (int bit_idx = 0; bit_idx < 32; bit_idx++) {
734
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
735
+ if (lane_bit) {
736
+ if (laneMask & lane_bit)
737
+ member_pack |= 1 << member_rank;
738
+ member_rank++;
739
+ }
740
+ }
741
+ return (member_pack);
742
+ }
743
+
744
+ // Internal Use
745
+ _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const {
746
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
747
+
748
+ // Invalid, immediately fail
749
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
750
+ details::abort();
751
+ return (coalesced_group(0));
752
+ }
753
+ if (size() <= tilesz) {
754
+ return (*this);
755
+ }
756
+
757
+ if ((_data.coalesced.is_tiled == true) && pow2_tilesz) {
758
+ unsigned int base_offset = (thread_rank() & (~(tilesz - 1)));
759
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
760
+ unsigned int mask = (unsigned int)(-1) >> (32 - masklength);
761
+
762
+ mask <<= (details::laneid() & ~(tilesz - 1));
763
+ coalesced_group coalesced_tile = coalesced_group(mask);
764
+ coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz;
765
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
766
+ coalesced_tile._data.coalesced.is_tiled = true;
767
+ return (coalesced_tile);
768
+ }
769
+ else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) {
770
+ unsigned int mask = 0;
771
+ unsigned int member_rank = 0;
772
+ int seen_lanes = (thread_rank() / tilesz) * tilesz;
773
+ for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) {
774
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
775
+ if (lane_bit) {
776
+ if (seen_lanes <= 0 && member_rank < tilesz) {
777
+ mask |= lane_bit;
778
+ member_rank++;
779
+ }
780
+ seen_lanes--;
781
+ }
782
+ }
783
+ coalesced_group coalesced_tile = coalesced_group(mask);
784
+ // Override parent with the size of this group
785
+ coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz;
786
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
787
+ return coalesced_tile;
788
+ }
789
+ else {
790
+ // None in _CG_VERSION 1000
791
+ details::abort();
792
+ }
793
+
794
+ return (coalesced_group(0));
795
+ }
796
+
797
+ protected:
798
+ _CG_QUALIFIER coalesced_group(unsigned int mask) {
799
+ _data.coalesced.mask = mask;
800
+ _data.coalesced.size = __popc(mask);
801
+ _data.coalesced.metaGroupRank = 0;
802
+ _data.coalesced.metaGroupSize = 1;
803
+ _data.coalesced.is_tiled = false;
804
+ }
805
+
806
+ _CG_QUALIFIER unsigned int get_mask() const {
807
+ return (_data.coalesced.mask);
808
+ }
809
+
810
+ public:
811
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
812
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
813
+
814
+ _CG_QUALIFIER unsigned int num_threads() const {
815
+ return _data.coalesced.size;
816
+ }
817
+
818
+ _CG_QUALIFIER unsigned int size() const {
819
+ return num_threads();
820
+ }
821
+
822
+ _CG_QUALIFIER unsigned int thread_rank() const {
823
+ return (__popc(_data.coalesced.mask & details::lanemask32_lt()));
824
+ }
825
+
826
+ // Rank of this group in the upper level of the hierarchy
827
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
828
+ return _data.coalesced.metaGroupRank;
829
+ }
830
+
831
+ // Total num partitions created out of all CTAs when the group was created
832
+ _CG_QUALIFIER unsigned int meta_group_size() const {
833
+ return _data.coalesced.metaGroupSize;
834
+ }
835
+
836
+ _CG_QUALIFIER void sync() const {
837
+ __syncwarp(_data.coalesced.mask);
838
+ }
839
+
840
+ #ifdef _CG_CPP11_FEATURES
841
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
842
+ _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
843
+ unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 :
844
+ (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1));
845
+
846
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
847
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
848
+ }
849
+
850
// Fetch 'elem' from the member 'delta' group-ranks above the caller.
// Full-warp groups use the hardware shuffle directly; otherwise the source
// lane is the (delta+1)-th set bit of the mask at/after the caller's lane.
// If that walks off the warp (lane >= 32), the caller reads its own value.
template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
_CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
    if (size() == 32) {
        return details::tile::shuffle_dispatch<TyElem>::shfl_down(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
    }

    unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);

    if (lane >= 32)
        lane = details::laneid();

    return details::tile::shuffle_dispatch<TyElem>::shfl(
        _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
}
865
+
866
// Fetch 'elem' from the member 'delta' group-ranks below the caller.
// The negative count passed to __fns walks the mask downward from the
// caller's lane; out-of-range results fall back to the caller's own value.
template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
_CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const {
    if (size() == 32) {
        return details::tile::shuffle_dispatch<TyElem>::shfl_up(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
    }

    unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
    if (lane >= 32)
        lane = details::laneid();

    return details::tile::shuffle_dispatch<TyElem>::shfl(
        _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
}
880
+ #else
881
// Pre-C++11 broadcast from the member of group rank src_rank.
// Same lane-selection logic as the C++11 overload, restricted to
// arithmetic types and using __shfl_sync directly.
template <typename TyIntegral>
_CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const {
    details::assert_if_not_arithmetic<TyIntegral>();
    unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 :
        (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1));
    return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
}
888
+
889
// Pre-C++11 shfl_up: source is 'delta' group-ranks below the caller;
// out-of-range reads return the caller's own value.
template <typename TyIntegral>
_CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const {
    details::assert_if_not_arithmetic<TyIntegral>();
    if (size() == 32) {
        return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32));
    }
    unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
    if (lane >= 32) lane = details::laneid();
    return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
}
899
+
900
// Pre-C++11 shfl_down: source is 'delta' group-ranks above the caller;
// out-of-range reads return the caller's own value.
template <typename TyIntegral>
_CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const {
    details::assert_if_not_arithmetic<TyIntegral>();
    if (size() == 32) {
        return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32));
    }
    unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
    if (lane >= 32) lane = details::laneid();
    return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
}
910
+ #endif
911
+
912
// True if the predicate is non-zero on any member of the group.
_CG_QUALIFIER int any(int predicate) const {
    return (__ballot_sync(_data.coalesced.mask, predicate) != 0);
}
915
// True if the predicate is non-zero on every member of the group
// (ballot over the member mask must equal the mask itself).
_CG_QUALIFIER int all(int predicate) const {
    return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask);
}
918
// Ballot across the group's members. For partial groups the lane-indexed
// ballot is post-processed by _packLanes (defined earlier in this class) —
// presumably to re-index bits by group rank rather than hardware lane;
// confirm against _packLanes' definition.
_CG_QUALIFIER unsigned int ballot(int predicate) const {
    if (size() == 32) {
        return (__ballot_sync(0xFFFFFFFF, predicate));
    }
    unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate);
    return (_packLanes(lane_ballot));
}
925
+
926
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
927
+
928
// Mask of members holding the same value as the caller (__match_any_sync),
// packed for partial groups via _packLanes.
template <typename TyIntegral>
_CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
    details::assert_if_not_arithmetic<TyIntegral>();
    if (size() == 32) {
        return (__match_any_sync(0xFFFFFFFF, val));
    }
    unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val);
    return (_packLanes(lane_match));
}
937
+
938
// __match_all_sync across the group: 'pred' is set when all members hold
// the same value; partial-group result is packed via _packLanes.
template <typename TyIntegral>
_CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
    details::assert_if_not_arithmetic<TyIntegral>();
    if (size() == 32) {
        return (__match_all_sync(0xFFFFFFFF, val, &pred));
    }
    unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred);
    return (_packLanes(lane_match));
}
947
+
948
+ #endif /* !_CG_HAS_MATCH_COLLECTIVE */
949
+
950
+ };
951
+
952
+ _CG_QUALIFIER coalesced_group coalesced_threads()
953
+ {
954
+ return (coalesced_group(__activemask()));
955
+ }
956
+
957
namespace details {
    // Compile-time validation of single-warp tile sizes for pre-C++11
    // builds: only the listed specializations provide the OK typedef.
    template <unsigned int Size> struct verify_thread_block_tile_size;
    template <> struct verify_thread_block_tile_size<32> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<16> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<8> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<4> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<2> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<1> { typedef void OK; };

#ifdef _CG_CPP11_FEATURES
    // Trait helpers classifying a tile Size: power of two, within one warp
    // (<= 32), or spanning multiple warps (33..1024).
    template <unsigned int Size>
    using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant<bool, (Size & (Size - 1)) == 0>;

    template <unsigned int Size>
    using _is_single_warp = _CG_STL_NAMESPACE::integral_constant<bool, Size <= 32>;
    template <unsigned int Size>
    using _is_multi_warp =
        _CG_STL_NAMESPACE::integral_constant<bool, (Size > 32) && (Size <= 1024)>;

    template <unsigned int Size>
    using _is_valid_single_warp_tile =
        _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_single_warp<Size>::value>;
    template <unsigned int Size>
    using _is_valid_multi_warp_tile =
        _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_multi_warp<Size>::value>;
#else
    // Without C++11 there is no multi-warp tile support.
    template <unsigned int Size>
    struct _is_multi_warp {
        static const bool value = false;
    };
#endif
}
989
+
990
// Common base for statically-sized tiles: exposes the compile-time size and
// derives a thread's rank from its CTA rank (valid because Size is a power
// of two, so masking with Size-1 gives the position within the tile).
template <unsigned int Size>
class __static_size_tile_base
{
protected:
    _CG_STATIC_CONST_DECL unsigned int numThreads = Size;

public:
    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)

    // Rank of thread within tile
    _CG_STATIC_QUALIFIER unsigned int thread_rank() {
        return (details::cta::thread_rank() & (numThreads - 1));
    }

    // Number of threads within tile
    _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() {
        return numThreads;
    }

    // Alias for num_threads()
    _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() {
        return num_threads();
    }
};
1013
+
1014
// Base for single-warp statically-sized tiles (Size in 1/2/4/8/16/32).
// All collectives operate on a lane mask rebuilt on the fly by build_mask(),
// so instances carry no per-object state beyond the static size.
template <unsigned int Size>
class __static_size_thread_block_tile_base : public __static_size_tile_base<Size>
{
    friend class details::_coalesced_group_data_access;
    typedef details::tile::tile_helpers<Size> th;

#ifdef _CG_CPP11_FEATURES
    static_assert(details::_is_valid_single_warp_tile<Size>::value, "Size must be one of 1/2/4/8/16/32");
#else
    typedef typename details::verify_thread_block_tile_size<Size>::OK valid;
#endif
    using __static_size_tile_base<Size>::numThreads;
    _CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF;

protected:
    // Compute the lane mask of this tile's threads within the current warp.
    // A 32-thread tile is the whole warp; smaller tiles shift a tile-sized
    // bit pattern to the sub-warp partition containing the calling lane.
    _CG_STATIC_QUALIFIER unsigned int build_mask() {
        unsigned int mask = fullMask;
        if (numThreads != 32) {
            // [0,31] representing the current active thread in the warp
            unsigned int laneId = details::laneid();
            // shift mask according to the partition it belongs to
            mask = th::tileMask << (laneId & ~(th::laneMask));
        }
        return (mask);
    }

public:
    _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;

    // Barrier across the tile's lanes only.
    _CG_STATIC_QUALIFIER void sync() {
        __syncwarp(build_mask());
    }

#ifdef _CG_CPP11_FEATURES
    // PTX supported collectives — dispatch through shuffle_dispatch so that
    // arbitrary element types are handled.
    template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
    _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
        return details::tile::shuffle_dispatch<TyElem>::shfl(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), srcRank, numThreads);
    }

    template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
    _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
        return details::tile::shuffle_dispatch<TyElem>::shfl_down(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
    }

    template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
    _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const {
        return details::tile::shuffle_dispatch<TyElem>::shfl_up(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
    }

    template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
    _CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const {
        return details::tile::shuffle_dispatch<TyElem>::shfl_xor(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), laneMask, numThreads);
    }
#else
    // Pre-C++11 variants: restricted to arithmetic types, calling the
    // hardware shuffle intrinsics directly.
    template <typename TyIntegral>
    _CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        return (__shfl_sync(build_mask(), var, srcRank, numThreads));
    }

    template <typename TyIntegral>
    _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        return (__shfl_down_sync(build_mask(), var, delta, numThreads));
    }

    template <typename TyIntegral>
    _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        return (__shfl_up_sync(build_mask(), var, delta, numThreads));
    }

    template <typename TyIntegral>
    _CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads));
    }
#endif //_CG_CPP11_FEATURES

    // Vote collectives over the tile's lanes.
    _CG_QUALIFIER int any(int predicate) const {
        unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
        return (lane_ballot != 0);
    }
    _CG_QUALIFIER int all(int predicate) const {
        unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
        return (lane_ballot == build_mask());
    }
    // Ballot shifted down so bit 0 corresponds to the tile's first lane.
    _CG_QUALIFIER unsigned int ballot(int predicate) const {
        unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
        return (lane_ballot >> (details::laneid() & (~(th::laneMask))));
    }

#ifdef _CG_HAS_MATCH_COLLECTIVE
    // Match collectives (where the architecture supports them), shifted to
    // tile-relative bit positions like ballot() above.
    template <typename TyIntegral>
    _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        unsigned int lane_match = __match_any_sync(build_mask(), val);
        return (lane_match >> (details::laneid() & (~(th::laneMask))));
    }

    template <typename TyIntegral>
    _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        unsigned int lane_match = __match_all_sync(build_mask(), val, &pred);
        return (lane_match >> (details::laneid() & (~(th::laneMask))));
    }
#endif

};
1128
+
1129
// Mixin providing meta-group information computed statically from the
// parent group type (no per-object state required).
template <unsigned int Size, typename ParentT>
class __static_parent_thread_block_tile_base
{
public:
    // Rank of this group in the upper level of the hierarchy
    _CG_STATIC_QUALIFIER unsigned int meta_group_rank() {
        return ParentT::thread_rank() / Size;
    }

    // Total num partitions created out of all CTAs when the group was
    // created (ceiling division of the parent size by the tile size).
    _CG_STATIC_QUALIFIER unsigned int meta_group_size() {
        return (ParentT::size() + Size - 1) / Size;
    }
};
1143
+
1144
+ /**
1145
+ * class thread_block_tile<unsigned int Size, ParentT = void>
1146
+ *
1147
+ * Statically-sized group type, representing one tile of a thread block.
1148
+ * The only specializations currently supported are those with native
1149
+ * hardware support (1/2/4/8/16/32)
1150
+ *
1151
+ * This group exposes warp-synchronous builtins.
1152
+ * Can only be constructed via tiled_partition<Size>(ParentT&)
1153
+ */
1154
+
1155
// Single-warp tile with a statically-known parent: all information
// (size, mask, meta-group rank/size) is derivable at compile time, so the
// object is empty and the constructors discard their arguments.
template <unsigned int Size, typename ParentT = void>
class __single_warp_thread_block_tile :
    public __static_size_thread_block_tile_base<Size>,
    public __static_parent_thread_block_tile_base<Size, ParentT>
{
    typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
    friend class details::_coalesced_group_data_access;

protected:
    _CG_QUALIFIER __single_warp_thread_block_tile() { };
    _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { };

    // Lane mask recomputed from the static tile size.
    _CG_STATIC_QUALIFIER unsigned int get_mask() {
        return __static_size_thread_block_tile_base<Size>::build_mask();
    }
};
1171
+
1172
// Parent-erased single-warp tile: unlike the statically-parented variant it
// stores its mask, size and meta-group information in the inherited
// coalesced-group data so they survive the loss of the parent type.
template <unsigned int Size>
class __single_warp_thread_block_tile<Size, void> :
    public __static_size_thread_block_tile_base<Size>,
    public thread_group_base<details::coalesced_group_id>
{
    _CG_STATIC_CONST_DECL unsigned int numThreads = Size;

    template <unsigned int, typename ParentT> friend class __single_warp_thread_block_tile;
    friend class details::_coalesced_group_data_access;

    typedef __static_size_thread_block_tile_base<numThreads> staticSizeBaseT;

protected:
    // Capture the (static) mask/size plus the dynamic meta-group info.
    _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank = 0, unsigned int meta_group_size = 1) {
        _data.coalesced.mask = staticSizeBaseT::build_mask();
        _data.coalesced.size = numThreads;
        _data.coalesced.metaGroupRank = meta_group_rank;
        _data.coalesced.metaGroupSize = meta_group_size;
        _data.coalesced.is_tiled = true;
    }

    _CG_QUALIFIER unsigned int get_mask() const {
        return (_data.coalesced.mask);
    }

public:
    using staticSizeBaseT::sync;
    using staticSizeBaseT::size;
    using staticSizeBaseT::num_threads;
    using staticSizeBaseT::thread_rank;

    // Rank of this group among its sibling partitions.
    _CG_QUALIFIER unsigned int meta_group_rank() const {
        return _data.coalesced.metaGroupRank;
    }

    // Number of partitions created alongside this group.
    _CG_QUALIFIER unsigned int meta_group_size() const {
        return _data.coalesced.metaGroupSize;
    }
};
1211
+
1212
+ /**
1213
+ * Outer level API calls
1214
+ * void sync(GroupT) - see <group_type>.sync()
1215
+ * void thread_rank(GroupT) - see <group_type>.thread_rank()
1216
+ * void group_size(GroupT) - see <group_type>.size()
1217
+ */
1218
+ template <class GroupT>
1219
+ _CG_QUALIFIER void sync(GroupT const &g)
1220
+ {
1221
+ g.sync();
1222
+ }
1223
+
1224
// Free-function forms of <group>.thread_rank() / <group>.num_threads().
// TODO: Use a static dispatch to determine appropriate return type
// C++03 is stuck with unsigned long long for now
#ifdef _CG_CPP11_FEATURES
// With C++11, forward the group's own return type unchanged.
template <class GroupT>
_CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) {
    return g.thread_rank();
}


template <class GroupT>
_CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) {
    return g.num_threads();
}
#else
// Pre-C++11 fallback: widen everything to unsigned long long.
template <class GroupT>
_CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) {
    return static_cast<unsigned long long>(g.thread_rank());
}


template <class GroupT>
_CG_QUALIFIER unsigned long long group_size(GroupT const &g) {
    return static_cast<unsigned long long>(g.num_threads());
}
#endif
1249
+
1250
+
1251
+ /**
1252
+ * tiled_partition
1253
+ *
1254
+ * The tiled_partition(parent, tilesz) method is a collective operation that
1255
+ * partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
1256
+ *
1257
+ * A total of ((size(parent)+tilesz-1)/tilesz) subgroups will
1258
+ * be created where threads having identical k = (thread_rank(parent)/tilesz)
1259
+ * will be members of the same subgroup.
1260
+ *
1261
+ * The implementation may cause the calling thread to wait until all the members
1262
+ * of the parent group have invoked the operation before resuming execution.
1263
+ *
1264
+ * Functionality is limited to power-of-two sized subgroup instances of at most
1265
+ * 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be
1266
+ * tiled_partition() in _CG_VERSION 1000.
1267
+ */
1268
+ _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz)
1269
+ {
1270
+ if (parent.get_type() == details::coalesced_group_id) {
1271
+ const coalesced_group *_cg = static_cast<const coalesced_group*>(&parent);
1272
+ return _cg->_get_tiled_threads(tilesz);
1273
+ }
1274
+ else {
1275
+ const thread_block *_tb = static_cast<const thread_block*>(&parent);
1276
+ return _tb->_get_tiled_threads(tilesz);
1277
+ }
1278
+ }
1279
+
1280
// Thread block type overload: returns a basic thread_group for now (may be specialized later)
_CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz)
{
    return (parent._get_tiled_threads(tilesz));
}
1285
+
1286
// Coalesced group type overload: retains its ability to stay coalesced
_CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz)
{
    return (parent._get_tiled_threads(tilesz));
}
1291
+
1292
namespace details {
    // Internal tile type with an accessible default constructor, used by the
    // multi-warp machinery to create per-warp sub-tiles.
    template <unsigned int Size, typename ParentT>
    class internal_thread_block_tile : public __single_warp_thread_block_tile<Size, ParentT> {};

    template <unsigned int Size, typename ParentT>
    _CG_QUALIFIER internal_thread_block_tile<Size, ParentT> tiled_partition_internal() {
        return internal_thread_block_tile<Size, ParentT>();
    }

    // Friend-bridge helpers: these free functions are befriended by the
    // multi-warp tile classes so other details code can reach their
    // private collectives/scratch/sync accessors.
    template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
    _CG_QUALIFIER TyVal multi_warp_collectives_helper(
            const GroupT& group,
            WarpLambda warp_lambda,
            InterWarpLambda inter_warp_lambda) {
        return group.template collectives_scheme<TyVal>(warp_lambda, inter_warp_lambda);
    }

    template <typename T, typename GroupT>
    _CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) {
        return group.template get_scratch_location<T>(warp_id);
    }

    template <typename GroupT>
    _CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) {
        return group.get_sync_location();
    }

}
1320
+ /**
1321
+ * tiled_partition<tilesz>
1322
+ *
1323
+ * The tiled_partition<tilesz>(parent) method is a collective operation that
1324
+ * partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
1325
+ *
1326
+ * A total of (size(parent)/tilesz) subgroups will be created,
1327
+ * therefore the parent group size must be evenly divisible by the tilesz.
1328
+ * The allowed parent groups are thread_block or thread_block_tile<size>.
1329
+ *
1330
+ * The implementation may cause the calling thread to wait until all the members
1331
+ * of the parent group have invoked the operation before resuming execution.
1332
+ *
1333
+ * Functionality is limited to native hardware sizes, 1/2/4/8/16/32.
1334
+ * The size(parent) must be greater than the template Size parameter
1335
+ * otherwise the results are undefined.
1336
+ */
1337
+
1338
+ #if defined(_CG_CPP11_FEATURES)
1339
// Base for tiles spanning multiple warps (Size in 64..1024, power of two).
// Collectives are built from warp-level primitives plus shared-memory
// scratch slots and a barrier held in 'tile_memory'; the exact arrive/wait
// ordering below is load-bearing — do not reorder.
template <unsigned int Size>
class __static_size_multi_warp_tile_base : public __static_size_tile_base<Size>
{
    static_assert(details::_is_valid_multi_warp_tile<Size>::value, "Size must be one of 64/128/256/512");

    // Friends that bridge private members to other details:: code.
    template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
    friend __device__ TyVal details::multi_warp_collectives_helper(
            const GroupT& group,
            WarpLambda warp_lambda,
            InterWarpLambda inter_warp_lambda);
    template <typename T, typename GroupT>
    friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id);
    template <typename GroupT>
    friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group);
    template <unsigned int OtherSize>
    friend class __static_size_multi_warp_tile_base;
    using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
    using ThisType = __static_size_multi_warp_tile_base<Size>;
    _CG_STATIC_CONST_DECL int numWarps = Size / 32;

protected:
    // Shared scratch/barrier storage, inherited from the parent group.
    details::multi_warp_scratch* const tile_memory;

    template <typename GroupT>
    _CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) {
#if !defined(_CG_USER_PROVIDED_SHARED_MEMORY)
        // On SM80+ with implicitly-provided memory, reset this tile's
        // barrier before first use, then sync the parent so no thread races
        // ahead of the reset.
        NV_IF_TARGET(NV_PROVIDES_SM_80,
            details::sync_warps_reset(get_sync_location(), details::cta::thread_rank());
            g.sync();
        )
#endif
    }


private:
    _CG_QUALIFIER details::barrier_t* get_sync_location() const {
        // Different group sizes use different barriers, all groups of a given size share one barrier.
        unsigned int sync_id = details::log2(Size / 64);
        return &tile_memory->barriers[sync_id];
    }

    // Scratch slot of the warp 'warp_id' (relative to this tile).
    template <typename T>
    _CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const {
        unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id;
        return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
    }

    // Scratch slot of the calling thread's own warp.
    template <typename T>
    _CG_QUALIFIER T* get_scratch_location() const {
        unsigned int scratch_id = details::cta::thread_rank() / 32;
        return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
    }

    // Broadcast 'val' from tile rank 'src' through shared-memory scratch:
    // the source warp publishes the value and arrives at the barrier; all
    // other warps wait for the source warp before reading.
    template <typename TyVal>
    _CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const {
        unsigned int src_warp = src / 32;
        auto warp = details::tiled_partition_internal<32, ThisType>();
        details::barrier_t* sync_location = get_sync_location();

        // Get warp slot of the source threads warp.
        TyVal* warp_scratch_location = get_scratch_location<TyVal>(src_warp);

        if (warp.meta_group_rank() == src_warp) {
            warp.sync();
            // Put shuffled value into my warp slot and let my warp arrive at the barrier.
            if (thread_rank() == src) {
                *warp_scratch_location = val;
            }
            details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps);
            TyVal result = *warp_scratch_location;
            details::sync_warps_wait(sync_location, details::cta::thread_rank());
            return result;
        }
        else {
            // Wait for the source warp to arrive on the barrier.
            details::sync_warps_wait_for_specific_warp(sync_location,
                    (details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp));
            TyVal result = *warp_scratch_location;
            details::sync_warps(sync_location, details::cta::thread_rank(), numWarps);
            return result;
        }
    }

    // Generic two-phase collective: each warp reduces into its scratch slot
    // (warp_lambda); the last warp to arrive combines the per-warp results
    // (inter_warp_lambda) and releases the others; then all read the result
    // back from their own slot.
    template <typename TyVal, typename WarpLambda, typename InterWarpLambda>
    _CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const {
        static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
                "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes");
        auto warp = details::tiled_partition_internal<32, ThisType>();
        details::barrier_t* sync_location = get_sync_location();
        TyVal* warp_scratch_location = get_scratch_location<TyVal>();

        warp_lambda(warp, warp_scratch_location);

        if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) {
            auto subwarp = details::tiled_partition_internal<numWarps, decltype(warp)>();
            if (subwarp.meta_group_rank() == 0) {
                TyVal* thread_scratch_location = get_scratch_location<TyVal>(subwarp.thread_rank());
                inter_warp_lambda(subwarp, thread_scratch_location);
            }
            warp.sync();
            details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps);
        }
        TyVal result = *warp_scratch_location;
        return result;
    }

public:
    _CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id;

    using __static_size_tile_base<Size>::thread_rank;

    // Broadcast from tile rank 'src'; TyVal must fit in a scratch slot.
    template <typename TyVal>
    _CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const {
        static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
                "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes");
        return shfl_impl(val, src);
    }

    // Barrier across all warps of the tile.
    _CG_QUALIFIER void sync() const {
        details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps);
    }

    // any(): per-warp __any_sync, then an inter-warp __any_sync over the
    // per-warp results held by the first numWarps lanes.
    _CG_QUALIFIER int any(int predicate) const {
        auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
                *warp_scratch_location = __any_sync(0xFFFFFFFF, predicate);
        };
        auto inter_warp_lambda =
            [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
                *thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
        };
        return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
    }

    // all(): same scheme as any(), using __all_sync at both levels.
    _CG_QUALIFIER int all(int predicate) const {
        auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
                *warp_scratch_location = __all_sync(0xFFFFFFFF, predicate);
        };
        auto inter_warp_lambda =
            [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
                *thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
        };
        return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
    }
};
1483
+
1484
+
1485
// Multi-warp tile with a statically-known parent: meta-group info comes from
// the static parent mixin, everything else from the multi-warp base.
template <unsigned int Size, typename ParentT = void>
class __multi_warp_thread_block_tile :
    public __static_size_multi_warp_tile_base<Size>,
    public __static_parent_thread_block_tile_base<Size, ParentT>
{
    typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
    typedef __static_size_multi_warp_tile_base<Size> staticTileBaseT;
protected:
    _CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) :
        __static_size_multi_warp_tile_base<Size>(g) {}
};
1496
+
1497
// Parent-erased multi-warp tile: the meta-group rank/size can no longer be
// computed statically, so they are captured from the source group at
// construction and stored per-object.
template <unsigned int Size>
class __multi_warp_thread_block_tile<Size, void> : public __static_size_multi_warp_tile_base<Size>
{
    const unsigned int metaGroupRank;
    const unsigned int metaGroupSize;

protected:
    template <unsigned int OtherSize, typename ParentT>
    _CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile<OtherSize, ParentT>& g) :
        __static_size_multi_warp_tile_base<Size>(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {}

public:
    _CG_QUALIFIER unsigned int meta_group_rank() const {
        return metaGroupRank;
    }

    _CG_QUALIFIER unsigned int meta_group_size() const {
        return metaGroupSize;
    }
};
1517
+ #endif
1518
+
1519
+ template <unsigned int Size, typename ParentT = void>
1520
+ class thread_block_tile;
1521
+
1522
namespace details {
    // Selects the single-warp or multi-warp implementation of
    // thread_block_tile based on the IsMultiWarp flag.
    template <unsigned int Size, typename ParentT, bool IsMultiWarp>
    class thread_block_tile_impl;

    // Single-warp case: forwards meta-group info (or defaults, when built
    // straight from a thread_block) to the single-warp tile.
    template <unsigned int Size, typename ParentT>
    class thread_block_tile_impl<Size, ParentT, false>: public __single_warp_thread_block_tile<Size, ParentT>
    {
    protected:
        template <unsigned int OtherSize, typename OtherParentT, bool OtherIsMultiWarp>
        _CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl<OtherSize, OtherParentT, OtherIsMultiWarp>& g) :
            __single_warp_thread_block_tile<Size, ParentT>(g.meta_group_rank(), g.meta_group_size()) {}

        _CG_QUALIFIER thread_block_tile_impl(const thread_block& g) :
            __single_warp_thread_block_tile<Size, ParentT>() {}
    };

#if defined(_CG_CPP11_FEATURES)
    // Multi-warp case (requires C++11): forwards the source group so the
    // multi-warp base can inherit its shared tile_memory.
    template <unsigned int Size, typename ParentT>
    class thread_block_tile_impl<Size, ParentT, true> : public __multi_warp_thread_block_tile<Size, ParentT>
    {
    protected:
        template <typename GroupT>
        _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) :
            __multi_warp_thread_block_tile<Size, ParentT>(g) {}
    };
#else
    // Without C++11 multi-warp tiles are unsupported; keep an empty shell so
    // the template still instantiates.
    template <unsigned int Size, typename ParentT>
    class thread_block_tile_impl<Size, ParentT, true>
    {
    protected:
        template <typename GroupT>
        _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {}
    };
#endif
}
1557
+
1558
// Public statically-sized tile with a known parent. Convertible to the
// parent-erased thread_block_tile<Size, void>.
template <unsigned int Size, typename ParentT>
class thread_block_tile : public details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>
{
    friend _CG_QUALIFIER thread_block_tile<1, void> this_thread();

protected:
    _CG_QUALIFIER thread_block_tile(const ParentT& g) :
        details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>(g) {}

public:
    _CG_QUALIFIER operator thread_block_tile<Size, void>() const {
        return thread_block_tile<Size, void>(*this);
    }
};
1572
+
1573
// Parent-erased public tile, constructible from any parented tile of the
// same Size (and, for internal use, from other sizes).
template <unsigned int Size>
class thread_block_tile<Size, void> : public details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>
{
    template <unsigned int, typename ParentT>
    friend class thread_block_tile;

protected:
    template <unsigned int OtherSize, typename OtherParentT>
    _CG_QUALIFIER thread_block_tile(const thread_block_tile<OtherSize, OtherParentT>& g) :
        details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}

public:
    template <typename ParentT>
    _CG_QUALIFIER thread_block_tile(const thread_block_tile<Size, ParentT>& g) :
        details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
};
1589
+
1590
namespace details {
    // Helper that gives tiled_partition<Size>(parent) a constructible type
    // for each supported parent kind.
    template <unsigned int Size, typename ParentT>
    struct tiled_partition_impl;

    // Partitioning a thread_block.
    template <unsigned int Size>
    struct tiled_partition_impl<Size, thread_block> : public thread_block_tile<Size, thread_block> {
        _CG_QUALIFIER tiled_partition_impl(const thread_block& g) :
            thread_block_tile<Size, thread_block>(g) {}
    };

    // ParentT = static thread_block_tile<ParentSize, GrandParent> specialization
    template <unsigned int Size, unsigned int ParentSize, typename GrandParent>
    struct tiled_partition_impl<Size, thread_block_tile<ParentSize, GrandParent> > :
        public thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> > {
#ifdef _CG_CPP11_FEATURES
        // A tile must be strictly smaller than the group it partitions.
        static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size");
#endif
        _CG_QUALIFIER tiled_partition_impl(const thread_block_tile<ParentSize, GrandParent>& g) :
            thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> >(g) {}
    };

}
1612
+
1613
// Statically-sized tiled_partition: dispatches to the tiled_partition_impl
// specialization matching the parent type.
template <unsigned int Size, typename ParentT>
_CG_QUALIFIER thread_block_tile<Size, ParentT> tiled_partition(const ParentT& g)
{
    return details::tiled_partition_impl<Size, ParentT>(g);
}
1618
+
1619
+ /**
1620
+ * thread_group this_thread()
1621
+ *
1622
+ * Constructs a generic thread_group containing only the calling thread
1623
+ */
1624
// Group containing only the calling thread.
_CG_QUALIFIER thread_block_tile<1, void> this_thread()
{
    // Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its
    // meta group rank and size set to 0 and 1 respectively.
    return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block());
}
1630
+
1631
+ /**
1632
+ * <group_type>.sync()
1633
+ *
1634
+ * Executes a barrier across the group
1635
+ *
1636
+ * Implements both a compiler fence and an architectural fence to prevent,
1637
+ * memory reordering around the barrier.
1638
+ */
1639
+ _CG_QUALIFIER void thread_group::sync() const
1640
+ {
1641
+ switch (_data.group.type) {
1642
+ case details::coalesced_group_id:
1643
+ cooperative_groups::sync(*static_cast<const coalesced_group*>(this));
1644
+ break;
1645
+ case details::thread_block_id:
1646
+ cooperative_groups::sync(*static_cast<const thread_block*>(this));
1647
+ break;
1648
+ case details::grid_group_id:
1649
+ cooperative_groups::sync(*static_cast<const grid_group*>(this));
1650
+ break;
1651
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1652
+ case details::multi_grid_group_id:
1653
+ cooperative_groups::sync(*static_cast<const multi_grid_group*>(this));
1654
+ break;
1655
+ #endif
1656
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1657
+ case details::cluster_group_id:
1658
+ cooperative_groups::sync(*static_cast<const cluster_group*>(this));
1659
+ break;
1660
+ #endif
1661
+ default:
1662
+ break;
1663
+ }
1664
+ }
1665
+
1666
+ /**
1667
+ * <group_type>.size()
1668
+ *
1669
+ * Returns the total number of threads in the group.
1670
+ */
1671
+ _CG_QUALIFIER unsigned long long thread_group::size() const
1672
+ {
1673
+ unsigned long long size = 0;
1674
+ switch (_data.group.type) {
1675
+ case details::coalesced_group_id:
1676
+ size = cooperative_groups::group_size(*static_cast<const coalesced_group*>(this));
1677
+ break;
1678
+ case details::thread_block_id:
1679
+ size = cooperative_groups::group_size(*static_cast<const thread_block*>(this));
1680
+ break;
1681
+ case details::grid_group_id:
1682
+ size = cooperative_groups::group_size(*static_cast<const grid_group*>(this));
1683
+ break;
1684
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1685
+ case details::multi_grid_group_id:
1686
+ size = cooperative_groups::group_size(*static_cast<const multi_grid_group*>(this));
1687
+ break;
1688
+ #endif
1689
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1690
+ case details::cluster_group_id:
1691
+ size = cooperative_groups::group_size(*static_cast<const cluster_group*>(this));
1692
+ break;
1693
+ #endif
1694
+ default:
1695
+ break;
1696
+ }
1697
+ return size;
1698
+ }
1699
+
1700
+ /**
1701
+ * <group_type>.thread_rank()
1702
+ *
1703
+ * Returns the linearized rank of the calling thread along the interval [0, size()).
1704
+ */
1705
+ _CG_QUALIFIER unsigned long long thread_group::thread_rank() const
1706
+ {
1707
+ unsigned long long rank = 0;
1708
+ switch (_data.group.type) {
1709
+ case details::coalesced_group_id:
1710
+ rank = cooperative_groups::thread_rank(*static_cast<const coalesced_group*>(this));
1711
+ break;
1712
+ case details::thread_block_id:
1713
+ rank = cooperative_groups::thread_rank(*static_cast<const thread_block*>(this));
1714
+ break;
1715
+ case details::grid_group_id:
1716
+ rank = cooperative_groups::thread_rank(*static_cast<const grid_group*>(this));
1717
+ break;
1718
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1719
+ case details::multi_grid_group_id:
1720
+ rank = cooperative_groups::thread_rank(*static_cast<const multi_grid_group*>(this));
1721
+ break;
1722
+ #endif
1723
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1724
+ case details::cluster_group_id:
1725
+ rank = cooperative_groups::thread_rank(*static_cast<const cluster_group*>(this));
1726
+ break;
1727
+ #endif
1728
+ default:
1729
+ break;
1730
+ }
1731
+ return rank;
1732
+ }
1733
+
1734
+ _CG_END_NAMESPACE
1735
+
1736
+ #include <cooperative_groups/details/partitioning.h>
1737
+ #if (!defined(_MSC_VER) || defined(_WIN64))
1738
+ # include <cooperative_groups/details/invoke.h>
1739
+ #endif
1740
+
1741
+ # endif /* ! (__cplusplus, __CUDACC__) */
1742
+
1743
+ #endif /* !_COOPERATIVE_GROUPS_H_ */
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_ASYNC_H
50
+ #define _CG_ASYNC_H
51
+
52
+ #include "helpers.h"
53
+ #include "info.h"
54
+
55
+ #include <cuda_pipeline.h>
56
+
57
+ _CG_BEGIN_NAMESPACE
58
+
59
+ namespace details {
60
+ // Groups supported by memcpy_async
61
+ template <class TyGroup>
62
+ struct _async_copy_group_supported : public _CG_STL_NAMESPACE::false_type {};
63
+
64
+ template <unsigned int Sz, typename TyPar>
65
+ struct _async_copy_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>>
66
+ : public _CG_STL_NAMESPACE::true_type {};
67
+ template <>
68
+ struct _async_copy_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
69
+ template <>
70
+ struct _async_copy_group_supported<cooperative_groups::thread_block> : public _CG_STL_NAMESPACE::true_type {};
71
+
72
+ template <class TyGroup>
73
+ using async_copy_group_supported = _async_copy_group_supported<details::remove_qual<TyGroup>>;
74
+
75
+ // Groups that require optimization
76
+ template <class TyGroup>
77
+ struct _async_copy_optimize_tile : public _CG_STL_NAMESPACE::false_type {};
78
+
79
+ template <typename TyPar>
80
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<1, TyPar>>
81
+ : public _CG_STL_NAMESPACE::false_type {};
82
+
83
+ template <unsigned int Sz, typename TyPar>
84
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<Sz, TyPar>>
85
+ : public _CG_STL_NAMESPACE::true_type {};
86
+
87
+ template <class TyGroup>
88
+ using async_copy_optimize_tile = _async_copy_optimize_tile<details::remove_qual<TyGroup>>;
89
+
90
+ // SFINAE helpers for tile optimizations
91
+ template <class TyGroup>
92
+ using enable_tile_optimization =
93
+ typename _CG_STL_NAMESPACE::enable_if<async_copy_optimize_tile<TyGroup>::value, void *>::type;
94
+
95
+ template <class TyGroup>
96
+ using disable_tile_optimization =
97
+ typename _CG_STL_NAMESPACE::enable_if<!async_copy_optimize_tile<TyGroup>::value, void *>::type;
98
+
99
+ // Segment for punning to aligned types
100
+ template <unsigned int N>
101
+ struct _Segment {
102
+ int _seg[N];
103
+ };
104
+
105
+ // Trivial layout guaranteed-aligned copy-async compatible segments
106
+ template <unsigned int N>
107
+ struct Segment;
108
+ template <>
109
+ struct __align__(4) Segment<1> : public _Segment<1>{};
110
+ template <>
111
+ struct __align__(8) Segment<2> : public _Segment<2>{};
112
+ template <>
113
+ struct __align__(16) Segment<4> : public _Segment<4>{};
114
+
115
+ // Interleaved element by element copies from source to dest
116
+ template <typename TyGroup, typename TyElem>
117
+ _CG_STATIC_QUALIFIER void inline_copy(TyGroup &group, TyElem *__restrict__ dst, const TyElem *__restrict__ src,
118
+ size_t count) {
119
+ const unsigned int rank = group.thread_rank();
120
+ const unsigned int stride = group.size();
121
+
122
+ for (size_t idx = rank; idx < count; idx += stride) {
123
+ dst[idx] = src[idx];
124
+ }
125
+ }
126
+
127
+ template <typename TyGroup, typename TyElem, enable_tile_optimization<TyGroup> = nullptr>
128
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
129
+ const TyElem *__restrict__ src, size_t count) {
130
+ static_assert(async_copy_group_supported<TyGroup>::value,
131
+ "Async copy is only supported for groups that represent private shared memory");
132
+
133
+ if (count == 0) {
134
+ return;
135
+ }
136
+
137
+ const bool dstIsNotShared = !__isShared(dst);
138
+ const bool srcIsNotGlobal = !__isGlobal(src);
139
+
140
+ if (dstIsNotShared || srcIsNotGlobal) {
141
+ inline_copy(group, dst, src, count);
142
+ return;
143
+ }
144
+
145
+ const unsigned int stride = group.size();
146
+ const unsigned int rank = group.thread_rank();
147
+ // Efficient copies require warps to operate on the same amount of work at each step.
148
+ // remainders are handled in a separate stage to prevent branching
149
+ const unsigned int subWarpMask = (stride - 1);
150
+ const unsigned int subwarpCopies = (subWarpMask & (unsigned int)count);
151
+ const unsigned int maxSubwarpRank = min(rank, subwarpCopies - 1);
152
+
153
+ const size_t warpCopies = (count & (~subWarpMask));
154
+
155
+ for (size_t idx = 0; idx < warpCopies; idx += stride) {
156
+ size_t _srcIdx = rank + idx;
157
+ size_t _dstIdx = rank + idx;
158
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
159
+ }
160
+
161
+ if (subwarpCopies) {
162
+ size_t _srcIdx = warpCopies + maxSubwarpRank;
163
+ size_t _dstIdx = warpCopies + maxSubwarpRank;
164
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
165
+ }
166
+ }
167
+
168
+ template <typename TyGroup, typename TyElem, disable_tile_optimization<TyGroup> = nullptr>
169
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
170
+ const TyElem *__restrict__ src, size_t count) {
171
+ static_assert(async_copy_group_supported<TyGroup>::value,
172
+ "Async copy is only supported for groups that represent private shared memory");
173
+
174
+ const bool dstIsNotShared = !__isShared(dst);
175
+ const bool srcIsNotGlobal = !__isGlobal(src);
176
+
177
+ if (dstIsNotShared || srcIsNotGlobal) {
178
+ inline_copy(group, dst, src, count);
179
+ return;
180
+ }
181
+
182
+ unsigned int stride = group.size();
183
+ unsigned int rank = group.thread_rank();
184
+
185
+ for (size_t idx = rank; idx < count; idx += stride) {
186
+ size_t _srcIdx = idx;
187
+ size_t _dstIdx = idx;
188
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
189
+ }
190
+ }
191
+
192
+ // Determine best possible alignment given an input and initial conditions
193
+ // Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments
194
+ template <unsigned int MinAlignment, unsigned int MaxAlignment>
195
+ _CG_STATIC_QUALIFIER uint32_t find_best_alignment(void *__restrict__ dst, const void *__restrict__ src) {
196
+ // Narrowing conversion intentional
197
+ uint32_t base1 = (uint32_t) reinterpret_cast<uintptr_t>(src);
198
+ uint32_t base2 = (uint32_t) reinterpret_cast<uintptr_t>(dst);
199
+
200
+ uint32_t diff = ((base1) ^ (base2)) & (MaxAlignment - 1);
201
+
202
+ // range [MaxAlignment, alignof(elem)], step: x >> 1
203
+ // over range of possible alignments, choose best available out of range
204
+ uint32_t out = MaxAlignment;
205
+ #pragma unroll
206
+ for (uint32_t alignment = (MaxAlignment >> 1); alignment >= MinAlignment; alignment >>= 1) {
207
+ if (alignment & diff)
208
+ out = alignment;
209
+ }
210
+
211
+ return out;
212
+ }
213
+
214
+ // Determine best possible alignment given an input and initial conditions
215
+ // Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments
216
+ template <typename TyType, typename TyGroup>
217
+ _CG_STATIC_QUALIFIER void copy_like(const TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
218
+ size_t count) {
219
+ const char *src = reinterpret_cast<const char *>(_src);
220
+ char *dst = reinterpret_cast<char *>(_dst);
221
+
222
+ constexpr uint32_t targetAlignment = (uint32_t)alignof(TyType);
223
+
224
+ uint32_t base = (uint32_t) reinterpret_cast<uintptr_t>(src);
225
+ uint32_t alignOffset = ((~base) + 1) & (targetAlignment - 1);
226
+
227
+ inline_copy(group, dst, src, alignOffset);
228
+ count -= alignOffset;
229
+ src += alignOffset;
230
+ dst += alignOffset;
231
+
232
+ // Copy using the best available alignment, async_copy expects n-datums, not bytes
233
+ size_t asyncCount = count / sizeof(TyType);
234
+ accelerated_async_copy(group, reinterpret_cast<TyType *>(dst), reinterpret_cast<const TyType *>(src), asyncCount);
235
+ asyncCount *= sizeof(TyType);
236
+
237
+ count -= asyncCount;
238
+ src += asyncCount;
239
+ dst += asyncCount;
240
+ inline_copy(group, dst, src, count);
241
+ }
242
+
243
+ // We must determine alignment and manually align src/dst ourselves
244
+ template <size_t AlignHint>
245
+ struct _memcpy_async_align_dispatch {
246
+ template <typename TyGroup>
247
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ dst, const void *__restrict__ src, size_t count) {
248
+ uint32_t alignment = find_best_alignment<AlignHint, 16>(dst, src);
249
+
250
+ // Avoid copying the extra bytes if desired copy count is smaller
251
+ alignment = count < alignment ? AlignHint : alignment;
252
+
253
+ switch (alignment) {
254
+ default:
255
+ case 1:
256
+ inline_copy(group, reinterpret_cast<char *>(dst), reinterpret_cast<const char *>(src), count);
257
+ break;
258
+ case 2:
259
+ inline_copy(group, reinterpret_cast<short *>(dst), reinterpret_cast<const short *>(src), count >> 1);
260
+ break;
261
+ case 4:
262
+ copy_like<Segment<1>>(group, dst, src, count);
263
+ break;
264
+ case 8:
265
+ copy_like<Segment<2>>(group, dst, src, count);
266
+ break;
267
+ case 16:
268
+ copy_like<Segment<4>>(group, dst, src, count);
269
+ break;
270
+ }
271
+ }
272
+ };
273
+
274
+ // Specialization for 4 byte alignments
275
+ template <>
276
+ struct _memcpy_async_align_dispatch<4> {
277
+ template <typename TyGroup>
278
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
279
+ size_t count) {
280
+ const Segment<1> *src = reinterpret_cast<const Segment<1> *>(_src);
281
+ Segment<1> *dst = reinterpret_cast<Segment<1> *>(_dst);
282
+
283
+ // Dispatch straight to aligned LDGSTS calls
284
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
285
+ }
286
+ };
287
+
288
+ // Specialization for 8 byte alignments
289
+ template <>
290
+ struct _memcpy_async_align_dispatch<8> {
291
+ template <typename TyGroup>
292
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
293
+ size_t count) {
294
+ const Segment<2> *src = reinterpret_cast<const Segment<2> *>(_src);
295
+ Segment<2> *dst = reinterpret_cast<Segment<2> *>(_dst);
296
+
297
+ // Dispatch straight to aligned LDGSTS calls
298
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
299
+ }
300
+ };
301
+
302
+ // Alignments over 16 are truncated to 16 and bypass alignment
303
+ // This is the highest performing memcpy available
304
+ template <>
305
+ struct _memcpy_async_align_dispatch<16> {
306
+ template <typename TyGroup>
307
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
308
+ size_t count) {
309
+ const Segment<4> *src = reinterpret_cast<const Segment<4> *>(_src);
310
+ Segment<4> *dst = reinterpret_cast<Segment<4> *>(_dst);
311
+
312
+ // Dispatch straight to aligned LDGSTS calls
313
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
314
+ }
315
+ };
316
+
317
+ // byte-wide API
318
+ template <size_t Alignment, class TyGroup>
319
+ _CG_STATIC_QUALIFIER void _memcpy_async_dispatch_to_aligned_copy(const TyGroup &group, void *__restrict__ _dst,
320
+ const void *__restrict__ _src, size_t count) {
321
+ static_assert(!(Alignment & (Alignment - 1)), "Known static alignment dispatch must be a power of 2");
322
+ details::_memcpy_async_align_dispatch<Alignment>::copy(group, _dst, _src, count);
323
+ }
324
+
325
+ // Internal dispatch APIs
326
+ // These deduce the alignments and sizes necessary to invoke the underlying copy engine
327
+ template <typename Ty>
328
+ using is_void = _CG_STL_NAMESPACE::is_same<Ty, void>;
329
+
330
+ template <typename Ty>
331
+ using enable_if_not_void = typename _CG_STL_NAMESPACE::enable_if<!is_void<Ty>::value, void *>::type;
332
+
333
+ template <typename Ty>
334
+ using enable_if_void = typename _CG_STL_NAMESPACE::enable_if<is_void<Ty>::value, void *>::type;
335
+
336
+ template <typename Ty>
337
+ using enable_if_integral =
338
+ typename _CG_STL_NAMESPACE::enable_if<_CG_STL_NAMESPACE::is_integral<Ty>::value, void *>::type;
339
+
340
+ // byte-wide API using aligned_sized_t
341
+ template <class TyGroup, template <size_t> typename Alignment, size_t Hint>
342
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, void *__restrict__ _dst,
343
+ const void *__restrict__ _src, const Alignment<Hint> &count) {
344
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
345
+
346
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, (size_t)count);
347
+ }
348
+
349
+ // byte-wide API using type for aligment
350
+ template <class TyGroup, typename TyElem, typename TySize, size_t Hint = alignof(TyElem),
351
+ enable_if_not_void<TyElem> = nullptr, enable_if_integral<TySize> = nullptr>
352
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
353
+ const TyElem *__restrict__ _src, const TySize& count) {
354
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
355
+
356
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, count);
357
+ }
358
+
359
+ // byte-wide API with full alignment deduction required
360
+ template <class TyGroup, typename TyElem, typename TySize, enable_if_void<TyElem> = nullptr,
361
+ enable_if_integral<TySize> = nullptr>
362
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
363
+ const TyElem *__restrict__ _src, const TySize& count) {
364
+ details::_memcpy_async_dispatch_to_aligned_copy<1>(group, _dst, _src, count);
365
+ }
366
+
367
+ // 1d-datum API
368
+ template <class TyGroup, typename TyElem, size_t Hint = alignof(TyElem)>
369
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const size_t dstCount,
370
+ const TyElem *__restrict__ src, const size_t srcCount) {
371
+ constexpr unsigned int _align = Hint;
372
+ const size_t totalCount = min(dstCount, srcCount) * sizeof(TyElem);
373
+
374
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
375
+ }
376
+
377
+ // 1d-datum API using aligned_size_t
378
+ template <class TyGroup, typename TyElem, template <size_t> typename Alignment, size_t Hint>
379
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const Alignment<Hint> &dstCount,
380
+ const TyElem *__restrict__ src, const Alignment<Hint> &srcCount) {
381
+ constexpr unsigned int _align = Hint;
382
+ const size_t totalCount = min((size_t)dstCount, (size_t)srcCount) * sizeof(TyElem);
383
+
384
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
385
+ }
386
+
387
+ } // namespace details
388
+
389
+ /*
390
+ * Group submit batch of async-copy to cover contiguous 1D array
391
+ * and commit that batch to eventually wait for completion.
392
+ */
393
+ template <class TyGroup, typename TyElem, typename TySizeT>
394
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ _dst, const TyElem *__restrict__ _src,
395
+ const TySizeT &count) {
396
+ details::_memcpy_async_bytes(group, _dst, _src, count);
397
+ __pipeline_commit();
398
+ }
399
+
400
+ /*
401
+ * Group submit batch of async-copy to cover contiguous 1D array
402
+ * and commit that batch to eventually wait for completion.
403
+ * Object counts are in datum sized chunks, not bytes.
404
+ */
405
+ template <class TyGroup, class TyElem, typename DstLayout, typename SrcLayout>
406
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ dst, const DstLayout &dstLayout,
407
+ const TyElem *__restrict__ src, const SrcLayout &srcLayout) {
408
+ details::_memcpy_async_datum(group, dst, dstLayout, src, srcLayout);
409
+ __pipeline_commit();
410
+ }
411
+
412
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
413
+ template <unsigned int Stage, class TyGroup>
414
+ _CG_STATIC_QUALIFIER void wait_prior(const TyGroup &group) {
415
+ __pipeline_wait_prior(Stage);
416
+ group.sync();
417
+ }
418
+
419
+ /* Group wait all previously submitted memcpy_async to complete. */
420
+ template <class TyGroup>
421
+ _CG_STATIC_QUALIFIER void wait(const TyGroup &group) {
422
+ __pipeline_wait_prior(0);
423
+ group.sync();
424
+ }
425
+
426
+ /***************** CG APIs including pipeline are deprecated *****************/
427
+
428
+ /* Group submit batch of async-copy to cover of contiguous 1D array
429
+ to a pipeline and commit the batch*/
430
+ template <class TyGroup, class TyElem>
431
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void memcpy_async(TyGroup &group, TyElem *dst, size_t dstCount, const TyElem *src, size_t srcCount,
432
+ nvcuda::experimental::pipeline &pipe) {
433
+ details::_memcpy_async_datum(group, dst, dstCount, src, srcCount);
434
+ pipe.commit();
435
+ }
436
+
437
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
438
+ template <unsigned int Stage, class TyGroup>
439
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait_prior(TyGroup &group, nvcuda::experimental::pipeline &pipe) {
440
+ pipe.wait_prior<Stage>();
441
+ group.sync();
442
+ }
443
+
444
+ /* Group wait for stage-S of memcpy_async to complete. */
445
+ template <class TyGroup>
446
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait(TyGroup &group, nvcuda::experimental::pipeline &pipe, size_t stage) {
447
+ pipe.wait(stage);
448
+ group.sync();
449
+ }
450
+ _CG_END_NAMESPACE
451
+
452
+ #endif // _CG_ASYNC_H
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_REDUCE_H_
50
+ #define _CG_COALESCED_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "coalesced_scan.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyVal, typename TyOp, unsigned int TySize, typename ParentT>
63
+ _CG_QUALIFIER auto coalesced_reduce(const __single_warp_thread_block_tile<TySize, ParentT>& group,
64
+ TyVal&& val,
65
+ TyOp&& op) -> decltype(op(val, val)) {
66
+ auto out = val;
67
+ for (int mask = TySize >> 1; mask > 0; mask >>= 1) {
68
+ out = op(out, group.shfl_xor(out, mask));
69
+ }
70
+
71
+ return out;
72
+ }
73
+
74
+ template <typename TyVal, typename TyOp>
75
+ _CG_QUALIFIER auto coalesced_reduce(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
76
+ if (group.size() == 32) {
77
+ // Full coalesced group can go through faster path by being treated as a tile of size 32
78
+ auto tile = details::tiled_partition_internal<32, void>();
79
+ return coalesced_reduce(tile, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
80
+ }
81
+ else {
82
+ auto scan_result =
83
+ inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
84
+ unsigned int group_mask = _coalesced_group_data_access::get_mask(group);
85
+ unsigned int last_thread_id = 31 - __clz(group_mask);
86
+ return details::tile::shuffle_dispatch<TyVal>::shfl(
87
+ _CG_STL_NAMESPACE::forward<TyVal>(scan_result), group_mask, last_thread_id, 32);
88
+ }
89
+ }
90
+
91
+ } // details
92
+
93
+ _CG_END_NAMESPACE
94
+
95
+ #endif // _CG_COALESCED_REDUCE_H_
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_SCAN_H_
50
+ #define _CG_COALESCED_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "functional.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
// Inclusive scan for groups whose thread ranks map to contiguous lanes:
// doubling-offset shuffle-up passes; after ceil(log2(size)) rounds each
// lane holds op folded over all values at ranks <= its own.
template <typename TyGroup, typename TyVal, typename TyOp>
_CG_QUALIFIER auto inclusive_scan_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
    auto out = val;
    for (int mask = 1; mask < group.size(); mask <<= 1) {
        // The shuffle is executed unconditionally by every lane (shuffles
        // are collective); only the accumulation is predicated on the lane
        // actually having a source `mask` ranks below it.
        auto tmp = group.shfl_up(out, mask);
        if (mask <= group.thread_rank()) {
            out = op(out, tmp);
        }
    }

    return out;
}
74
+
75
// Inclusive scan for coalesced groups whose member lanes are NOT contiguous
// in the warp. Rank r must read from the lane whose rank is (r - step); that
// physical lane id is located by binary-searching the membership mask.
template <typename TyGroup, typename TyVal, typename TyOp>
_CG_QUALIFIER auto inclusive_scan_non_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
    const unsigned int groupSize = group.size();
    auto out = val;

    // Warp bitmask of all member lanes, and the subset strictly below this
    // thread's own lane id (its popcount is this thread's rank).
    const unsigned int mask = details::_coalesced_group_data_access::get_mask(group);
    unsigned int lanemask = details::lanemask32_lt() & mask;
    unsigned int srcLane = details::laneid();

    const unsigned int base = __ffs(mask)-1; /* lane with rank == 0 */
    const unsigned int rank = __popc(lanemask);

    for (unsigned int i = 1, j = 1; i < groupSize; i <<= 1) {
        if (i <= rank) {
            // Narrow down the lane holding rank (rank - i), starting from
            // the bounds established in the previous round.
            srcLane -= j;
            j = i; /* maximum possible lane */

            unsigned int begLane = base + rank - i; /* minimum possible lane */

            /* Next source lane is in the range [ begLane .. srcLane ]
             * If begLane < srcLane then do a binary search.
             */
            while (begLane < srcLane) {
                const unsigned int halfLane = (begLane + srcLane) >> 1;
                // d = number of lower-rank members at lane >= halfLane.
                const unsigned int halfMask = lanemask >> halfLane;
                const unsigned int d = __popc(halfMask);
                if (d < i) {
                    srcLane = halfLane - 1; /* halfLane too large */
                }
                else if ((i < d) || !(halfMask & 0x01)) {
                    begLane = halfLane + 1; /* halfLane too small */
                }
                else {
                    begLane = srcLane = halfLane; /* happen to hit */
                }
            }
        }

        // Executed by ALL member lanes each round, even those that do not
        // accumulate this round -- shuffles are warp-collective.
        auto tmp = details::tile::shuffle_dispatch<TyVal>::shfl(out, mask, srcLane, 32);
        if (i <= rank) {
            out = op(out, tmp);
        }
    }
    return out;
}
120
+
121
// Static warp tiles always have contiguous lane ranks, so the contiguous
// fast path applies unconditionally.
template <unsigned int TySize, typename ParentT, typename TyVal, typename TyOp>
_CG_QUALIFIER auto coalesced_inclusive_scan(const __single_warp_thread_block_tile<TySize, ParentT>& group,
                                            TyVal&& val,
                                            TyOp&& op) -> decltype(op(val, val)) {
    return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
}
127
+
128
+ template <typename TyVal, typename TyOp>
129
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
130
+ if (group.size() == 32) {
131
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
132
+ }
133
+ else {
134
+ return inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
135
+ }
136
+ }
137
+
138
// Tag dispatch on whether inclusive->exclusive scan conversion can be done
// arithmetically. (The historical "convertion" spelling is kept: the
// dispatcher below names this type as-is.)
template <bool IntegralOptimized>
struct scan_choose_convertion;

// Integral plus-scan: the exclusive result is simply the inclusive result
// minus the lane's own contribution.
template<>
struct scan_choose_convertion<true> {
    template <typename TyGroup, typename TyRes, typename TyVal>
    _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
        return result - val;
    }
};

// General case: shift the inclusive results down by one rank. Rank 0 has no
// predecessor and receives a value-initialized element.
template<>
struct scan_choose_convertion<false> {
    template <typename TyGroup, typename TyRes, typename TyVal>
    _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
        // shfl_up is executed by every lane (collective), including rank 0,
        // even though rank 0 discards the shuffled value.
        auto ret = group.shfl_up(result, 1);
        if (group.thread_rank() == 0) {
            return {};
        }
        else {
            return ret;
        }
    }
};
162
+
163
+ template <typename TyGroup, typename TyRes, typename TyVal, typename TyFn>
164
+ _CG_QUALIFIER auto convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
165
+ using conversion = scan_choose_convertion<_CG_STL_NAMESPACE::is_same<remove_qual<TyFn>, cooperative_groups::plus<remove_qual<TyVal>>>::value
166
+ && _CG_STL_NAMESPACE::is_integral<remove_qual<TyVal>>::value>;
167
+ return conversion::convert_inclusive_to_exclusive(group, result, _CG_STL_NAMESPACE::forward<TyVal>(val));
168
+ }
169
+
170
+ } // details
171
+
172
+ _CG_END_NAMESPACE
173
+
174
+ #endif // _CG_COALESCED_SCAN_H_
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_DRIVER_API_H
50
+ #define _CG_DRIVER_API_H
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ namespace details {
57
// Primary template: reading an arbitrary environment register is not
// supported. Only the register numbers explicitly specialized via
// LOAD_ENVREG below may be loaded; any other instantiation aborts.
template <unsigned int RegId>
_CG_QUALIFIER unsigned int load_env_reg() {
    // Abort by default
    _CG_ABORT();
    return 0;
}
63
+
64
+ template <unsigned int HiReg, unsigned int LoReg>
65
+ _CG_QUALIFIER unsigned long long load_env_reg64() {
66
+ unsigned long long registerLo = load_env_reg<LoReg>();
67
+ unsigned long long registerHi = load_env_reg<HiReg>();
68
+
69
+ return (registerHi << 32) | registerLo;
70
+ }
71
+
72
// inline PTX for accessing registers requires an immediate for the special reg
// Each expansion defines a full specialization of load_env_reg<NUMBER> that
// reads %envregNUMBER; NUMBER must be a literal because the register name
// is pasted at preprocessing time via token stringization.
# define LOAD_ENVREG(NUMBER) \
  template <> _CG_QUALIFIER unsigned int load_env_reg<NUMBER>() { \
      unsigned int r; \
      asm ("mov.u32 %0, %%envreg" #NUMBER ";" : "=r"(r)); \
      return r; \
  }

// Instantiate loaders for registers used
LOAD_ENVREG(0);
LOAD_ENVREG(1);
LOAD_ENVREG(2);
# undef LOAD_ENVREG
85
+
86
// Driver-provided workspace used for grid-wide cooperation.
// NOTE(review): this layout is ABI shared with the CUDA driver -- do not
// reorder or resize fields.
struct grid_workspace {
    unsigned int wsSize;   // workspace size reported by the driver -- units not shown here, TODO confirm
    unsigned int barrier;  // barrier state word -- presumably used for grid sync; verify against sync code
};

// Reconstructs the driver's grid workspace pointer from envregs 1 (high
// 32 bits) and 2 (low 32 bits).
_CG_QUALIFIER grid_workspace* get_grid_workspace() {
    unsigned long long gridWsAbiAddress = load_env_reg64<1, 2>();
    // Interpret the address from envreg 1 and 2 as the driver's grid workspace
    return (reinterpret_cast<grid_workspace*>(gridWsAbiAddress));
}
96
+ }
97
+ _CG_END_NAMESPACE
98
+
99
+ #endif // _CG_DRIVER_API_H
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_FUNCTIONAL_H
50
+ #define _CG_FUNCTIONAL_H
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ #ifdef _CG_USE_CUDA_STL
57
+ # include <cuda/std/functional>
58
+ #endif
59
+
60
+ _CG_BEGIN_NAMESPACE
61
+
62
namespace details {
#ifdef _CG_USE_CUDA_STL
    // Reuse libcu++ functors when the CUDA STL is available.
    using cuda::std::plus;
    using cuda::std::bit_and;
    using cuda::std::bit_xor;
    using cuda::std::bit_or;
#else
    // Minimal stand-ins with the same call signatures when libcu++ is not in use.
    template <typename Ty> struct plus {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 + arg2;}};
    template <typename Ty> struct bit_and {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 & arg2;}};
    template <typename Ty> struct bit_xor {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 ^ arg2;}};
    template <typename Ty> struct bit_or {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 | arg2;}};
#endif // _CG_USE_PLATFORM_STL
} // details
75
+
76
// Addition functor exposed in the cooperative_groups namespace
// (backed by either cuda::std::plus or the local fallback in details).
template <typename Ty>
struct plus : public details::plus<Ty> {};
78
+
79
+ template <typename Ty>
80
+ struct less {
81
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
82
+ return (arg2 < arg1) ? arg2 : arg1;
83
+ }
84
+ };
85
+
86
+ template <typename Ty>
87
+ struct greater {
88
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
89
+ return (arg1 < arg2) ? arg2 : arg1;
90
+ }
91
+ };
92
+
93
// Bitwise AND/XOR/OR functors exposed in the cooperative_groups namespace,
// backed by the details-level implementations chosen above.
template <typename Ty>
struct bit_and : public details::bit_and<Ty> {};

template <typename Ty>
struct bit_xor : public details::bit_xor<Ty> {};

template <typename Ty>
struct bit_or : public details::bit_or<Ty> {};
101
+
102
#if defined(_CG_HAS_STL_ATOMICS)
namespace details {
    // True for 4- or 8-byte integral types -- the only sizes the native
    // atomic fetch_* path below handles.
    template <class Ty>
    using _atomic_is_type_supported = _CG_STL_NAMESPACE::integral_constant<bool,
        _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) == 4 || sizeof(Ty) == 8)>;

    // Maps an operator functor to "has a direct atomic fetch_* equivalent".
    // Operators not listed here fall back to the CAS loop.
    template <typename TyOp> struct _atomic_op_supported : public _CG_STL_NAMESPACE::false_type {};
    template <typename Ty> struct _atomic_op_supported<cooperative_groups::plus<Ty>> : public _atomic_is_type_supported<Ty> {};
    template <typename Ty> struct _atomic_op_supported<cooperative_groups::less<Ty>> : public _atomic_is_type_supported<Ty> {};
    template <typename Ty> struct _atomic_op_supported<cooperative_groups::greater<Ty>> : public _atomic_is_type_supported<Ty> {};
    template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_and<Ty>> : public _atomic_is_type_supported<Ty> {};
    template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_or<Ty>> : public _atomic_is_type_supported<Ty> {};
    template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_xor<Ty>> : public _atomic_is_type_supported<Ty> {};
115
+
116
    // Generic atomic read-modify-write via a compare-exchange loop; used when
    // the operator has no native fetch_* form. Returns the value observed
    // before the successful update (fetch semantics).
    template<typename TyAtomic, typename TyVal, typename TyOp>
    _CG_QUALIFIER remove_qual<TyVal> atomic_cas_fallback(TyAtomic&& atomic, TyVal&& val, TyOp&& op) {
        auto old = atomic.load(cuda::std::memory_order_relaxed);
        // compare_exchange_weak writes the freshly observed value into `old`
        // on failure, so each retry recomputes op against the current state.
        while(!atomic.compare_exchange_weak(old, op(old, val), cuda::std::memory_order_relaxed));
        return old;
    }
122
+
123
    // Maps each supported operator functor to its native atomic fetch_*
    // call (all relaxed ordering). Only specializations exist; unsupported
    // operators never reach this dispatcher.
    template<typename TyOp>
    struct op_picker;

    // plus -> fetch_add
    template<typename TyVal>
    struct op_picker<cooperative_groups::plus<TyVal>> {
        template<typename TyAtomic>
        _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
            return atomic.fetch_add(val, cuda::std::memory_order_relaxed);
        }
    };

    // less -> fetch_min
    template<typename TyVal>
    struct op_picker<cooperative_groups::less<TyVal>> {
        template<typename TyAtomic>
        _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
            return atomic.fetch_min(val, cuda::std::memory_order_relaxed);
        }
    };

    // greater -> fetch_max
    template<typename TyVal>
    struct op_picker<cooperative_groups::greater<TyVal>> {
        template<typename TyAtomic>
        _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
            return atomic.fetch_max(val, cuda::std::memory_order_relaxed);
        }
    };

    // bit_and -> fetch_and
    template<typename TyVal>
    struct op_picker<cooperative_groups::bit_and<TyVal>> {
        template<typename TyAtomic>
        _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
            return atomic.fetch_and(val, cuda::std::memory_order_relaxed);
        }
    };

    // bit_xor -> fetch_xor
    template<typename TyVal>
    struct op_picker<cooperative_groups::bit_xor<TyVal>> {
        template<typename TyAtomic>
        _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
            return atomic.fetch_xor(val, cuda::std::memory_order_relaxed);
        }
    };

    // bit_or -> fetch_or
    template<typename TyVal>
    struct op_picker<cooperative_groups::bit_or<TyVal>> {
        template<typename TyAtomic>
        _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
            return atomic.fetch_or(val, cuda::std::memory_order_relaxed);
        }
    };
173
+
174
    // Compile-time switch between the CAS fallback (false) and the native
    // fetch_* path (true), selected by _atomic_op_supported.
    template<bool atomic_supported>
    struct atomic_update_dispatch {};

    // Unsupported (operator, type) pairs: CAS loop.
    template<>
    struct atomic_update_dispatch<false> {
        template<typename TyAtomic, typename TyVal, typename TyOp>
        _CG_STATIC_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
            return atomic_cas_fallback(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
        }
    };

    // Supported pairs: route to the operator's fetch_* call. The functor
    // itself is only used for type-based dispatch and is never invoked.
    template<>
    struct atomic_update_dispatch<true> {
        template<typename TyAtomic, typename TyVal, typename TyOp>
        _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val, TyOp&& op) {
            using dispatch = op_picker<details::remove_qual<TyOp>>;

            return dispatch::atomic_update(atomic, val);
        }
    };
194
+
195
    // Applies op(current, val) to the atomic with fetch semantics, choosing
    // a native fetch_* instruction when the (operator, type) pair supports
    // one and a CAS loop otherwise.
    template<typename TyAtomic, typename TyVal, typename TyOp>
    _CG_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
        using dispatch = atomic_update_dispatch<_atomic_op_supported<details::remove_qual<TyOp>>::value>;

        return dispatch::atomic_update(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
    }
201
+
202
    // Relaxed atomic store helper; atomicity only, no ordering guarantees.
    template<typename TyAtomic, typename TyVal>
    _CG_QUALIFIER void atomic_store(TyAtomic& atomic, TyVal&& val) {
        atomic.store(val, cuda::std::memory_order_relaxed);
    }
206
+ }
207
+ #endif
208
+
209
+ _CG_END_NAMESPACE
210
+
211
+ #endif
212
+ #endif //_CG_FUNCTIONAL_H
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h ADDED
@@ -0,0 +1,693 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_HELPERS_H_
50
+ # define _COOPERATIVE_GROUPS_HELPERS_H_
51
+
52
+ #include "info.h"
53
+ #include "sync.h"
54
+
55
+ _CG_BEGIN_NAMESPACE
56
+
57
+ namespace details {
58
#ifdef _CG_CPP11_FEATURES
    // True for floating-point types, extended to __half/__half2 when FP16
    // collectives are available.
    template <typename Ty> struct _is_float_or_half : public _CG_STL_NAMESPACE::is_floating_point<Ty> {};
# ifdef _CG_HAS_FP16_COLLECTIVE
    template <> struct _is_float_or_half<__half> : public _CG_STL_NAMESPACE::true_type {};
    template <> struct _is_float_or_half<__half2> : public _CG_STL_NAMESPACE::true_type {};
# endif
    template <typename Ty>
    using is_float_or_half = _is_float_or_half<typename _CG_STL_NAMESPACE::remove_cv<Ty>::type>;

    // Non-STL utility templates
    // Strips reference then cv-qualifiers (like C++20's remove_cvref_t).
    template <typename Ty>
    using remove_qual = typename _CG_STL_NAMESPACE::remove_cv<typename _CG_STL_NAMESPACE::remove_reference<Ty>::type>::type;

    // True when two types are identical after stripping refs/cv.
    template <typename TyLhs, typename TyRhs>
    using is_op_type_same = _CG_STL_NAMESPACE::is_same<remove_qual<TyLhs>, remove_qual<TyRhs>
    >;
#endif
75
+
76
+ template <typename TyTrunc>
77
+ _CG_STATIC_QUALIFIER TyTrunc vec3_to_linear(dim3 index, dim3 nIndex) {
78
+ return ((TyTrunc)index.z * nIndex.y * nIndex.x) +
79
+ ((TyTrunc)index.y * nIndex.x) +
80
+ (TyTrunc)index.x;
81
+ }
82
+
83
// Thin wrappers over the CUDA built-in variables for the current thread
// block (CTA).
namespace cta {

    // Block-wide barrier (barrier id 0).
    _CG_STATIC_QUALIFIER void sync()
    {
        __barrier_sync(0);
    }

    // Total number of threads in the block.
    _CG_STATIC_QUALIFIER unsigned int num_threads()
    {
        return static_cast<unsigned int>(blockDim.x * blockDim.y * blockDim.z);
    }

    // Linearized rank of the calling thread within its block.
    _CG_STATIC_QUALIFIER unsigned int thread_rank()
    {
        return vec3_to_linear<unsigned int>(threadIdx, blockDim);
    }

    // 3D index of this block within the grid.
    _CG_STATIC_QUALIFIER dim3 group_index()
    {
        return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
    }

    // 3D index of this thread within its block.
    _CG_STATIC_QUALIFIER dim3 thread_index()
    {
        return dim3(threadIdx.x, threadIdx.y, threadIdx.z);
    }

    // 3D dimensions of the block.
    _CG_STATIC_QUALIFIER dim3 dim_threads()
    {
        return dim3(blockDim.x, blockDim.y, blockDim.z);
    }

    // Legacy aliases
    _CG_STATIC_QUALIFIER unsigned int size()
    {
        return num_threads();
    }

    _CG_STATIC_QUALIFIER dim3 block_dim()
    {
        return dim_threads();
    }

};
127
+
128
// Friend-style accessor that centralizes privileged access to the internal
// state of coalesced groups and tiles (membership mask, meta-group fields),
// so group classes only need to befriend/expose this one type.
class _coalesced_group_data_access {
public:
    // Retrieve mask of coalesced groups and tiles
    template <typename TyGroup>
    _CG_STATIC_QUALIFIER unsigned int get_mask(const TyGroup &group) {
        return group.get_mask();
    }

    // Build a group object directly from a lane membership mask.
    template <typename TyGroup>
    _CG_STATIC_QUALIFIER TyGroup construct_from_mask(unsigned int mask) {
        return TyGroup(mask);
    }

    // Overwrite a group's meta-group rank/size (used by partitioning).
    template <typename TyGroup>
    _CG_STATIC_QUALIFIER void modify_meta_group(TyGroup &group, unsigned int mgRank, unsigned int mgSize) {
        group._data.coalesced.metaGroupRank = mgRank;
        group._data.coalesced.metaGroupSize = mgSize;
    }
};
147
+
148
namespace tile {
    // Per-tile-size constants: tiles per 32-lane warp, bitmask of tile 0's
    // lanes, mask of the lane index within a tile, and log2(tile size).
    template <unsigned int TileCount, unsigned int TileMask, unsigned int LaneMask, unsigned int ShiftCount>
    struct _tile_helpers{
        _CG_STATIC_CONST_DECL unsigned int tileCount = TileCount;
        _CG_STATIC_CONST_DECL unsigned int tileMask = TileMask;
        _CG_STATIC_CONST_DECL unsigned int laneMask = LaneMask;
        _CG_STATIC_CONST_DECL unsigned int shiftCount = ShiftCount;
    };

    // One specialization per supported power-of-two tile size.
    template <unsigned int> struct tile_helpers;
    template <> struct tile_helpers<32> : public _tile_helpers<1,  0xFFFFFFFF, 0x1F, 5> {};
    template <> struct tile_helpers<16> : public _tile_helpers<2,  0x0000FFFF, 0x0F, 4> {};
    template <> struct tile_helpers<8>  : public _tile_helpers<4,  0x000000FF, 0x07, 3> {};
    template <> struct tile_helpers<4>  : public _tile_helpers<8,  0x0000000F, 0x03, 2> {};
    template <> struct tile_helpers<2>  : public _tile_helpers<16, 0x00000003, 0x01, 1> {};
    template <> struct tile_helpers<1>  : public _tile_helpers<32, 0x00000001, 0x00, 0> {};
164
+
165
#ifdef _CG_CPP11_FEATURES
    namespace shfl {
        /***********************************************************************************
         * Recursively Sliced Shuffle
         * Purpose:
         *   Slices an input type a number of times into integral types so that shuffles
         *   are well defined
         * Expectations:
         *   This object *should not* be used from a reinterpret_cast pointer unless
         *   some alignment guarantees can be met. Use a memcpy to guarantee that loads
         *   from the integral types stored within are aligned and correct.
         **********************************************************************************/
        // Primary template: `intSized` selects between the base case (payload
        // fits in one int) and the recursive case below.
        template <unsigned int count, bool intSized = (count <= sizeof(int))>
        struct recursive_sliced_shuffle_helper;

        // Base case: a single int slice; apply the shuffle once.
        template <unsigned int count>
        struct recursive_sliced_shuffle_helper<count, true> {
            int val;

            template <typename TyFn>
            _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
                val = shfl(val);
            }
        };

        // Recursive case: shuffle this int slice, then recurse on the
        // remaining (count - sizeof(int)) bytes.
        template <unsigned int count>
        struct recursive_sliced_shuffle_helper<count, false> {
            int val;
            recursive_sliced_shuffle_helper<count - sizeof(int)> next;

            template <typename TyFn>
            _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
                val = shfl(val);
                next.invoke_shuffle(shfl);
            }
        };
    }
202
+
203
    // NOTE(review): every lambda here returns 0 and _shfl_internal returns a
    // value-initialized element -- this looks like a placeholder/fallback
    // backend for configurations without warp shuffle support; confirm the
    // intended target before relying on its results.
    struct _memory_shuffle {
        template <typename TyElem, typename TyShflFn>
        _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
            static_assert(sizeof(TyElem) <= 32, "Cooperative groups collectives are limited to types smaller than 32B");
            return TyElem{};
        }

        // Broadcast from srcRank (stubbed).
        template <typename TyElem, typename TyRet = remove_qual<TyElem>>
        _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
            auto shfl = [=](int val) -> int {
                return 0;
            };

            return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
        }

        // Shift down by delta (stubbed).
        template <typename TyElem, typename TyRet = remove_qual<TyElem>>
        _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
            auto shfl = [=](int val) -> int {
                return 0;
            };

            return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
        }

        // Shift up by delta (stubbed).
        template <typename TyElem, typename TyRet = remove_qual<TyElem>>
        _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
            auto shfl = [=](int val) -> int {
                return 0;
            };

            return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
        }

        // XOR-butterfly exchange (stubbed).
        template <typename TyElem, typename TyRet = remove_qual<TyElem>>
        _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
            auto shfl = [=](int val) -> int {
                return 0;
            };

            return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
        }
    };
246
+
247
+ /***********************************************************************************
248
+ * Intrinsic Device Function Shuffle
249
+ * Purpose:
250
+ * Uses a shuffle helper that has characteristics best suited for moving
251
+ * elements between threads
252
+ * Expectations:
253
+ * Object given will be forced into an l-value type so that it can be used
254
+ * with a helper structure that reinterprets the data into intrinsic compatible
255
+ * types
256
+ * Notes:
257
+ * !! TyRet is required so that objects are returned by value and not as
258
+ * dangling references depending on the value category of the passed object
259
+ **********************************************************************************/
260
+ struct _intrinsic_compat_shuffle {
261
+ template <unsigned int count>
262
+ using shfl_helper = shfl::recursive_sliced_shuffle_helper<count>;
263
+
264
+ template <typename TyElem, typename TyShflFn>
265
+ _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
266
+ static_assert(__is_trivially_copyable(TyElem), "Type is not compatible with device shuffle");
267
+ shfl_helper<sizeof(TyElem)> helper;
268
+ memcpy(&helper, &elem, sizeof(TyElem));
269
+ helper.invoke_shuffle(fn);
270
+ memcpy(&elem, &helper, sizeof(TyElem));
271
+ return elem;
272
+ }
273
+
274
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
275
+ _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
276
+ auto shfl = [=](int val) -> int {
277
+ return __shfl_sync(gMask, val, srcRank, threads);
278
+ };
279
+
280
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
281
+ }
282
+
283
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
284
+ _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
285
+ auto shfl = [=](int val) -> int {
286
+ return __shfl_down_sync(gMask, val, delta, threads);
287
+ };
288
+
289
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
290
+ }
291
+
292
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
293
+ _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
294
+ auto shfl = [=](int val) -> int {
295
+ return __shfl_up_sync(gMask, val, delta, threads);
296
+ };
297
+
298
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
299
+ }
300
+
301
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
302
+ _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
303
+ auto shfl = [=](int val) -> int {
304
+ return __shfl_xor_sync(gMask, val, lMask, threads);
305
+ };
306
+
307
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
308
+ }
309
+ };
310
+
311
+ struct _native_shuffle {
312
+ template <typename TyElem>
313
+ _CG_STATIC_QUALIFIER TyElem shfl(
314
+ TyElem elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
315
+ return static_cast<TyElem>(__shfl_sync(gMask, elem, srcRank, threads));
316
+ }
317
+
318
+ template <typename TyElem>
319
+ _CG_STATIC_QUALIFIER TyElem shfl_down(
320
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
321
+ return static_cast<TyElem>(__shfl_down_sync(gMask, elem, delta, threads));
322
+ }
323
+
324
+ template <typename TyElem>
325
+ _CG_STATIC_QUALIFIER TyElem shfl_up(
326
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
327
+ return static_cast<TyElem>(__shfl_up_sync(gMask, elem, delta, threads));
328
+ }
329
+
330
+ template <typename TyElem>
331
+ _CG_STATIC_QUALIFIER TyElem shfl_xor(
332
+ TyElem elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
333
+ return static_cast<TyElem>(__shfl_xor_sync(gMask, elem, lMask, threads));
334
+ }
335
+ };
336
+
337
+ // Almost all arithmetic types are supported by native shuffle
338
+ // Vector types are the exception
339
+ template <typename TyElem>
340
+ using use_native_shuffle = _CG_STL_NAMESPACE::integral_constant<
341
+ bool,
342
+ _CG_STL_NAMESPACE::is_integral<
343
+ remove_qual<TyElem>>::value ||
344
+ details::is_float_or_half<
345
+ remove_qual<TyElem>>::value
346
+ >;
347
+
348
+ constexpr unsigned long long _MemoryShuffleCutoff = 32;
349
+
350
+ template <typename TyElem,
351
+ bool IsNative = use_native_shuffle<TyElem>::value,
352
+ bool InMem = (sizeof(TyElem) > _MemoryShuffleCutoff)>
353
+ struct shuffle_dispatch;
354
+
355
+ template <typename TyElem>
356
+ struct shuffle_dispatch<TyElem, true, false> : public _native_shuffle {};
357
+
358
+ template <typename TyElem>
359
+ struct shuffle_dispatch<TyElem, false, false> : public _intrinsic_compat_shuffle {};
360
+
361
+ template <typename TyElem>
362
+ struct shuffle_dispatch<TyElem, false, true> : public _memory_shuffle {};
363
+
364
+ #endif //_CG_CPP11_FEATURES
365
+ };
366
+
367
+ namespace multi_grid {
368
+ struct multi_grid_functions;
369
+ };
370
+
371
+ namespace grid {
372
+ _CG_STATIC_QUALIFIER unsigned int barrier_arrive(unsigned int *bar) {
373
+ return details::sync_grids_arrive(bar);
374
+ }
375
+
376
+ _CG_STATIC_QUALIFIER void barrier_wait(unsigned int token, unsigned int *bar) {
377
+ details::sync_grids_wait(token, bar);
378
+ }
379
+
380
+ _CG_STATIC_QUALIFIER void sync(unsigned int *bar) {
381
+ unsigned int token = details::sync_grids_arrive(bar);
382
+ details::sync_grids_wait(token, bar);
383
+ }
384
+
385
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks()
386
+ {
387
+ // grid.y * grid.z -> [max(65535) * max(65535)] fits within 4b, promote after multiplication
388
+ // grid.x * (grid.y * grid.z) -> [max(2^31-1) * max(65535 * 65535)] exceeds 4b, promote before multiplication
389
+ return (unsigned long long)gridDim.x * (gridDim.y * gridDim.z);
390
+ }
391
+
392
+ _CG_STATIC_QUALIFIER unsigned long long num_threads()
393
+ {
394
+ return num_blocks() * cta::num_threads();
395
+ }
396
+
397
+ _CG_STATIC_QUALIFIER unsigned long long block_rank()
398
+ {
399
+ return vec3_to_linear<unsigned long long>(blockIdx, gridDim);
400
+ }
401
+
402
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank()
403
+ {
404
+ return block_rank() * cta::num_threads() + cta::thread_rank();
405
+ }
406
+
407
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
408
+ {
409
+ return dim3(gridDim.x, gridDim.y, gridDim.z);
410
+ }
411
+
412
+ _CG_STATIC_QUALIFIER dim3 block_index()
413
+ {
414
+ return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
415
+ }
416
+
417
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
418
+ {
419
+ return dim3(gridDim.x * blockDim.x, gridDim.y * blockDim.y, gridDim.z * blockDim.z);
420
+ }
421
+
422
+ _CG_STATIC_QUALIFIER dim3 thread_index()
423
+ {
424
+ return dim3(blockIdx.x * blockDim.x + threadIdx.x,
425
+ blockIdx.y * blockDim.y + threadIdx.y,
426
+ blockIdx.z * blockDim.z + threadIdx.z);
427
+ }
428
+
429
+ #if defined(_CG_HAS_CLUSTER_GROUP)
430
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
431
+ return __clusterGridDimInClusters();
432
+ }
433
+
434
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
435
+ const dim3 dimClusters = dim_clusters();
436
+ return dimClusters.x * dimClusters.y * dimClusters.z;
437
+ }
438
+
439
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
440
+ return __clusterIdx();
441
+ }
442
+
443
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
444
+ return vec3_to_linear<unsigned long long>(cluster_index(), dim_clusters());
445
+ }
446
+ #endif
447
+
448
+ // Legacy aliases
449
+ _CG_STATIC_QUALIFIER unsigned long long size()
450
+ {
451
+ return num_threads();
452
+ }
453
+
454
+ _CG_STATIC_QUALIFIER dim3 grid_dim()
455
+ {
456
+ return dim_blocks();
457
+ }
458
+ };
459
+
460
+
461
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
462
+
463
+ namespace multi_grid {
464
+ _CG_STATIC_QUALIFIER unsigned long long get_intrinsic_handle()
465
+ {
466
+ #if defined(__CUDACC_RDC__) || defined(__CUDACC_EWP__)
467
+ //this function is defined in device runtime library
468
+ //which requires separate compilation mode (__CUDACC_RDC__)
469
+ //or extended whole program mode (__CUDACC_EWP__)
470
+ return (cudaCGGetIntrinsicHandle(cudaCGScopeMultiGrid));
471
+ #else /* !(__CUDACC_RDC__ || __CUDACC_EWP__) */
472
+ return 0;
473
+ #endif /* __CUDACC_RDC__ || __CUDACC_EWP__ */
474
+ }
475
+
476
+ _CG_STATIC_QUALIFIER void sync(const unsigned long long handle)
477
+ {
478
+ #if defined(__CUDACC_RDC__) || defined(__CUDACC_EWP__)
479
+ //this function is defined in device runtime library
480
+ //which requires separate compilation mode (__CUDACC_RDC__)
481
+ //or extended whole program mode (__CUDACC_EWP__)
482
+ cudaError_t err = cudaCGSynchronize(handle, 0);
483
+ #endif /* __CUDACC_RDC__ || __CUDACC_EWP__ */
484
+ }
485
+
486
+ _CG_STATIC_QUALIFIER unsigned int size(const unsigned long long handle)
487
+ {
488
+ unsigned int numThreads = 0;
489
+ #if defined(__CUDACC_RDC__) || defined(__CUDACC_EWP__)
490
+ //this function is defined in device runtime library
491
+ //which requires separate compilation mode (__CUDACC_RDC__)
492
+ //or extended whole program mode (__CUDACC_EWP__)
493
+ cudaCGGetSize(&numThreads, NULL, handle);
494
+ #endif /* __CUDACC_RDC__ || __CUDACC_EWP__ */
495
+ return numThreads;
496
+ }
497
+
498
+ _CG_STATIC_QUALIFIER unsigned int thread_rank(const unsigned long long handle)
499
+ {
500
+ unsigned int threadRank = 0;
501
+ #if defined(__CUDACC_RDC__) || defined(__CUDACC_EWP__)
502
+ //this function is defined in device runtime library
503
+ //which requires separate compilation mode (__CUDACC_RDC__)
504
+ //or extended whole program mode (__CUDACC_EWP__)
505
+ cudaCGGetRank(&threadRank, NULL, handle);
506
+ #endif /* __CUDACC_RDC__ || __CUDACC_EWP__ */
507
+ return threadRank;
508
+ }
509
+
510
+ _CG_STATIC_QUALIFIER unsigned int grid_rank(const unsigned long long handle)
511
+ {
512
+ unsigned int gridRank = 0;
513
+ #if defined(__CUDACC_RDC__) || defined(__CUDACC_EWP__)
514
+ //this function is defined in device runtime library
515
+ //which requires separate compilation mode (__CUDACC_RDC__)
516
+ //or extended whole program mode (__CUDACC_EWP__)
517
+ cudaCGGetRank(NULL, &gridRank, handle);
518
+ #endif /* __CUDACC_RDC__ || __CUDACC_EWP__ */
519
+ return gridRank;
520
+ }
521
+
522
+ _CG_STATIC_QUALIFIER unsigned int num_grids(const unsigned long long handle)
523
+ {
524
+ unsigned int numGrids = 0;
525
+ #if defined(__CUDACC_RDC__) || defined(__CUDACC_EWP__)
526
+ //this function is defined in device runtime library
527
+ //which requires separate compilation mode (__CUDACC_RDC__)
528
+ //or extended whole program mode (__CUDACC_EWP__)
529
+ cudaCGGetSize(NULL, &numGrids, handle);
530
+ #endif /* __CUDACC_RDC__ || __CUDACC_EWP__ */
531
+ return numGrids;
532
+ }
533
+
534
+ # ifdef _CG_CPP11_FEATURES
535
+ struct multi_grid_functions {
536
+ decltype(multi_grid::get_intrinsic_handle) *get_intrinsic_handle;
537
+ decltype(multi_grid::sync) *sync;
538
+ decltype(multi_grid::size) *size;
539
+ decltype(multi_grid::thread_rank) *thread_rank;
540
+ decltype(multi_grid::grid_rank) *grid_rank;
541
+ decltype(multi_grid::num_grids) *num_grids;
542
+ };
543
+
544
+ template <typename = void>
545
+ _CG_STATIC_QUALIFIER const multi_grid_functions* load_grid_intrinsics() {
546
+ __constant__ static const multi_grid_functions mgf {
547
+ &multi_grid::get_intrinsic_handle,
548
+ &multi_grid::sync,
549
+ &multi_grid::size,
550
+ &multi_grid::thread_rank,
551
+ &multi_grid::grid_rank,
552
+ &multi_grid::num_grids
553
+ };
554
+
555
+ return &mgf;
556
+ }
557
+ # endif
558
+ };
559
+ #endif
560
+
561
+ #if defined(_CG_HAS_CLUSTER_GROUP)
562
+ namespace cluster {
563
+
564
+ _CG_STATIC_QUALIFIER bool isReal()
565
+ {
566
+ return __clusterDimIsSpecified();
567
+ }
568
+
569
+ _CG_STATIC_QUALIFIER void barrier_arrive()
570
+ {
571
+ __cluster_barrier_arrive();
572
+ }
573
+
574
+ _CG_STATIC_QUALIFIER void barrier_wait()
575
+ {
576
+ __cluster_barrier_wait();
577
+ }
578
+
579
+ _CG_STATIC_QUALIFIER void sync()
580
+ {
581
+ barrier_arrive();
582
+ barrier_wait();
583
+ }
584
+
585
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
586
+ {
587
+ return __cluster_query_shared_rank(addr);
588
+ }
589
+
590
+ template <typename T>
591
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
592
+ {
593
+ return static_cast<T*>(__cluster_map_shared_rank(addr, rank));
594
+ }
595
+
596
+ _CG_STATIC_QUALIFIER dim3 block_index()
597
+ {
598
+ return __clusterRelativeBlockIdx();
599
+ }
600
+
601
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
602
+ {
603
+ return __clusterRelativeBlockRank();
604
+ }
605
+
606
+ _CG_STATIC_QUALIFIER dim3 thread_index()
607
+ {
608
+ const dim3 blockIndex = block_index();
609
+ return dim3(blockIndex.x * blockDim.x + threadIdx.x,
610
+ blockIndex.y * blockDim.y + threadIdx.y,
611
+ blockIndex.z * blockDim.z + threadIdx.z);
612
+ }
613
+
614
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
615
+ {
616
+ return block_rank() * cta::num_threads() + cta::thread_rank();
617
+ }
618
+
619
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
620
+ {
621
+ return __clusterDim();
622
+ }
623
+
624
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
625
+ {
626
+ return __clusterSizeInBlocks();
627
+ }
628
+
629
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
630
+ {
631
+ const dim3 dimBlocks = dim_blocks();
632
+ const unsigned int x = dimBlocks.x * blockDim.x;
633
+ const unsigned int y = dimBlocks.y * blockDim.y;
634
+ const unsigned int z = dimBlocks.z * blockDim.z;
635
+ return dim3(x, y, z);
636
+ }
637
+
638
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
639
+ {
640
+ return num_blocks() * cta::num_threads();
641
+ }
642
+
643
+ };
644
+ #endif
645
+
646
+ _CG_STATIC_QUALIFIER unsigned int laneid()
647
+ {
648
+ unsigned int laneid;
649
+ asm ("mov.u32 %0, %%laneid;" : "=r"(laneid));
650
+ return laneid;
651
+ }
652
+
653
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_eq()
654
+ {
655
+ unsigned int lanemask32_eq;
656
+ asm ("mov.u32 %0, %%lanemask_eq;" : "=r"(lanemask32_eq));
657
+ return (lanemask32_eq);
658
+ }
659
+
660
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_lt()
661
+ {
662
+ unsigned int lanemask32_lt;
663
+ asm ("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask32_lt));
664
+ return (lanemask32_lt);
665
+ }
666
+
667
+ _CG_STATIC_QUALIFIER void abort()
668
+ {
669
+ _CG_ABORT();
670
+ }
671
+
672
+ template <typename Ty>
673
+ _CG_QUALIFIER void assert_if_not_arithmetic() {
674
+ #ifdef _CG_CPP11_FEATURES
675
+ static_assert(
676
+ _CG_STL_NAMESPACE::is_integral<Ty>::value ||
677
+ details::is_float_or_half<Ty>::value,
678
+ "Error: Ty is neither integer or float"
679
+ );
680
+ #endif //_CG_CPP11_FEATURES
681
+ }
682
+
683
+ #ifdef _CG_CPP11_FEATURES
684
+ _CG_STATIC_QUALIFIER constexpr unsigned int log2(unsigned int x) {
685
+ return x == 1 ? 0 : 1 + log2(x / 2);
686
+ }
687
+ #endif //_CG_CPP11_FEATURES
688
+
689
+ }; // !Namespace internal
690
+
691
+ _CG_END_NAMESPACE
692
+
693
+ #endif /* !_COOPERATIVE_GROUPS_HELPERS_H_ */
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+
50
+ #include <nv/target>
51
+
52
+ #ifndef _CG_INFO_H_
53
+ #define _CG_INFO_H_
54
+ /*
55
+ ** Define: _CG_VERSION
56
+ */
57
+ #define _CG_VERSION 1000
58
+
59
+ /*
60
+ ** Define: _CG_ABI_VERSION
61
+ */
62
+ #ifndef _CG_ABI_VERSION
63
+ # define _CG_ABI_VERSION 1
64
+ #endif
65
+
66
+ /*
67
+ ** Define: _CG_ABI_EXPERIMENTAL
68
+ ** Desc: If enabled, sets all features enabled (ABI-breaking or experimental)
69
+ */
70
+ #if defined(_CG_ABI_EXPERIMENTAL)
71
+ #endif
72
+
73
+ #define _CG_CONCAT_INNER(x, y) x ## y
74
+ #define _CG_CONCAT_OUTER(x, y) _CG_CONCAT_INNER(x, y)
75
+ #define _CG_NAMESPACE _CG_CONCAT_OUTER(__v, _CG_ABI_VERSION)
76
+
77
+ #define _CG_BEGIN_NAMESPACE \
78
+ namespace cooperative_groups { namespace _CG_NAMESPACE {
79
+ #define _CG_END_NAMESPACE \
80
+ }; using namespace _CG_NAMESPACE; };
81
+
82
+ #if (defined(__cplusplus) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MSC_VER >= 1900))
83
+ # define _CG_CPP11_FEATURES
84
+ #endif
85
+
86
+ #if !defined(_CG_QUALIFIER)
87
+ # define _CG_QUALIFIER __forceinline__ __device__
88
+ #endif
89
+ #if !defined(_CG_STATIC_QUALIFIER)
90
+ # define _CG_STATIC_QUALIFIER static __forceinline__ __device__
91
+ #endif
92
+ #if !defined(_CG_CONSTEXPR_QUALIFIER)
93
+ # if defined(_CG_CPP11_FEATURES)
94
+ # define _CG_CONSTEXPR_QUALIFIER constexpr __forceinline__ __device__
95
+ # else
96
+ # define _CG_CONSTEXPR_QUALIFIER _CG_QUALIFIER
97
+ # endif
98
+ #endif
99
+ #if !defined(_CG_STATIC_CONSTEXPR_QUALIFIER)
100
+ # if defined(_CG_CPP11_FEATURES)
101
+ # define _CG_STATIC_CONSTEXPR_QUALIFIER static constexpr __forceinline__ __device__
102
+ # else
103
+ # define _CG_STATIC_CONSTEXPR_QUALIFIER _CG_STATIC_QUALIFIER
104
+ # endif
105
+ #endif
106
+
107
+ #if defined(_MSC_VER)
108
+ # define _CG_DEPRECATED __declspec(deprecated)
109
+ #else
110
+ # define _CG_DEPRECATED __attribute__((deprecated))
111
+ #endif
112
+
113
+ #if defined(__CUDA_MINIMUM_ARCH__)
114
+ # define _CG_CUDA_ARCH __CUDA_MINIMUM_ARCH__
115
+ #elif defined(__CUDA_ARCH__)
116
+ # define _CG_CUDA_ARCH __CUDA_ARCH__
117
+ #endif
118
+
119
+ #if (_CG_CUDA_ARCH >= 600) || !defined(_CG_CUDA_ARCH)
120
+ # define _CG_HAS_GRID_GROUP
121
+ #endif
122
+ #if (_CG_CUDA_ARCH >= 600) || !defined(_CG_CUDA_ARCH)
123
+ # define _CG_HAS_MULTI_GRID_GROUP
124
+ #endif
125
+ #if (_CG_CUDA_ARCH >= 700) || !defined(_CG_CUDA_ARCH)
126
+ # define _CG_HAS_MATCH_COLLECTIVE
127
+ #endif
128
+
129
+ #if ((_CG_CUDA_ARCH >= 800) || !defined(_CG_CUDA_ARCH)) && !defined(_CG_USER_PROVIDED_SHARED_MEMORY)
130
+ # define _CG_HAS_RESERVED_SHARED
131
+ #endif
132
+
133
+ #if ((_CG_CUDA_ARCH >= 900) || !defined(_CG_CUDA_ARCH)) && \
134
+ (defined(__NVCC__) || defined(__CUDACC_RTC__) || defined(_CG_CLUSTER_INTRINSICS_AVAILABLE)) && \
135
+ defined(_CG_CPP11_FEATURES)
136
+ # define _CG_HAS_CLUSTER_GROUP
137
+ #endif
138
+
139
+ #if (_CG_CUDA_ARCH >= 900) || !defined(_CG_CUDA_ARCH)
140
+ # define _CG_HAS_INSTR_ELECT
141
+ #endif
142
+
143
+ // Has __half and __half2
144
+ // Only usable if you include the cuda_fp16.h extension, and
145
+ // _before_ including cooperative_groups.h
146
+ #ifdef __CUDA_FP16_TYPES_EXIST__
147
+ # define _CG_HAS_FP16_COLLECTIVE
148
+ #endif
149
+
150
+ // Include libcu++ where supported.
151
+ #if defined(_CG_CPP11_FEATURES) && !defined(__ibmxl__) && (!defined(_MSC_VER) || defined(_WIN64)) && \
152
+ !defined(_CG_LIMIT_INCLUDED_DEPENDENCIES)
153
+ # define _CG_USE_CUDA_STL
154
+ #else
155
+ # define _CG_USE_OWN_TRAITS
156
+ #endif
157
+
158
+ #if defined(_CG_USE_CUDA_STL) && !defined(__QNX__) && (!defined(__CUDA_ARCH__) || \
159
+ ((!defined(_MSC_VER) && __CUDA_ARCH__ >= 600) || (defined(_MSC_VER) && __CUDA_ARCH__ >= 700)))
160
+ # define _CG_HAS_STL_ATOMICS
161
+ #endif
162
+
163
+ #ifdef _CG_CPP11_FEATURES
164
+ // Use cuda::std:: for type_traits
165
+ # if defined(_CG_USE_CUDA_STL)
166
+ # define _CG_STL_NAMESPACE cuda::std
167
+ # include <cuda/std/type_traits>
168
+ // Use CG's implementation of type traits
169
+ # else
170
+ # define _CG_STL_NAMESPACE cooperative_groups::details::templates
171
+ # endif
172
+ #endif
173
+
174
+ #ifdef _CG_CPP11_FEATURES
175
+ # define _CG_STATIC_CONST_DECL static constexpr
176
+ # define _CG_CONST_DECL constexpr
177
+ #else
178
+ # define _CG_STATIC_CONST_DECL static const
179
+ # define _CG_CONST_DECL const
180
+ #endif
181
+
182
+ #if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__)
183
+ # define _CG_ASM_PTR_CONSTRAINT "r"
184
+ #else
185
+ # define _CG_ASM_PTR_CONSTRAINT "l"
186
+ #endif
187
+
188
+ /*
189
+ ** Define: CG_DEBUG
190
+ ** What: Enables various runtime safety checks
191
+ */
192
+ #if defined(__CUDACC_DEBUG__) && defined(CG_DEBUG) && !defined(NDEBUG)
193
+ # define _CG_DEBUG
194
+ #endif
195
+
196
+ #if defined(_CG_DEBUG)
197
+ # include <assert.h>
198
+ # define _CG_ASSERT(x) assert((x));
199
+ # define _CG_ABORT() assert(0);
200
+ #else
201
+ # define _CG_ASSERT(x)
202
+ # define _CG_ABORT() __trap();
203
+ #endif
204
+
205
+ _CG_BEGIN_NAMESPACE
206
+
207
+ namespace details {
208
+ _CG_STATIC_CONST_DECL unsigned int default_max_block_size = 1024;
209
+
210
+ #if defined(_CG_CPP11_FEATURES) && !defined(_CG_USE_CUDA_STL)
211
+ namespace templates {
212
+
213
+ /**
214
+ * Integral constants
215
+ **/
216
+ template <typename Ty, Ty Val>
217
+ struct integral_constant {
218
+ static constexpr Ty value = Val;
219
+ typedef Ty type;
220
+
221
+ _CG_QUALIFIER constexpr operator type() const noexcept { return value; }
222
+ _CG_QUALIFIER constexpr type operator()() const noexcept { return value; }
223
+ };
224
+
225
+ typedef integral_constant<bool, true> true_type;
226
+ typedef integral_constant<bool, false> false_type;
227
+
228
+ /**
229
+ * CV Qualifiers
230
+ **/
231
+ template <class Ty> struct is_lvalue_reference : public details::templates::false_type {};
232
+ template <class Ty> struct is_lvalue_reference<Ty&> : public details::templates::true_type {};
233
+
234
+ template <class Ty> struct remove_reference {typedef Ty type;};
235
+ template <class Ty> struct remove_reference<Ty&> {typedef Ty type;};
236
+ template <class Ty> struct remove_reference<Ty&&> {typedef Ty type;};
237
+
238
+ template <class Ty>
239
+ using remove_reference_t = typename details::templates::remove_reference<Ty>::type;
240
+
241
+ template <class Ty> struct remove_const {typedef Ty type;};
242
+ template <class Ty> struct remove_const<const Ty> {typedef Ty type;};
243
+
244
+ template <class Ty> struct remove_volatile {typedef Ty type;};
245
+ template <class Ty> struct remove_volatile<volatile Ty> {typedef Ty type;};
246
+
247
+ template <class Ty> struct remove_cv {typedef typename details::templates::remove_volatile<typename details::templates::remove_const<Ty>::type>::type type;};
248
+
249
+ template <class Ty>
250
+ using remove_cv_t = typename details::templates::remove_cv<Ty>::type;
251
+
252
+ template <class Ty>
253
+ _CG_QUALIFIER Ty&& forward(remove_reference_t<Ty> &t) noexcept {
254
+ return static_cast<Ty&&>(t);
255
+ }
256
+
257
+ template <class Ty>
258
+ _CG_QUALIFIER Ty&& forward(remove_reference_t<Ty> &&t) noexcept {
259
+ static_assert(!details::templates::is_lvalue_reference<Ty>::value, "Forwarding an rvalue as an lvalue is not allowed.");
260
+ return static_cast<Ty&&>(t);
261
+ }
262
+
263
+ /**
264
+ * is_integral
265
+ **/
266
+ template <class Ty> struct _is_integral : public details::templates::false_type {};
267
+ template <> struct _is_integral<bool> : public details::templates::true_type {};
268
+ template <> struct _is_integral<char> : public details::templates::true_type {};
269
+ template <> struct _is_integral<unsigned char> : public details::templates::true_type {};
270
+ template <> struct _is_integral<short> : public details::templates::true_type {};
271
+ template <> struct _is_integral<unsigned short> : public details::templates::true_type {};
272
+ template <> struct _is_integral<int> : public details::templates::true_type {};
273
+ template <> struct _is_integral<unsigned int> : public details::templates::true_type {};
274
+ template <> struct _is_integral<long> : public details::templates::true_type {};
275
+ template <> struct _is_integral<long long> : public details::templates::true_type {};
276
+ template <> struct _is_integral<unsigned long> : public details::templates::true_type {};
277
+ template <> struct _is_integral<unsigned long long> : public details::templates::true_type {};
278
+ //Vector type support?
279
+
280
+ template <typename Ty>
281
+ struct is_integral : public details::templates::_is_integral<typename details::templates::remove_cv<Ty>::type> {};
282
+
283
+ /**
284
+ * is_floating_point
285
+ **/
286
+ template <class Ty> struct _is_floating_point : public details::templates::false_type {};
287
+ template <> struct _is_floating_point<float> : public details::templates::true_type {};
288
+ template <> struct _is_floating_point<double> : public details::templates::true_type {};
289
+ template <> struct _is_floating_point<long double> : public details::templates::true_type {};
290
+ # ifdef __CUDA_FP16_TYPES_EXIST__
291
+ template <> struct _is_floating_point<__half> : public details::templates::true_type {};
292
+ template <> struct _is_floating_point<__half2> : public details::templates::true_type {};
293
+ # endif
294
+ //Vector type support?
295
+
296
+ template <typename Ty>
297
+ struct is_floating_point : public details::templates::_is_floating_point<typename details::templates::remove_cv<Ty>::type> {};
298
+
299
+ template <class T>
300
+ struct is_arithmetic : details::templates::integral_constant<
301
+ bool,
302
+ details::templates::is_integral<T>::value ||
303
+ details::templates::is_floating_point<T>::value> {};
304
+
305
+ template <typename Ty, bool = details::templates::is_arithmetic<Ty>::value>
306
+ struct _is_unsigned : details::templates::integral_constant<bool, Ty(0) < Ty(-1)> {};
307
+
308
+ template <typename Ty>
309
+ struct _is_unsigned<Ty,false> : details::templates::false_type {};
310
+
311
+ template <typename Ty>
312
+ struct is_unsigned : _is_unsigned<typename details::templates::remove_cv<Ty>::type> {};
313
+
314
+ template <typename Ty> struct _is_pointer : public details::templates::false_type {};
315
+ template <typename Ty> struct _is_pointer<Ty*> : public details::templates::true_type {};
316
+
317
+ template <typename Ty>
318
+ struct is_pointer : _is_pointer<typename details::templates::remove_cv<Ty>::type> {};
319
+
320
+ /**
321
+ * programmatic type traits
322
+ **/
323
+ template<bool B, class Ty = void>
324
+ struct enable_if {};
325
+
326
+ template<class Ty>
327
+ struct enable_if<true, Ty> { typedef Ty type; };
328
+
329
+ template<bool Cond, typename Ty = void>
330
+ using enable_if_t = typename details::templates::enable_if<Cond, Ty>::type;
331
+
332
+ template<class Ty1, class Ty2>
333
+ struct is_same : details::templates::false_type {};
334
+
335
+ template<class Ty>
336
+ struct is_same<Ty, Ty> : details::templates::true_type {};
337
+
338
+ } // templates
339
+ #endif // _CG_CPP11_FEATURES
340
+
341
+ } // details
342
+ _CG_END_NAMESPACE
343
+
344
+
345
+ #endif // _CG_INFO_H_
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_INVOKE_H
51
+ #define _CG_INVOKE_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename Group>
63
+ struct _elect_group_supported : _CG_STL_NAMESPACE::false_type {};
64
+ #ifdef _CG_HAS_INSTR_ELECT
65
+ template<>
66
+ struct _elect_group_supported<coalesced_group> : _CG_STL_NAMESPACE::true_type {};
67
+ template<unsigned int Size, typename Parent>
68
+ struct _elect_group_supported<thread_block_tile<Size, Parent>> :
69
+ _CG_STL_NAMESPACE::integral_constant<bool, (Size <= 32)> {};
70
+ #endif
71
+
72
+ template <typename Group>
73
+ struct elect_group_supported : public _elect_group_supported<details::remove_qual<Group>> {};
74
+
75
+ template<typename Group>
76
+ _CG_STATIC_QUALIFIER bool elect_one(const Group& group, unsigned int mask, unsigned int& leader_lane) {
77
+ int is_leader = 0;
78
+ #ifdef _CG_HAS_INSTR_ELECT
79
+ asm("{\n\t"
80
+ " .reg .pred p;\n\t"
81
+ " elect.sync %0|p, %2;\n\t"
82
+ " @p mov.s32 %1, 1;\n\t"
83
+ "}"
84
+ : "+r"(leader_lane), "+r"(is_leader) : "r" (mask));
85
+ #endif
86
+ return is_leader;
87
+ }
88
+
89
+ template<bool UseElect>
90
+ struct invoke_one_impl {};
91
+
92
+ template<>
93
+ struct invoke_one_impl<true> {
94
+ template<typename Group, typename Fn, typename... Args>
95
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
96
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
97
+ unsigned int leader_lane = 0;
98
+
99
+ if (elect_one(group, mask, leader_lane)) {
100
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
101
+ }
102
+ }
103
+
104
+ template<typename Group, typename Fn, typename... Args>
105
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
106
+ -> typename _CG_STL_NAMESPACE::remove_reference<
107
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
108
+
109
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
110
+ details::remove_qual<ResultType> result;
111
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
112
+ unsigned int leader_lane = 0;
113
+
114
+ if (elect_one(group, mask, leader_lane)) {
115
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
116
+ }
117
+
118
+ // Need to use low level api instead of group.shfl, because elect_one returns lane id, not group rank.
119
+ return tile::shuffle_dispatch<ResultType>::shfl(result, mask, leader_lane, 32);
120
+ }
121
+ };
122
+
123
+ template<>
124
+ struct invoke_one_impl<false> {
125
+ template<typename Group, typename Fn, typename... Args>
126
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
127
+ if (group.thread_rank() == 0) {
128
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
129
+ }
130
+ }
131
+
132
+ template<typename Group, typename Fn, typename... Args>
133
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
134
+ -> typename _CG_STL_NAMESPACE::remove_reference<
135
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
136
+
137
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
138
+ details::remove_qual<ResultType> result;
139
+
140
+ if (group.thread_rank() == 0) {
141
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
142
+ }
143
+
144
+ return group.shfl(result, 0);
145
+ }
146
+ };
147
+
148
+
149
+ }; // namespace details
150
+
151
+ template<typename Group, typename Fn, typename... Args>
152
+ _CG_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
153
+ using impl = details::invoke_one_impl<details::elect_group_supported<Group>::value>;
154
+ impl::invoke_one(group, _CG_STL_NAMESPACE::forward<Fn>(fn), _CG_STL_NAMESPACE::forward<Args>(args)...);
155
+ }
156
+
157
+ template<typename Fn, typename... Args>
158
+ _CG_QUALIFIER auto invoke_one_broadcast(const coalesced_group& group, Fn&& fn, Args&&... args)
159
+ -> typename _CG_STL_NAMESPACE::remove_reference<
160
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
161
+
162
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
163
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
164
+ "For invocables returning void invoke_one should be used instead");
165
+ using impl = details::invoke_one_impl<details::elect_group_supported<coalesced_group>::value>;
166
+ return impl::invoke_one_broadcast(group,
167
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
168
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
169
+ }
170
+
171
+ template<unsigned int Size, typename Parent, typename Fn, typename... Args>
172
+ _CG_QUALIFIER auto invoke_one_broadcast(const thread_block_tile<Size, Parent>& group, Fn&& fn, Args&&... args)
173
+ -> typename _CG_STL_NAMESPACE::remove_reference<
174
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
175
+
176
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
177
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
178
+ "For invocables returning void invoke_one should be used instead");
179
+ using impl = details::invoke_one_impl<details::elect_group_supported<thread_block_tile<Size, Parent>>::value>;
180
+ return impl::invoke_one_broadcast(group,
181
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
182
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
183
+ }
184
+
185
+ _CG_END_NAMESPACE
186
+
187
+ #endif //_CG_CPP11_FEATURES
188
+
189
+ #endif // _CG_INVOKE_H
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_MEMORY_H_
50
+ # define _COOPERATIVE_GROUPS_MEMORY_H_
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+ namespace details {
58
+ _CG_STATIC_CONST_DECL int scratch_num_reserved_bytes = 12;
59
+
60
+ // Should only be called for SM80+
61
+ _CG_STATIC_QUALIFIER void* reserved_shared_ptr()
62
+ {
63
+ unsigned long long ptr = 0;
64
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
65
+ (asm ("{\n\t"
66
+ " .reg .u32 start;\n\t"
67
+ " .reg .u64 extended;\n\t"
68
+ " mov.u32 start, %%reserved_smem_offset_1;\n\t"
69
+ " cvt.u64.u32 extended, start;\n\t"
70
+ " cvta.shared.u64 %0, extended;\n\t"
71
+ "}"
72
+ : "=l"(ptr));)
73
+ )
74
+ return reinterpret_cast<void*>(ptr);
75
+ }
76
+
77
+ struct multi_warp_scratch {
78
+ // One barrier per possible size of the group.
79
+ _CG_STATIC_CONST_DECL unsigned int memory_barriers_count = 5;
80
+ _CG_STATIC_CONST_DECL size_t sync_memory_size = memory_barriers_count * sizeof(barrier_t);
81
+
82
+ using communication_type = unsigned long long;
83
+ _CG_STATIC_CONST_DECL size_t communication_size = sizeof(communication_type);
84
+
85
+ // Layout of the scratch space:
86
+ barrier_t barriers[memory_barriers_count];
87
+ char reserved[scratch_num_reserved_bytes]; // Reserve 12 bytes for future use
88
+ communication_type communication_memory[default_max_block_size / 32];
89
+
90
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int scratch_size_needed(unsigned int max_block_size) {
91
+ // One slot of collectives memory per warp.
92
+ return scratch_num_reserved_bytes + (unsigned int)sync_memory_size + max_block_size / 32 * (unsigned int)communication_size;
93
+ }
94
+
95
+ _CG_QUALIFIER void init_barriers(unsigned int thread_rank) {
96
+ if (thread_rank < memory_barriers_count) {
97
+ barriers[thread_rank] = 0;
98
+ }
99
+ }
100
+ };
101
+
102
+ #if defined(_CG_HAS_RESERVED_SHARED)
103
+ // CG can expect at least 288 bytes available in reserved shared
104
+ static_assert(sizeof(multi_warp_scratch) <= 288, "multi-warp scratch size is too large");
105
+ #endif
106
+
107
+ // Make sure the structure can fit into the user provided memory
108
+ static_assert(sizeof(multi_warp_scratch) <= multi_warp_scratch::scratch_size_needed(default_max_block_size),
109
+ "multi-warp scratch size is too large");
110
+
111
+
112
+ _CG_QUALIFIER multi_warp_scratch* get_scratch_ptr(void* user_scratch) {
113
+ void *ptr;
114
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
115
+ (ptr = reserved_shared_ptr();)
116
+ ,
117
+ (ptr = user_scratch;)
118
+ )
119
+ return static_cast<multi_warp_scratch*>(ptr);
120
+
121
+ }
122
+
123
+ }
124
+
125
+ template <unsigned int MaxBlockSize = details::default_max_block_size>
126
+ struct __align__(details::multi_warp_scratch::communication_size) block_tile_memory {
127
+ private:
128
+ #if !defined(_CG_HAS_RESERVED_SHARED)
129
+ char scratch[details::multi_warp_scratch::scratch_size_needed(MaxBlockSize)];
130
+ #endif
131
+ };
132
+ #endif
133
+
134
+ _CG_END_NAMESPACE
135
+
136
+ #endif /* !_COOPERATIVE_GROUPS_MEMORY_H_ */
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_PARTITIONING_H
51
+ #define _CG_PARTITIONING_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ _CG_BEGIN_NAMESPACE
57
+
58
+ namespace details {
59
+
60
+ template <typename TyGroup>
61
+ _CG_STATIC_QUALIFIER coalesced_group _binary_partition(const TyGroup &tile, bool pred) {
62
+ const unsigned int fullMask = ~0u;
63
+
64
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
65
+ unsigned int predMask = pred ? 0 : fullMask;
66
+ unsigned int setMask = __ballot_sync(thisMask, pred);
67
+
68
+ if (setMask == thisMask || setMask == 0) {
69
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(thisMask);
70
+ _coalesced_group_data_access::modify_meta_group(subTile, 0, 1);
71
+ return subTile;
72
+ }
73
+ else {
74
+ unsigned int subMask = thisMask & (setMask ^ predMask);
75
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
76
+ _coalesced_group_data_access::modify_meta_group(subTile, pred, 2);
77
+ return subTile;
78
+ }
79
+ }
80
+
81
+ #if defined(_CG_HAS_MATCH_COLLECTIVE) && defined(_CG_CPP11_FEATURES)
82
+ template <typename TyPredicate>
83
+ struct _labeled_partition_dispatch {
84
+ template <typename TyGroup>
85
+ _CG_QUALIFIER coalesced_group operator()(const TyGroup &tile, TyPredicate pred) {
86
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
87
+ unsigned int subMask = __match_any_sync(thisMask, pred);
88
+ unsigned int laneId = details::laneid();
89
+
90
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
91
+
92
+ int leaderLaneId = __ffs(subMask) - 1;
93
+ bool isLeader = leaderLaneId == laneId;
94
+ unsigned int leaderMask = __ballot_sync(thisMask, isLeader);
95
+
96
+ // Count leaders with lower laneid, that will be the meta rank of this tile
97
+ unsigned int tileRank = __popc(leaderMask & ((1 << leaderLaneId) - 1));
98
+
99
+ _coalesced_group_data_access::modify_meta_group(subTile, tileRank, __popc(leaderMask));
100
+
101
+ return subTile;
102
+ }
103
+ };
104
+
105
+ template <>
106
+ struct _labeled_partition_dispatch<bool> {
107
+ template <typename TyGroup>
108
+ _CG_QUALIFIER coalesced_group operator()(const TyGroup &tile, bool pred) {
109
+ return _binary_partition(tile, pred);
110
+ }
111
+ };
112
+
113
+ template <typename TyPredicate>
114
+ struct _labeled_partition_dispatch<TyPredicate*> {
115
+ template <typename TyGroup>
116
+ _CG_QUALIFIER coalesced_group operator()(const TyGroup &tile, TyPredicate* pred) {
117
+ auto impl = _labeled_partition_dispatch<unsigned long long>();
118
+ return impl(tile, reinterpret_cast<unsigned long long>(pred));
119
+ }
120
+ };
121
+ #endif
122
+ }; // namespace details
123
+
124
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const coalesced_group &tile, bool pred) {
125
+ return details::_binary_partition(tile, pred);
126
+ }
127
+
128
+ template <unsigned int Size, typename ParentT>
129
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const thread_block_tile<Size, ParentT> &tile, bool pred) {
130
+ #ifdef _CG_CPP11_FEATURES
131
+ static_assert(Size <= 32, "Binary partition is available only for tiles of size smaller or equal to 32");
132
+ #endif
133
+ return details::_binary_partition(tile, pred);
134
+ }
135
+
136
+
137
+ #if defined(_CG_HAS_MATCH_COLLECTIVE) && defined(_CG_CPP11_FEATURES)
138
+ template <typename TyPredicate>
139
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const coalesced_group &tile, TyPredicate pred) {
140
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value ||
141
+ _CG_STL_NAMESPACE::is_pointer<TyPredicate>::value,
142
+ "labeled_partition predicate must be an integral or pointer type");
143
+ auto dispatch = details::_labeled_partition_dispatch<details::remove_qual<TyPredicate>>();
144
+ return dispatch(tile, pred);
145
+ }
146
+
147
+ template <typename TyPredicate, unsigned int Size, typename ParentT>
148
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const thread_block_tile<Size, ParentT> &tile, TyPredicate pred) {
149
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value ||
150
+ _CG_STL_NAMESPACE::is_pointer<TyPredicate>::value,
151
+ "labeled_partition predicate must be an integral or pointer type");
152
+ static_assert(Size <= 32, "Labeled partition is available only for tiles of size smaller or equal to 32");
153
+ auto dispatch = details::_labeled_partition_dispatch<details::remove_qual<TyPredicate>>();
154
+ return dispatch(tile, pred);
155
+ }
156
+ #endif
157
+
158
+ _CG_END_NAMESPACE
159
+
160
+ #endif // _CG_PARTITIONING_H
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h ADDED
@@ -0,0 +1,424 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_REDUCE_H_
50
+ #define _CG_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "coalesced_reduce.h"
55
+ #include "functional.h"
56
+ #include "cooperative_groups.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <class Ty>
63
+ using _redux_is_add_supported = _CG_STL_NAMESPACE::integral_constant<
64
+ bool,
65
+ _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) <= 4)>;
66
+
67
+ template <class Ty>
68
+ using redux_is_add_supported = _redux_is_add_supported<Ty>;
69
+
70
+ // A specialization for 64 bit logical operations is possible
71
+ // but for now only accelerate 32 bit bitwise ops
72
+ template <class Ty>
73
+ using redux_is_logical_supported = redux_is_add_supported<Ty>;
74
+
75
+ // Base operator support case
76
+ template <class TyOp, class Ty> struct _redux_op_supported : public _CG_STL_NAMESPACE::false_type {};
77
+ template <class Ty> struct _redux_op_supported<cooperative_groups::plus<Ty>, Ty> : public redux_is_add_supported<Ty> {};
78
+ template <class Ty> struct _redux_op_supported<cooperative_groups::less<Ty>, Ty> : public redux_is_add_supported<Ty> {};
79
+ template <class Ty> struct _redux_op_supported<cooperative_groups::greater<Ty>, Ty> : public redux_is_add_supported<Ty> {};
80
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_and<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
81
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_or<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
82
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_xor<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
83
+
84
+ template <class Ty, template <class> class TyOp>
85
+ using redux_op_supported = _redux_op_supported<
86
+ typename details::remove_qual<TyOp<Ty>>,
87
+ Ty>;
88
+
89
+ // Groups smaller than 16 actually have worse performance characteristics when used with redux
90
+ // tiles of size 16 and 32 perform the same or better and have better code generation profiles
91
+ template <class TyGroup> struct _redux_group_optimized : public _CG_STL_NAMESPACE::false_type {};
92
+
93
+ template <unsigned int Sz, typename TyPar>
94
+ struct _redux_group_optimized<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::integral_constant<
95
+ bool,
96
+ (Sz >= 16)> {};
97
+ template <unsigned int Sz, typename TyPar>
98
+ struct _redux_group_optimized<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::integral_constant<
99
+ bool,
100
+ (Sz >= 16)> {};
101
+ template <>
102
+ struct _redux_group_optimized<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
103
+
104
+ template <typename TyGroup>
105
+ using redux_group_optimized = _redux_group_optimized<details::remove_qual<TyGroup>>;
106
+
107
+ template <template <class> class TyOp>
108
+ _CG_STATIC_QUALIFIER int pick_redux(int mask, int val);
109
+ template <template <class> class TyOp>
110
+ _CG_STATIC_QUALIFIER unsigned int pick_redux(int mask, unsigned int val);
111
+
112
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::plus>(int mask, int val) {
113
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_add_sync(mask, val);), return 0;)
114
+ }
115
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::less>(int mask, int val) {
116
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_min_sync(mask, val);), return 0;)
117
+ }
118
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::greater>(int mask, int val) {
119
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_max_sync(mask, val);), return 0;)
120
+ }
121
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_and>(int mask, int val) {
122
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return static_cast<int>(__reduce_and_sync(mask, val));), return 0;)
123
+ }
124
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_xor>(int mask, int val) {
125
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return static_cast<int>(__reduce_xor_sync(mask, val));), return 0;)
126
+ }
127
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_or>(int mask, int val) {
128
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return static_cast<int>(__reduce_or_sync(mask, val));), return 0;)
129
+ }
130
+
131
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::plus>(int mask, unsigned int val) {
132
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_add_sync(mask, val);), return 0;)
133
+ }
134
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::less>(int mask, unsigned int val) {
135
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_min_sync(mask, val);), return 0;)
136
+ }
137
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::greater>(int mask, unsigned int val) {
138
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_max_sync(mask, val);), return 0;)
139
+ }
140
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_and>(int mask, unsigned int val) {
141
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_and_sync(mask, val);), return 0;)
142
+ }
143
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_xor>(int mask, unsigned int val) {
144
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_xor_sync(mask, val);), return 0;)
145
+ }
146
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_or>(int mask, unsigned int val) {
147
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80, (return __reduce_or_sync(mask, val);), return 0;)
148
+ }
149
+
150
+ template <typename TyVal, bool = _CG_STL_NAMESPACE::is_unsigned<TyVal>::value>
151
+ struct _accelerated_op;
152
+
153
+ // Signed type redux intrinsic dispatch
154
+ template <typename TyVal>
155
+ struct _accelerated_op<TyVal, false> {
156
+ template <template <class> class TyOp>
157
+ _CG_STATIC_QUALIFIER TyVal redux(int mask, TyVal val) {
158
+ return static_cast<TyVal>(pick_redux<TyOp>(mask, static_cast<int>(val)));
159
+ }
160
+ };
161
+
162
+ // Unsigned type redux intrinsic dispatch
163
+ template <typename TyVal>
164
+ struct _accelerated_op<TyVal, true> {
165
+ template <template <class> class TyOp>
166
+ _CG_STATIC_QUALIFIER TyVal redux(int mask, TyVal val) {
167
+ return static_cast<TyVal>(pick_redux<TyOp>(mask, static_cast<unsigned int>(val)));
168
+ }
169
+ };
170
+
171
+ template <typename TyVal>
172
+ using accelerated_op = _accelerated_op<TyVal>;
173
+
174
+
175
+ template <typename TyVal, typename TyFnInput, typename TyGroup>
176
+ class _redux_dispatch {
177
+ template <class Ty, template <class> class TyOp>
178
+ using _redux_is_usable = _CG_STL_NAMESPACE::integral_constant<bool,
179
+ redux_op_supported<Ty, TyOp>::value &&
180
+ redux_group_optimized<TyGroup>::value>;
181
+
182
+ template <class Ty, template <class> class TyOp>
183
+ using redux_is_usable = typename _CG_STL_NAMESPACE::enable_if<_redux_is_usable<Ty, TyOp>::value, void>::type*;
184
+
185
+ template <class Ty, template <class> class TyOp>
186
+ using redux_is_not_usable = typename _CG_STL_NAMESPACE::enable_if<!_redux_is_usable<Ty, TyOp>::value, void>::type*;
187
+
188
+ public:
189
+ // Dispatch to redux if the combination of op and args are supported
190
+ template<
191
+ template <class> class TyOp,
192
+ redux_is_usable<TyFnInput, TyOp> = nullptr>
193
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
194
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
195
+ // Retrieve the mask for the group and dispatch to redux
196
+ return accelerated_op<TyFnInput>::template redux<TyOp>(_coalesced_group_data_access::get_mask(group), _CG_STL_NAMESPACE::forward<TyVal>(val));
197
+ ,
198
+ // Arch does not support redux, fallback to shuffles
199
+ return coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
200
+ )
201
+ }
202
+
203
+ template<
204
+ template <class> class TyOp,
205
+ redux_is_usable<TyFnInput, TyOp> = nullptr>
206
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>& op) -> decltype(op(val, val)) {
207
+ NV_IF_ELSE_TARGET(NV_PROVIDES_SM_80,
208
+ // Retrieve the mask for the group and dispatch to redux
209
+ return accelerated_op<TyFnInput>::template redux<TyOp>(_coalesced_group_data_access::get_mask(group), _CG_STL_NAMESPACE::forward<TyVal>(val));
210
+ ,
211
+ // Arch does not support redux, fallback to shuffles
212
+ return coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
213
+ )
214
+ }
215
+
216
+ // Fallback shuffle sync reduction
217
+ template <
218
+ template <class> class TyOp,
219
+ redux_is_not_usable<TyFnInput, TyOp> = nullptr>
220
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
221
+ //Dispatch to fallback shuffle sync accelerated reduction
222
+ return coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
223
+ }
224
+
225
+ };
226
+
227
+ // Group support for reduce.
228
+ template <class TyGroup> struct _reduce_group_supported : public _CG_STL_NAMESPACE::false_type {};
229
+
230
+ template <unsigned int Sz, typename TyPar>
231
+ struct _reduce_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
232
+ template <unsigned int Sz, typename TyPar>
233
+ struct _reduce_group_supported<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
234
+ template <>
235
+ struct _reduce_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
236
+
237
+ template <typename TyGroup>
238
+ using reduce_group_supported = _reduce_group_supported<details::remove_qual<TyGroup>>;
239
+
240
+ template <typename TyVal, typename TyFnInput, template <class> class TyOp, typename TyGroup>
241
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
242
+ static_assert(details::is_op_type_same<TyFnInput, TyVal>::value, "Operator and argument types differ");
243
+
244
+ using dispatch = details::_redux_dispatch<TyVal, TyFnInput, TyGroup>;
245
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
246
+ }
247
+
248
+ template <typename TyVal, typename TyFnInput, template <class> class TyOp, typename TyGroup>
249
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>& op) -> decltype(op(val, val)) {
250
+ static_assert(details::is_op_type_same<TyFnInput, TyVal>::value, "Operator and argument types differ");
251
+
252
+ using dispatch = details::_redux_dispatch<TyVal, TyFnInput, TyGroup>;
253
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
254
+ }
255
+
256
+
257
+ template <typename TyVal, typename TyOp, typename TyGroup>
258
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
259
+ return details::coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
260
+ }
261
+
262
+ template <unsigned int GroupId>
263
+ struct tile_reduce_dispatch;
264
+
265
+ template <>
266
+ struct tile_reduce_dispatch<details::coalesced_group_id> {
267
+ template <typename TyGroup, typename TyVal, typename TyFn>
268
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
269
+ return details::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
270
+ }
271
+ };
272
+
273
+ #if defined(_CG_CPP11_FEATURES)
274
+ template <>
275
+ struct tile_reduce_dispatch<details::multi_tile_group_id> {
276
+ template <unsigned int Size, typename ParentT, typename TyVal, typename TyFn>
277
+ _CG_STATIC_QUALIFIER auto reduce(const thread_block_tile<Size, ParentT>& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
278
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
279
+ using TyRet = details::remove_qual<TyVal>;
280
+ const unsigned int num_warps = Size / 32;
281
+
282
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
283
+ *warp_scratch_location =
284
+ details::reduce(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
285
+ };
286
+ auto inter_warp_lambda =
287
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
288
+ *thread_scratch_location =
289
+ details::reduce(subwarp, *thread_scratch_location, _CG_STL_NAMESPACE::forward<TyFn>(op));
290
+ };
291
+ return details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
292
+ }
293
+ };
294
+
295
+ template <unsigned int GroupId>
296
+ struct tile_async_reduce_dispatch;
297
+
298
+ template <>
299
+ struct tile_async_reduce_dispatch<details::coalesced_group_id> {
300
+ template <typename GroupT, typename TyDst, typename TyVal, typename TyFn, typename TyResHandler>
301
+ _CG_STATIC_QUALIFIER void reduce(const GroupT& group, TyDst& dst, TyVal&& val, TyFn&& op, TyResHandler& res_handler) {
302
+ // Do regular, in group reduction
303
+ auto result = details::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
304
+
305
+ // One thread stores/updates the destination
306
+ if (group.thread_rank() == 0) {
307
+ res_handler(result);
308
+ }
309
+ }
310
+ };
311
+
312
+ template <>
313
+ struct tile_async_reduce_dispatch<details::multi_tile_group_id> {
314
+ template <unsigned int TySize, typename ParentT, typename TyDst, typename TyInputVal, typename TyFn, typename TyResHandler>
315
+ _CG_STATIC_QUALIFIER void reduce(const thread_block_tile<TySize, ParentT>& group, TyDst& dst, TyInputVal&& val, TyFn&& op, TyResHandler& res_handler) {
316
+ using TyVal = remove_qual<TyInputVal>;
317
+ const unsigned int num_warps = TySize / 32;
318
+ details::barrier_t* sync_location = multi_warp_sync_location_getter(group);
319
+ auto warp_scratch_location = multi_warp_scratch_location_getter<TyVal>(group, group.thread_rank() / 32);
320
+
321
+ // Do in warp reduce
322
+ auto warp = details::tiled_partition_internal<32, thread_block_tile<TySize, ParentT>>();
323
+ *warp_scratch_location = details::reduce(warp, _CG_STL_NAMESPACE::forward<TyInputVal>(val), op);
324
+
325
+ // Tile of size num_warps from the last warp to arrive does final reduction step
326
+ if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), num_warps)) {
327
+ auto subwarp = details::tiled_partition_internal<num_warps, decltype(warp)>();
328
+ if (subwarp.meta_group_rank() == 0) {
329
+ auto thread_scratch_location = multi_warp_scratch_location_getter<TyVal>(group, subwarp.thread_rank());
330
+ auto thread_val = *thread_scratch_location;
331
+ // Release other warps, we read their contribution already.
332
+ subwarp.sync();
333
+ details::sync_warps_release(sync_location, subwarp.thread_rank() == 0, details::cta::thread_rank(), num_warps);
334
+ TyVal result = details::reduce(subwarp, thread_val, op);
335
+ // One thread stores the result or updates the atomic
336
+ if (subwarp.thread_rank() == 0) {
337
+ res_handler(result);
338
+ }
339
+ }
340
+ warp.sync();
341
+ }
342
+ }
343
+ };
344
+ #endif
345
+
346
+ template <typename TyGroup, typename TyInputVal, typename TyRetVal>
347
+ _CG_QUALIFIER void check_reduce_params() {
348
+ static_assert(details::is_op_type_same<TyInputVal, TyRetVal>::value, "Operator input and output types differ");
349
+ static_assert(details::reduce_group_supported<TyGroup>::value, "This group does not exclusively represent a tile");
350
+ };
351
+
352
+ template <typename TyGroup, typename TyDstVal, typename TyInputVal, typename TyRetVal>
353
+ _CG_QUALIFIER void check_async_reduce_params() {
354
+ check_reduce_params<TyGroup, TyInputVal, TyRetVal>();
355
+ static_assert(details::is_op_type_same<TyDstVal, TyInputVal>::value, "Destination and input types differ");
356
+ }
357
+ } // details
358
+
359
+ template <typename TyGroup, typename TyVal, typename TyFn>
360
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
361
+ details::check_reduce_params<TyGroup, details::remove_qual<TyVal>, decltype(op(val, val))>();
362
+
363
+ using dispatch = details::tile_reduce_dispatch<TyGroup::_group_id>;
364
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
365
+ }
366
+
367
+ #if defined(_CG_CPP11_FEATURES)
368
+
369
+ # if defined(_CG_HAS_STL_ATOMICS)
370
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
371
+ void _CG_QUALIFIER reduce_update_async(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
372
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
373
+ auto update_lambda = [&] (TyVal& result) {
374
+ details::atomic_update(dst, result, op);
375
+ };
376
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
377
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), update_lambda);
378
+ }
379
+
380
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
381
+ void _CG_QUALIFIER reduce_update_async(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
382
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
383
+ auto update_lambda = [&] (TyVal& result) {
384
+ details::atomic_update(dst, result, op);
385
+ };
386
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
387
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), update_lambda);
388
+ }
389
+
390
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
391
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
392
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
393
+ auto store_lambda = [&] (TyVal& result) {
394
+ details::atomic_store(dst, result);
395
+ };
396
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
397
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
398
+ }
399
+
400
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
401
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
402
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
403
+ auto store_lambda = [&] (TyVal& result) {
404
+ details::atomic_store(dst, result);
405
+ };
406
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
407
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
408
+ }
409
+ # endif
410
+
411
+ template<typename TyGroup, typename TyVal, typename TyInputVal, typename TyFn>
412
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, TyVal* dst, TyInputVal&& val, TyFn&& op) {
413
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
414
+ auto store_lambda = [&] (TyVal& result) {
415
+ *dst = result;
416
+ };
417
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
418
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
419
+ }
420
+ #endif
421
+
422
+ _CG_END_NAMESPACE
423
+
424
+ #endif // _CG_REDUCE_H_
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/scan.h ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_SCAN_H_
50
+ #define _CG_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "functional.h"
55
+ #include "coalesced_scan.h"
56
+
57
+ _CG_BEGIN_NAMESPACE
58
+
59
+ namespace details {
60
+
61
+ // Group support for scan.
62
+ template <class TyGroup> struct _scan_group_supported : public _CG_STL_NAMESPACE::false_type {};
63
+
64
+ template <unsigned int Sz, typename TyPar>
65
+ struct _scan_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
66
+ template <unsigned int Sz, typename TyPar>
67
+ struct _scan_group_supported<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
68
+ template <>
69
+ struct _scan_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
70
+
71
+ template <typename TyGroup>
72
+ using scan_group_supported = _scan_group_supported<details::remove_qual<TyGroup>>;
73
+
74
+ template <bool IsIntegralPlus>
75
+ struct integral_optimized_scan;
76
+
77
+ enum class ScanType { exclusive, inclusive };
78
+
79
+ template <unsigned int GroupId, ScanType TyScan>
80
+ struct scan_dispatch;
81
+
82
+ template <ScanType TyScan>
83
+ struct scan_dispatch<details::coalesced_group_id, TyScan> {
84
+ template <typename TyGroup, typename TyVal, typename TyFn>
85
+ _CG_STATIC_QUALIFIER auto scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
86
+ auto scan_result = coalesced_inclusive_scan(group, val, op);
87
+ if (TyScan == ScanType::exclusive) {
88
+ scan_result = convert_inclusive_to_exclusive(group,
89
+ scan_result,
90
+ _CG_STL_NAMESPACE::forward<TyVal>(val),
91
+ _CG_STL_NAMESPACE::forward<TyFn>(op));
92
+ }
93
+ return scan_result;
94
+ }
95
+ };
96
+
97
+ #if defined(_CG_CPP11_FEATURES)
98
+ template <ScanType TyScan>
99
+ struct scan_dispatch<details::multi_tile_group_id, TyScan> {
100
+ template <unsigned int Size, typename ParentT, typename TyVal, typename TyFn>
101
+ _CG_STATIC_QUALIFIER auto scan(const thread_block_tile<Size, ParentT>& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
102
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
103
+ using TyRet = details::remove_qual<TyVal>;
104
+ const unsigned int num_warps = Size / 32;
105
+ // In warp scan result, calculated in warp_lambda
106
+ TyRet warp_scan;
107
+
108
+ // In warp scan, put sum in the warp_scratch_location
109
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
110
+ warp_scan =
111
+ details::coalesced_inclusive_scan(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
112
+ if (warp.thread_rank() + 1 == warp.size()) {
113
+ *warp_scratch_location = warp_scan;
114
+ }
115
+ if (TyScan == ScanType::exclusive) {
116
+ warp_scan = warp.shfl_up(warp_scan, 1);
117
+ }
118
+ };
119
+
120
+ // Tile of size num_warps performing the final scan part (exclusive scan of warp sums), other threads will add it
121
+ // to its in-warp scan result
122
+ auto inter_warp_lambda =
123
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
124
+ auto thread_val = *thread_scratch_location;
125
+ auto result = coalesced_inclusive_scan(subwarp, thread_val, op);
126
+ *thread_scratch_location = convert_inclusive_to_exclusive(subwarp, result, thread_val, op);
127
+ };
128
+
129
+ TyRet previous_warps_sum = details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
130
+ if (TyScan == ScanType::exclusive && warpType::thread_rank() == 0) {
131
+ return previous_warps_sum;
132
+ }
133
+ if (warpType::meta_group_rank() == 0) {
134
+ return warp_scan;
135
+ }
136
+ else {
137
+ return op(warp_scan, previous_warps_sum);
138
+ }
139
+ }
140
+ };
141
+
142
+ #if defined(_CG_HAS_STL_ATOMICS)
143
+ template <unsigned int GroupId, ScanType TyScan>
144
+ struct scan_update_dispatch;
145
+
146
+ template <ScanType TyScan>
147
+ struct scan_update_dispatch<details::coalesced_group_id, TyScan> {
148
+ template <typename TyGroup, typename TyAtomic, typename TyVal, typename TyFn>
149
+ _CG_STATIC_QUALIFIER auto scan(const TyGroup& group, TyAtomic& dst, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
150
+ details::remove_qual<TyVal> old;
151
+
152
+ // Do regular in group scan
153
+ auto scan_result = details::coalesced_inclusive_scan(group, val, op);
154
+
155
+ // Last thread updates the atomic and distributes its old value to other threads
156
+ if (group.thread_rank() == group.size() - 1) {
157
+ old = atomic_update(dst, scan_result, _CG_STL_NAMESPACE::forward<TyFn>(op));
158
+ }
159
+ old = group.shfl(old, group.size() - 1);
160
+ if (TyScan == ScanType::exclusive) {
161
+ scan_result = convert_inclusive_to_exclusive(group, scan_result, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
162
+ }
163
+ scan_result = op(old, scan_result);
164
+ return scan_result;
165
+ }
166
+ };
167
+
168
+ template <ScanType TyScan>
169
+ struct scan_update_dispatch<details::multi_tile_group_id, TyScan> {
170
+ template <unsigned int Size, typename ParentT, typename TyAtomic, typename TyVal, typename TyFn>
171
+ _CG_STATIC_QUALIFIER auto scan(const thread_block_tile<Size, ParentT>& group, TyAtomic& dst, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
172
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
173
+ using TyRet = details::remove_qual<TyVal>;
174
+ const unsigned int num_warps = Size / 32;
175
+ // In warp scan result, calculated in warp_lambda
176
+ TyRet warp_scan;
177
+
178
+ // In warp scan, put sum in the warp_scratch_location
179
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
180
+ warp_scan =
181
+ details::coalesced_inclusive_scan(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
182
+ if (warp.thread_rank() + 1 == warp.size()) {
183
+ *warp_scratch_location = warp_scan;
184
+ }
185
+ if (TyScan == ScanType::exclusive) {
186
+ warp_scan = warp.shfl_up(warp_scan, 1);
187
+ }
188
+ };
189
+
190
+ // Tile of size num_warps performing the final scan part (exclusive scan of warp sums), other threads will add it
191
+ // to its in-warp scan result
192
+ auto inter_warp_lambda =
193
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
194
+ auto thread_val = *thread_scratch_location;
195
+ auto scan_result = details::coalesced_inclusive_scan(subwarp, thread_val, op);
196
+ TyRet offset;
197
+ // Single thread does the atomic update with sum of all contributions and reads the old value.
198
+ if (subwarp.thread_rank() == subwarp.size() - 1) {
199
+ offset = details::atomic_update(dst, scan_result, op);
200
+ }
201
+ offset = subwarp.shfl(offset, subwarp.size() - 1);
202
+ scan_result = convert_inclusive_to_exclusive(subwarp, scan_result, thread_val, op);
203
+ // Add offset read from the atomic to the scanned warp sum.
204
+ // Skipping first thread, since it got defautly constructed value from the conversion,
205
+ // it should just return the offset received from the thread that did the atomic update.
206
+ if (subwarp.thread_rank() != 0) {
207
+ offset = op(scan_result, offset);
208
+ }
209
+ *thread_scratch_location = offset;
210
+ };
211
+
212
+ TyRet previous_warps_sum = details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
213
+ if (TyScan == ScanType::exclusive && warpType::thread_rank() == 0) {
214
+ return previous_warps_sum;
215
+ }
216
+ return op(warp_scan, previous_warps_sum);
217
+ }
218
+ };
219
+ #endif
220
+ #endif
221
+
222
+ template <typename TyGroup, typename TyInputVal, typename TyRetVal>
223
+ _CG_QUALIFIER void check_scan_params() {
224
+ static_assert(details::is_op_type_same<TyInputVal, TyRetVal>::value, "Operator input and output types differ");
225
+ static_assert(details::scan_group_supported<TyGroup>::value, "This group does not exclusively represent a tile");
226
+ }
227
+
228
+ #if defined(_CG_HAS_STL_ATOMICS)
229
+ template <typename TyGroup, typename TyDstVal, typename TyInputVal, typename TyRetVal>
230
+ _CG_QUALIFIER void check_scan_update_params() {
231
+ check_scan_params<TyGroup, TyInputVal, TyRetVal>();
232
+ static_assert(details::is_op_type_same<TyDstVal, TyInputVal>::value, "Destination and input types differ");
233
+ }
234
+ #endif
235
+
236
+ } // details
237
+
238
+ template <typename TyGroup, typename TyVal, typename TyFn>
239
+ _CG_QUALIFIER auto inclusive_scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
240
+ details::check_scan_params<TyGroup, TyVal, decltype(op(val, val))>();
241
+
242
+ using dispatch = details::scan_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
243
+ return dispatch::scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
244
+ }
245
+
246
+ template <typename TyGroup, typename TyVal>
247
+ _CG_QUALIFIER details::remove_qual<TyVal> inclusive_scan(const TyGroup& group, TyVal&& val) {
248
+ return inclusive_scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), cooperative_groups::plus<details::remove_qual<TyVal>>());
249
+ }
250
+
251
+ template <typename TyGroup, typename TyVal, typename TyFn>
252
+ _CG_QUALIFIER auto exclusive_scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
253
+ details::check_scan_params<TyGroup, TyVal, decltype(op(val, val))>();
254
+
255
+ using dispatch = details::scan_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
256
+ return dispatch::scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
257
+ }
258
+
259
+ template <typename TyGroup, typename TyVal>
260
+ _CG_QUALIFIER details::remove_qual<TyVal> exclusive_scan(const TyGroup& group, TyVal&& val) {
261
+ return exclusive_scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), cooperative_groups::plus<details::remove_qual<TyVal>>());
262
+ }
263
+
264
+ #if defined(_CG_HAS_STL_ATOMICS)
265
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
266
+ _CG_QUALIFIER auto inclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
267
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
268
+
269
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
270
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
271
+ }
272
+
273
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
274
+ _CG_QUALIFIER TyVal inclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco> & dst, TyInputVal&& val) {
275
+ return inclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
276
+ }
277
+
278
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
279
+ _CG_QUALIFIER auto exclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
280
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
281
+
282
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
283
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
284
+ }
285
+
286
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
287
+ _CG_QUALIFIER TyVal exclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val) {
288
+ return exclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
289
+ }
290
+
291
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
292
+ _CG_QUALIFIER auto inclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
293
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
294
+
295
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
296
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
297
+ }
298
+
299
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
300
+ _CG_QUALIFIER TyVal inclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco> & dst, TyInputVal&& val) {
301
+ return inclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
302
+ }
303
+
304
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
305
+ _CG_QUALIFIER auto exclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
306
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
307
+
308
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
309
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
310
+ }
311
+
312
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
313
+ _CG_QUALIFIER TyVal exclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val) {
314
+ return exclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
315
+ }
316
+ #endif
317
+
318
+ _CG_END_NAMESPACE
319
+
320
+ #endif // _CG_SCAN_H_
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/sync.h ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_GRID_H
50
+ #define _CG_GRID_H
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ namespace details
57
+ {
58
+ typedef unsigned int barrier_t;
59
+
60
// The barrier's phase lives in the top bit of the arrival counter; the barrier
// has been released once that bit differs between the two snapshots.
_CG_STATIC_QUALIFIER bool bar_has_flipped(unsigned int old_arrive, unsigned int current_arrive) {
    return (old_arrive >> 31) != (current_arrive >> 31);
}
63
+
64
// Exactly one thread per CTA — thread (0,0,0) — acts as the block's representative.
_CG_STATIC_QUALIFIER bool is_cta_master() {
    return (threadIdx.x | threadIdx.y | threadIdx.z) == 0;
}
67
+
68
// Arrival half of the grid-wide barrier. Every thread first syncs its own CTA;
// then each CTA's master atomically bumps the shared arrival counter.
// Returns the counter value observed by the CTA master (0 for all other threads),
// which sync_grids_wait later compares phases against.
_CG_STATIC_QUALIFIER unsigned int sync_grids_arrive(volatile barrier_t *arrived) {
    unsigned int oldArrive = 0;

    __barrier_sync(0);

    if (is_cta_master()) {
        unsigned int expected = gridDim.x * gridDim.y * gridDim.z;
        bool gpu_master = (blockIdx.x + blockIdx.y + blockIdx.z == 0);
        unsigned int nb = 1;

        // The grid master adds a bias so that once all CTAs have added their 1,
        // the counter totals 0x80000000 and the top (phase) bit flips;
        // bar_has_flipped() detects the release from that flip.
        if (gpu_master) {
            nb = 0x80000000 - (expected - 1);
        }

        NV_IF_ELSE_TARGET(NV_PROVIDES_SM_70,
        // Barrier update with release; polling with acquire
        asm volatile("atom.add.release.gpu.u32 %0,[%1],%2;" : "=r"(oldArrive) : _CG_ASM_PTR_CONSTRAINT((unsigned int*)arrived), "r"(nb) : "memory");
        ,
        // Fence; barrier update; volatile polling; fence
        __threadfence();
        oldArrive = atomicAdd((unsigned int*)arrived, nb);
        );
    }

    return oldArrive;
}
94
+
95
+
96
// Wait half of the grid-wide barrier: the CTA master polls the arrival counter
// until its phase bit has flipped relative to the value captured at arrive time
// (oldArrive), then the whole CTA is released via the block-level barrier.
_CG_STATIC_QUALIFIER void sync_grids_wait(unsigned int oldArrive, volatile barrier_t *arrived) {
    if (is_cta_master()) {
        NV_IF_ELSE_TARGET(NV_PROVIDES_SM_70,
        // SM70+: acquire load orders all later memory operations after the release.
        unsigned int current_arrive;
        do {
            asm volatile("ld.acquire.gpu.u32 %0,[%1];" : "=r"(current_arrive) : _CG_ASM_PTR_CONSTRAINT((unsigned int *)arrived) : "memory");
        } while (!bar_has_flipped(oldArrive, current_arrive));
        ,
        // Pre-SM70: volatile polling followed by a device-wide fence.
        while (!bar_has_flipped(oldArrive, *arrived));
        __threadfence();
        );
    }

    __barrier_sync(0);
}
111
+
112
+ /* - Multi warp groups synchronization routines - */
113
+
114
+ #ifdef _CG_CPP11_FEATURES
115
// Need both acquire and release for the last warp, since it won't be able to acquire with red.and.
// Atomically ORs `val` into the CTA-scoped barrier word and returns the prior value.
_CG_STATIC_QUALIFIER unsigned int atom_or_acq_rel_cta(unsigned int *addr, unsigned int val) {
    unsigned int old;
    NV_IF_ELSE_TARGET(NV_PROVIDES_SM_70,
    (asm volatile("atom.or.acq_rel.cta.b32 %0,[%1],%2;" : "=r"(old) : _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");)
    ,
    // Pre-SM70 has no scoped memory-ordered atomics: emulate the release half
    // with a block-level fence before the plain atomic OR.
    (__threadfence_block();
    old = atomicOr(addr, val);)
    );
    return old;
}
126
+
127
// Special case where barrier is arrived, but not waited on:
// a release-only reduction OR — no return value is needed because the caller
// never inspects the old barrier state.
_CG_STATIC_QUALIFIER void red_or_release_cta(unsigned int *addr, unsigned int val) {
    NV_IF_ELSE_TARGET(NV_PROVIDES_SM_70,
    (asm volatile("red.or.release.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");)
    ,
    // Pre-SM70: block fence provides the release ordering before the atomic OR.
    (__threadfence_block();
    atomicOr(addr, val);)
    );
}
136
+
137
// Usually called by the last arriving warp to release other warps; can be
// relaxed, since the preceding OR was already acq_rel.
_CG_STATIC_QUALIFIER void red_and_relaxed_cta(unsigned int *addr, unsigned int val) {
    NV_IF_ELSE_TARGET(NV_PROVIDES_SM_70,
    (asm volatile("red.and.relaxed.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");)
    ,
    // Pre-SM70: a plain atomic AND suffices (no extra ordering required here).
    (atomicAnd(addr, val);)
    );
}
145
+
146
// Special case of release, where the last warp was doing extra work before
// releasing others; needs to be release to ensure that extra work is visible
// to the warps leaving the barrier.
_CG_STATIC_QUALIFIER void red_and_release_cta(unsigned int *addr, unsigned int val) {
    NV_IF_ELSE_TARGET(NV_PROVIDES_SM_70,
    (asm volatile("red.and.release.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");)
    ,
    // Pre-SM70: block fence provides the release ordering before the atomic AND.
    (__threadfence_block();
    atomicAnd(addr, val);)
    );
}
156
+
157
// Read the barrier word with acquire semantics, so all memory operations
// following the sync are correctly ordered after the barrier is released.
_CG_STATIC_QUALIFIER unsigned int ld_acquire_cta(unsigned int *addr) {
    unsigned int val;
    NV_IF_ELSE_TARGET(NV_PROVIDES_SM_70,
    (asm volatile("ld.acquire.cta.u32 %0,[%1];" : "=r"(val) : _CG_ASM_PTR_CONSTRAINT(addr) : "memory");)
    ,
    // Pre-SM70: volatile load followed by a block fence approximates acquire.
    (val = *((volatile unsigned int*) addr);
    __threadfence_block();)
    );
    return val;
}
168
+
169
// Get the synchronization bit mask of my thread_block_tile of size num_warps.
// Thread ranks 0..31 have the first bit of their group's range assigned to them,
// ranks 32..63 the second, etc. Masks are unique per group: groups of the same
// size have the same number of bits set, but at different positions.
// num_warps is expected to be a power of two in [1, 32] — TODO confirm with callers.
_CG_STATIC_QUALIFIER unsigned int get_group_mask(unsigned int thread_rank, unsigned int num_warps) {
    // Shifting a 32-bit value by 32 is undefined, so the full mask is special-cased.
    if (num_warps == 32) {
        return ~0u;
    }
    // Unsigned literals avoid signed-int left-shift overflow (UB for num_warps == 31).
    return ((1u << num_warps) - 1u) << (num_warps * (thread_rank / (num_warps * 32)));
}
175
+
176
// Spin until this warp's bit is cleared in the barrier word; the acquire load
// orders subsequent memory operations after the release.
_CG_STATIC_QUALIFIER void barrier_wait(barrier_t *arrived, unsigned int warp_bit) {
    for (;;) {
        if ((ld_acquire_cta(arrived) & warp_bit) == 0) {
            break;
        }
    }
}
179
+
180
// Default blocking sync.
// Each warp publishes its bit in `arrived`; the last warp of the group clears
// the whole group mask (releasing everyone), every other warp spins until its
// own bit is cleared again.
_CG_STATIC_QUALIFIER void sync_warps(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
    unsigned int warp_id = thread_rank / 32;
    bool warp_master = (thread_rank % 32 == 0);   // lane 0 acts on behalf of the warp
    unsigned int warp_bit = 1 << warp_id;
    unsigned int group_mask = get_group_mask(thread_rank, num_warps);

    __syncwarp(0xFFFFFFFF);

    if (warp_master) {
        unsigned int old = atom_or_acq_rel_cta(arrived, warp_bit);
        if (((old | warp_bit) & group_mask) == group_mask) {
            // Last arriving warp: reset the group's bits. Relaxed is sufficient
            // because the preceding OR already had acq_rel semantics.
            red_and_relaxed_cta(arrived, ~group_mask);
        }
        else {
            barrier_wait(arrived, warp_bit);
        }
    }

    __syncwarp(0xFFFFFFFF);
}
201
+
202
// Blocking sync, except the last arriving warp — which would release the other
// warps — returns to do other stuff first.
// A warp returning true from this function must later call sync_warps_release.
_CG_STATIC_QUALIFIER bool sync_warps_last_releases(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
    unsigned int warp_id = thread_rank / 32;
    bool warp_master = (thread_rank % 32 == 0);
    unsigned int warp_bit = 1 << warp_id;
    unsigned int group_mask = get_group_mask(thread_rank, num_warps);

    __syncwarp(0xFFFFFFFF);

    unsigned int old = 0;
    if (warp_master) {
        old = atom_or_acq_rel_cta(arrived, warp_bit);
    }
    // Broadcast lane 0's observation so every thread of the last warp returns true.
    old = __shfl_sync(0xFFFFFFFF, old, 0);
    if (((old | warp_bit) & group_mask) == group_mask) {
        return true;
    }
    barrier_wait(arrived, warp_bit);

    return false;
}
224
+
225
// Release my group from the barrier: the designated master clears every bit of
// the group's mask with release semantics, so waiting warps observe prior writes.
_CG_STATIC_QUALIFIER void sync_warps_release(barrier_t *arrived, bool is_master, unsigned int thread_rank, unsigned int num_warps) {
    if (!is_master) {
        return;
    }
    red_and_release_cta(arrived, ~get_group_mask(thread_rank, num_warps));
}
232
+
233
// Arrive at my group's barrier, but don't block or release the barrier, even if
// everyone arrives. sync_warps_release needs to be called by some warp after
// this one to reset the barrier.
_CG_STATIC_QUALIFIER void sync_warps_arrive(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
    unsigned int warp_id = thread_rank / 32;
    bool warp_master = (thread_rank % 32 == 0);
    // Unsigned literal avoids signed left-shift UB for warp_id == 31.
    unsigned int warp_bit = 1u << warp_id;
    // Removed a dead `group_mask = get_group_mask(...)` computation: arrival only
    // needs this warp's bit, never the group mask. Parameter kept for interface
    // symmetry with the other sync_warps_* entry points.
    (void)num_warps;

    __syncwarp(0xFFFFFFFF);

    // One thread per warp publishes the arrival with release semantics.
    if (warp_master) {
        red_or_release_cta(arrived, warp_bit);
    }
}
247
+
248
// Wait for my warp to be released from the barrier. The warp must have arrived first.
_CG_STATIC_QUALIFIER void sync_warps_wait(barrier_t *arrived, unsigned int thread_rank) {
    barrier_wait(arrived, 1u << (thread_rank / 32));
}
255
+
256
// Spin until the warp with index wait_warp_id has arrived at the barrier.
_CG_QUALIFIER void sync_warps_wait_for_specific_warp(barrier_t *arrived, unsigned int wait_warp_id) {
    const unsigned int target_bit = 1u << wait_warp_id;
    for (;;) {
        if ((ld_acquire_cta(arrived) & target_bit) == target_bit) {
            break;
        }
    }
}
261
+
262
// Initialize (clear) the bit corresponding to my warp in the barrier word.
_CG_QUALIFIER void sync_warps_reset(barrier_t *arrived, unsigned int thread_rank) {
    const unsigned int warp_bit = 1u << (thread_rank / 32);

    __syncwarp(0xFFFFFFFF);

    const bool warp_master = (thread_rank % 32 == 0);
    if (warp_master) {
        red_and_release_cta(arrived, ~warp_bit);
    }
    // No need to sync after the atomic: the group being partitioned synchronizes
    // right after this call.
}
274
+
275
+ #endif
276
+
277
+ } // details
278
+
279
+ _CG_END_NAMESPACE
280
+
281
+ #endif // _CG_GRID_H
tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_MEMCPY_ASYNC
50
+ #define _COOPERATIVE_GROUPS_MEMCPY_ASYNC
51
+
52
+ #include "../cooperative_groups.h"
53
+ #include "details/info.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ # include "details/async.h"
57
+ #else
58
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
59
+ -std=c++11 compiler option.
60
+ #endif
61
+
62
+ #endif // _COOPERATIVE_GROUPS_MEMCPY_ASYNC