ZTWHHH commited on
Commit
0af0f05
·
verified ·
1 Parent(s): eb2ab97

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/Openacc/cupti_openacc.h +98 -0
  3. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/Openmp/cupti_openmp.h +100 -0
  4. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cuda_stdint.h +112 -0
  5. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_metrics.h +825 -0
  6. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_result.h +328 -0
  7. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_runtime_cbid.h +443 -0
  8. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_version.h +129 -0
  9. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/generated_nvtx_meta.h +247 -0
  10. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/nvperf_common.h +273 -0
  11. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h +100 -0
  12. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py +0 -0
  13. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h +112 -0
  14. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti.h +123 -0
  15. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h +0 -0
  16. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity_deprecated.h +0 -0
  17. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h +860 -0
  18. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h +127 -0
  19. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_common.h +93 -0
  20. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h +767 -0
  21. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h +111 -0
  22. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h +936 -0
  23. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h +402 -0
  24. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_runtime_cbid.h +481 -0
  25. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_sass_metrics.h +436 -0
  26. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_target.h +43 -0
  27. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h +116 -0
  28. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h +46 -0
  29. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h +71 -0
  30. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h +2288 -0
  31. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h +38 -0
  32. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h +162 -0
  33. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h +247 -0
  34. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_common.h +393 -0
  35. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h +197 -0
  36. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_host.h +1578 -0
  37. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_target.h +597 -0
  38. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py +0 -0
  39. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.11.7 +3 -0
  40. mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 +3 -0
  41. mplug_owl2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 +3 -0
  42. mplug_owl2/lib/python3.10/site-packages/pygments/lexers/__pycache__/lisp.cpython-310.pyc +3 -0
  43. mplug_owl2/lib/python3.10/site-packages/pygments/styles/__pycache__/lovelace.cpython-310.pyc +0 -0
  44. mplug_owl2/lib/python3.10/site-packages/pygments/styles/__pycache__/vim.cpython-310.pyc +0 -0
  45. openflamingo/lib/python3.10/site-packages/wandb/proto/v3/__init__.py +0 -0
  46. openflamingo/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/wandb_server_pb2.cpython-310.pyc +0 -0
  47. openflamingo/lib/python3.10/site-packages/wandb/proto/v4/__init__.py +0 -0
  48. openflamingo/lib/python3.10/site-packages/wandb/proto/v4/wandb_base_pb2.py +30 -0
  49. openflamingo/lib/python3.10/site-packages/wandb/proto/v4/wandb_server_pb2.py +67 -0
  50. openflamingo/lib/python3.10/site-packages/wandb/proto/v4/wandb_settings_pb2.py +47 -0
.gitattributes CHANGED
@@ -712,3 +712,7 @@ mplug_owl2/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-31
712
  openflamingo/compiler_compat/ld filter=lfs diff=lfs merge=lfs -text
713
  openflamingo/bin/openssl filter=lfs diff=lfs merge=lfs -text
714
  mplug_owl2/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
712
  openflamingo/compiler_compat/ld filter=lfs diff=lfs merge=lfs -text
713
  openflamingo/bin/openssl filter=lfs diff=lfs merge=lfs -text
714
  mplug_owl2/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
715
+ mplug_owl2/lib/python3.10/site-packages/pygments/lexers/__pycache__/lisp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
716
+ mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
717
+ mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.11.7 filter=lfs diff=lfs merge=lfs -text
718
+ mplug_owl2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 filter=lfs diff=lfs merge=lfs -text
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/Openacc/cupti_openacc.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+
52
+ #if !defined(_CUPTI_OPENACC_H_)
53
+ #define _CUPTI_OPENACC_H_
54
+
55
+ #ifndef CUPTIAPI
56
+ #ifdef _WIN32
57
+ #define CUPTIAPI __stdcall
58
+ #else
59
+ #define CUPTIAPI
60
+ #endif
61
+ #endif
62
+
63
+ #if defined(__LP64__)
64
+ #define CUPTILP64 1
65
+ #elif defined(_WIN64)
66
+ #define CUPTILP64 1
67
+ #else
68
+ #undef CUPTILP64
69
+ #endif
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif
74
+
75
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
76
+ #pragma GCC visibility push(default)
77
+ #endif
78
+
79
+ /**
80
+ * \brief Initialize OpenACC support
81
+ *
82
+ * \param profRegister function of type acc_prof_reg as obtained from acc_register_library
83
+ * \param profUnregister function of type acc_prof_reg as obtained from acc_register_library
84
+ * \param profLookup function of type acc_prof_lookup as obtained from acc_register_library
85
+ */
86
+ CUptiResult CUPTIAPI
87
+ cuptiOpenACCInitialize(void *profRegister, void *profUnregister, void *profLookup);
88
+
89
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
90
+ #pragma GCC visibility pop
91
+ #endif
92
+
93
+ #if defined(__cplusplus)
94
+ }
95
+ #endif
96
+
97
+ #endif /*_CUPTI_OPENACC_H_*/
98
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/Openmp/cupti_openmp.h ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+ #include "Openmp/omp-tools.h"
52
+
53
+ #if !defined(_CUPTI_OPENMP_H_)
54
+ #define _CUPTI_OPENMP_H_
55
+
56
+ #ifndef CUPTIAPI
57
+ #ifdef _WIN32
58
+ #define CUPTIAPI __stdcall
59
+ #else
60
+ #define CUPTIAPI
61
+ #endif
62
+ #endif
63
+
64
+ #if defined(__LP64__)
65
+ #define CUPTILP64 1
66
+ #elif defined(_WIN64)
67
+ #define CUPTILP64 1
68
+ #else
69
+ #undef CUPTILP64
70
+ #endif
71
+
72
+ #if defined(__cplusplus)
73
+ extern "C" {
74
+ #endif
75
+
76
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
77
+ #pragma GCC visibility push(default)
78
+ #endif
79
+
80
+ /**
81
+ * \brief Initialize OPENMP support (deprecated, used before OpenMP 5.0)
82
+ *
83
+ */
84
+ int CUPTIAPI cuptiOpenMpInitialize(ompt_function_lookup_t ompt_fn_lookup, const char *runtime_version, unsigned int ompt_version);
85
+
86
+ /**
87
+ * \brief Initialize OPENMP support
88
+ *
89
+ */
90
+ int CUPTIAPI cuptiOpenMpInitialize_v2(ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data);
91
+
92
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
93
+ #pragma GCC visibility pop
94
+ #endif
95
+
96
+ #if defined(__cplusplus)
97
+ }
98
+ #endif
99
+
100
+ #endif /*_CUPTI_OPENMP_H_*/
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cuda_stdint.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions
6
+ * are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of NVIDIA CORPORATION nor the names of its
13
+ * contributors may be used to endorse or promote products derived
14
+ * from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ */
28
+
29
+ #ifndef __cuda_stdint_h__
30
+ #define __cuda_stdint_h__
31
+
32
+ // Compiler-specific treatment for C99's stdint.h
33
+ //
34
+ // By default, this header will use the standard headers (so it
35
+ // is your responsibility to make sure they are available), except
36
+ // on MSVC before Visual Studio 2010, when they were not provided.
37
+ // To support old MSVC, a few of the commonly-used definitions are
38
+ // provided here. If more definitions are needed, add them here,
39
+ // or replace these definitions with a complete implementation,
40
+ // such as the ones available from Google, Boost, or MSVC10. You
41
+ // can prevent the definition of any of these types (in order to
42
+ // use your own) by #defining CU_STDINT_TYPES_ALREADY_DEFINED.
43
+
44
+ #if !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
45
+
46
+ // In VS including stdint.h forces the C++ runtime dep - provide an opt-out
47
+ // (CU_STDINT_VS_FORCE_NO_STDINT_H) for users that care (notably static
48
+ // cudart).
49
+ #if defined(_MSC_VER) && ((_MSC_VER < 1600) || defined(CU_STDINT_VS_FORCE_NO_STDINT_H))
50
+
51
+ // These definitions can be used with MSVC 8 and 9,
52
+ // which don't ship with stdint.h:
53
+
54
+ typedef unsigned char uint8_t;
55
+
56
+ typedef short int16_t;
57
+ typedef unsigned short uint16_t;
58
+
59
+ // To keep it consistent with all MSVC build. define those types
60
+ // in the exact same way they are defined with the MSVC headers
61
+ #if defined(_MSC_VER)
62
+ typedef signed char int8_t;
63
+
64
+ typedef int int32_t;
65
+ typedef unsigned int uint32_t;
66
+
67
+ typedef long long int64_t;
68
+ typedef unsigned long long uint64_t;
69
+ #else
70
+ typedef char int8_t;
71
+
72
+ typedef long int32_t;
73
+ typedef unsigned long uint32_t;
74
+
75
+ typedef __int64 int64_t;
76
+ typedef unsigned __int64 uint64_t;
77
+ #endif
78
+
79
+ #elif defined(__DJGPP__)
80
+
81
+ // These definitions can be used when compiling
82
+ // C code with DJGPP, which only provides stdint.h
83
+ // when compiling C++ code with TR1 enabled.
84
+
85
+ typedef char int8_t;
86
+ typedef unsigned char uint8_t;
87
+
88
+ typedef short int16_t;
89
+ typedef unsigned short uint16_t;
90
+
91
+ typedef long int32_t;
92
+ typedef unsigned long uint32_t;
93
+
94
+ typedef long long int64_t;
95
+ typedef unsigned long long uint64_t;
96
+
97
+ #else
98
+
99
+ // Use standard headers, as specified by C99 and C++ TR1.
100
+ // Known to be provided by:
101
+ // - gcc/glibc, supported by all versions of glibc
102
+ // - djgpp, supported since 2001
103
+ // - MSVC, supported by Visual Studio 2010 and later
104
+
105
+ #include <stdint.h>
106
+
107
+ #endif
108
+
109
+ #endif // !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
110
+
111
+
112
+ #endif // file guard
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_metrics.h ADDED
@@ -0,0 +1,825 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2011-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_METRIC_H_)
51
+ #define _CUPTI_METRIC_H_
52
+
53
+ #include <cuda.h>
54
+ #include <string.h>
55
+ #include <cuda_stdint.h>
56
+ #include <cupti_result.h>
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #if defined(__cplusplus)
67
+ extern "C" {
68
+ #endif
69
+
70
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
71
+ #pragma GCC visibility push(default)
72
+ #endif
73
+
74
+ /**
75
+ * \defgroup CUPTI_METRIC_API CUPTI Metric API
76
+ * Functions, types, and enums that implement the CUPTI Metric API.
77
+ *
78
+ * \note CUPTI metric API from the header cupti_metrics.h are not supported on devices
79
+ * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
80
+ * These API will be deprecated in a future CUDA release. These are replaced by
81
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
82
+ * in the headers nvperf_host.h and nvperf_target.h which are supported on
83
+ * devices with compute capability 7.0 and higher (i.e. Volta and later GPU
84
+ * architectures).
85
+ *
86
+ * @{
87
+ */
88
+
89
+ /**
90
+ * \brief ID for a metric.
91
+ *
92
+ * A metric provides a measure of some aspect of the device.
93
+ */
94
+ typedef uint32_t CUpti_MetricID;
95
+
96
+ /**
97
+ * \brief A metric category.
98
+ *
99
+ * Each metric is assigned to a category that represents the general
100
+ * type of the metric. A metric's category is accessed using \ref
101
+ * cuptiMetricGetAttribute and the CUPTI_METRIC_ATTR_CATEGORY
102
+ * attribute.
103
+ */
104
+ typedef enum {
105
+ /**
106
+ * A memory related metric.
107
+ */
108
+ CUPTI_METRIC_CATEGORY_MEMORY = 0,
109
+ /**
110
+ * An instruction related metric.
111
+ */
112
+ CUPTI_METRIC_CATEGORY_INSTRUCTION = 1,
113
+ /**
114
+ * A multiprocessor related metric.
115
+ */
116
+ CUPTI_METRIC_CATEGORY_MULTIPROCESSOR = 2,
117
+ /**
118
+ * A cache related metric.
119
+ */
120
+ CUPTI_METRIC_CATEGORY_CACHE = 3,
121
+ /**
122
+ * A texture related metric.
123
+ */
124
+ CUPTI_METRIC_CATEGORY_TEXTURE = 4,
125
+ /**
126
+ *A Nvlink related metric.
127
+ */
128
+ CUPTI_METRIC_CATEGORY_NVLINK = 5,
129
+ /**
130
+ *A PCIe related metric.
131
+ */
132
+ CUPTI_METRIC_CATEGORY_PCIE = 6,
133
+ CUPTI_METRIC_CATEGORY_FORCE_INT = 0x7fffffff,
134
+ } CUpti_MetricCategory;
135
+
136
+ /**
137
+ * \brief A metric evaluation mode.
138
+ *
139
+ * A metric can be evaluated per hardware instance to know the load balancing
140
+ * across instances of a domain or the metric can be evaluated in aggregate mode
141
+ * when the events involved in metric evaluation are from different event
142
+ * domains. It might be possible to evaluate some metrics in both
143
+ * modes for convenience. A metric's evaluation mode is accessed using \ref
144
+ * CUpti_MetricEvaluationMode and the CUPTI_METRIC_ATTR_EVALUATION_MODE
145
+ * attribute.
146
+ */
147
+ typedef enum {
148
+ /**
149
+ * If this bit is set, the metric can be profiled for each instance of the
150
+ * domain. The event values passed to \ref cuptiMetricGetValue can contain
151
+ * values for one instance of the domain. And \ref cuptiMetricGetValue can
152
+ * be called for each instance.
153
+ */
154
+ CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE = 1,
155
+ /**
156
+ * If this bit is set, the metric can be profiled over all instances. The
157
+ * event values passed to \ref cuptiMetricGetValue can be aggregated values
158
+ * of events for all instances of the domain.
159
+ */
160
+ CUPTI_METRIC_EVALUATION_MODE_AGGREGATE = 1 << 1,
161
+ CUPTI_METRIC_EVALUATION_MODE_FORCE_INT = 0x7fffffff,
162
+ } CUpti_MetricEvaluationMode;
163
+
164
+ /**
165
+ * \brief Kinds of metric values.
166
+ *
167
+ * Metric values can be one of several different kinds. Corresponding
168
+ * to each kind is a member of the CUpti_MetricValue union. The metric
169
+ * value returned by \ref cuptiMetricGetValue should be accessed using
170
+ * the appropriate member of that union based on its value kind.
171
+ */
172
+ typedef enum {
173
+ /**
174
+ * The metric value is a 64-bit double.
175
+ */
176
+ CUPTI_METRIC_VALUE_KIND_DOUBLE = 0,
177
+ /**
178
+ * The metric value is a 64-bit unsigned integer.
179
+ */
180
+ CUPTI_METRIC_VALUE_KIND_UINT64 = 1,
181
+ /**
182
+ * The metric value is a percentage represented by a 64-bit
183
+ * double. For example, 57.5% is represented by the value 57.5.
184
+ */
185
+ CUPTI_METRIC_VALUE_KIND_PERCENT = 2,
186
+ /**
187
+ * The metric value is a throughput represented by a 64-bit
188
+ * integer. The unit for throughput values is bytes/second.
189
+ */
190
+ CUPTI_METRIC_VALUE_KIND_THROUGHPUT = 3,
191
+ /**
192
+ * The metric value is a 64-bit signed integer.
193
+ */
194
+ CUPTI_METRIC_VALUE_KIND_INT64 = 4,
195
+ /**
196
+ * The metric value is a utilization level, as represented by
197
+ * CUpti_MetricValueUtilizationLevel.
198
+ */
199
+ CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL = 5,
200
+
201
+ CUPTI_METRIC_VALUE_KIND_FORCE_INT = 0x7fffffff
202
+ } CUpti_MetricValueKind;
203
+
204
+ /**
205
+ * \brief Enumeration of utilization levels for metrics values of kind
206
+ * CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL. Utilization values can
207
+ * vary from IDLE (0) to MAX (10) but the enumeration only provides
208
+ * specific names for a few values.
209
+ */
210
+ typedef enum {
211
+ CUPTI_METRIC_VALUE_UTILIZATION_IDLE = 0,
212
+ CUPTI_METRIC_VALUE_UTILIZATION_LOW = 2,
213
+ CUPTI_METRIC_VALUE_UTILIZATION_MID = 5,
214
+ CUPTI_METRIC_VALUE_UTILIZATION_HIGH = 8,
215
+ CUPTI_METRIC_VALUE_UTILIZATION_MAX = 10,
216
+ CUPTI_METRIC_VALUE_UTILIZATION_FORCE_INT = 0x7fffffff
217
+ } CUpti_MetricValueUtilizationLevel;
218
+
219
+ /**
220
+ * \brief Metric attributes.
221
+ *
222
+ * Metric attributes describe properties of a metric. These attributes
223
+ * can be read using \ref cuptiMetricGetAttribute.
224
+ */
225
+ typedef enum {
226
+ /**
227
+ * Metric name. Value is a null terminated const c-string.
228
+ */
229
+ CUPTI_METRIC_ATTR_NAME = 0,
230
+ /**
231
+ * Short description of metric. Value is a null terminated const c-string.
232
+ */
233
+ CUPTI_METRIC_ATTR_SHORT_DESCRIPTION = 1,
234
+ /**
235
+ * Long description of metric. Value is a null terminated const c-string.
236
+ */
237
+ CUPTI_METRIC_ATTR_LONG_DESCRIPTION = 2,
238
+ /**
239
+ * Category of the metric. Value is of type CUpti_MetricCategory.
240
+ */
241
+ CUPTI_METRIC_ATTR_CATEGORY = 3,
242
+ /**
243
+ * Value type of the metric. Value is of type CUpti_MetricValueKind.
244
+ */
245
+ CUPTI_METRIC_ATTR_VALUE_KIND = 4,
246
+ /**
247
+ * Metric evaluation mode. Value is of type CUpti_MetricEvaluationMode.
248
+ */
249
+ CUPTI_METRIC_ATTR_EVALUATION_MODE = 5,
250
+ CUPTI_METRIC_ATTR_FORCE_INT = 0x7fffffff,
251
+ } CUpti_MetricAttribute;
252
+
253
+ /**
254
+ * \brief A metric value.
255
+ *
256
+ * Metric values can be one of several different kinds. Corresponding
257
+ * to each kind is a member of the CUpti_MetricValue union. The metric
258
+ * value returned by \ref cuptiMetricGetValue should be accessed using
259
+ * the appropriate member of that union based on its value kind.
260
+ */
261
+ typedef union {
262
+ /*
263
+ * Value for CUPTI_METRIC_VALUE_KIND_DOUBLE.
264
+ */
265
+ double metricValueDouble;
266
+ /*
267
+ * Value for CUPTI_METRIC_VALUE_KIND_UINT64.
268
+ */
269
+ uint64_t metricValueUint64;
270
+ /*
271
+ * Value for CUPTI_METRIC_VALUE_KIND_INT64.
272
+ */
273
+ int64_t metricValueInt64;
274
+ /*
275
+ * Value for CUPTI_METRIC_VALUE_KIND_PERCENT. For example, 57.5% is
276
+ * represented by the value 57.5.
277
+ */
278
+ double metricValuePercent;
279
+ /*
280
+ * Value for CUPTI_METRIC_VALUE_KIND_THROUGHPUT. The unit for
281
+ * throughput values is bytes/second.
282
+ */
283
+ uint64_t metricValueThroughput;
284
+ /*
285
+ * Value for CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL.
286
+ */
287
+ CUpti_MetricValueUtilizationLevel metricValueUtilizationLevel;
288
+ } CUpti_MetricValue;
289
+
290
+ /**
291
+ * \brief Device class.
292
+ *
293
+ * Enumeration of device classes for metric property
294
+ * CUPTI_METRIC_PROPERTY_DEVICE_CLASS.
295
+ */
296
+ typedef enum {
297
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TESLA = 0,
298
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_QUADRO = 1,
299
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_GEFORCE = 2,
300
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS_TEGRA = 3,
301
+ } CUpti_MetricPropertyDeviceClass;
302
+
303
+ /**
304
+ * \brief Metric device properties.
305
+ *
306
+ * Metric device properties describe device properties which are needed for a metric.
307
+ * Some of these properties can be collected using cuDeviceGetAttribute.
308
+ */
309
+ typedef enum {
310
+ /*
311
+ * Number of multiprocessors on a device. This can be collected
312
+ * using value of \param CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT of
313
+ * cuDeviceGetAttribute.
314
+ */
315
+ CUPTI_METRIC_PROPERTY_MULTIPROCESSOR_COUNT,
316
+ /*
317
+ * Maximum number of warps on a multiprocessor. This can be
318
+ * collected using ratio of value of \param
319
+ * CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR and \param
320
+ * CU_DEVICE_ATTRIBUTE_WARP_SIZE of cuDeviceGetAttribute.
321
+ */
322
+ CUPTI_METRIC_PROPERTY_WARPS_PER_MULTIPROCESSOR,
323
+ /*
324
+ * GPU Time for kernel in ns. This should be profiled using CUPTI
325
+ * Activity API.
326
+ */
327
+ CUPTI_METRIC_PROPERTY_KERNEL_GPU_TIME,
328
+ /*
329
+ * Clock rate for device in KHz. This should be collected using
330
+ * value of \param CU_DEVICE_ATTRIBUTE_CLOCK_RATE of
331
+ * cuDeviceGetAttribute.
332
+ */
333
+ CUPTI_METRIC_PROPERTY_CLOCK_RATE,
334
+ /*
335
+ * Number of Frame buffer units for device. This should be collected
336
+ * using value of \param CUPTI_DEVICE_ATTRIBUTE_MAX_FRAME_BUFFERS of
337
+ * cuptiDeviceGetAttribute.
338
+ */
339
+ CUPTI_METRIC_PROPERTY_FRAME_BUFFER_COUNT,
340
+ /*
341
+ * Global memory bandwidth in KBytes/sec. This should be collected
342
+ * using value of \param CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH
343
+ * of cuptiDeviceGetAttribute.
344
+ */
345
+ CUPTI_METRIC_PROPERTY_GLOBAL_MEMORY_BANDWIDTH,
346
+ /*
347
+ * PCIE link rate in Mega bits/sec. This should be collected using
348
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_RATE of
349
+ * cuptiDeviceGetAttribute.
350
+ */
351
+ CUPTI_METRIC_PROPERTY_PCIE_LINK_RATE,
352
+ /*
353
+ * PCIE link width for device. This should be collected using
354
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH of
355
+ * cuptiDeviceGetAttribute.
356
+ */
357
+ CUPTI_METRIC_PROPERTY_PCIE_LINK_WIDTH,
358
+ /*
359
+ * PCIE generation for device. This should be collected using
360
+ * value of \param CUPTI_DEVICE_ATTR_PCIE_GEN of
361
+ * cuptiDeviceGetAttribute.
362
+ */
363
+ CUPTI_METRIC_PROPERTY_PCIE_GEN,
364
+ /*
365
+ * The device class. This should be collected using
366
+ * value of \param CUPTI_DEVICE_ATTR_DEVICE_CLASS of
367
+ * cuptiDeviceGetAttribute.
368
+ */
369
+ CUPTI_METRIC_PROPERTY_DEVICE_CLASS,
370
+ /*
371
+ * Peak single precision floating point operations that
372
+ * can be performed in one cycle by the device.
373
+ * This should be collected using value of
374
+ * \param CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE of
375
+ * cuptiDeviceGetAttribute.
376
+ */
377
+ CUPTI_METRIC_PROPERTY_FLOP_SP_PER_CYCLE,
378
+ /*
379
+ * Peak double precision floating point operations that
380
+ * can be performed in one cycle by the device.
381
+ * This should be collected using value of
382
+ * \param CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE of
383
+ * cuptiDeviceGetAttribute.
384
+ */
385
+ CUPTI_METRIC_PROPERTY_FLOP_DP_PER_CYCLE,
386
+ /*
387
+ * Number of L2 units on a device. This can be collected
388
+ * using value of \param CUPTI_DEVICE_ATTR_MAX_L2_UNITS of
389
+ * cuDeviceGetAttribute.
390
+ */
391
+ CUPTI_METRIC_PROPERTY_L2_UNITS,
392
+ /*
393
+ * Whether ECC support is enabled on the device. This can be
394
+ * collected using value of \param CU_DEVICE_ATTRIBUTE_ECC_ENABLED of
395
+ * cuDeviceGetAttribute.
396
+ */
397
+ CUPTI_METRIC_PROPERTY_ECC_ENABLED,
398
+ /*
399
+ * Peak half precision floating point operations that
400
+ * can be performed in one cycle by the device.
401
+ * This should be collected using value of
402
+ * \param CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE of
403
+ * cuptiDeviceGetAttribute.
404
+ */
405
+ CUPTI_METRIC_PROPERTY_FLOP_HP_PER_CYCLE,
406
+ /*
407
+ * NVLINK Bandwitdh for device. This should be collected
408
+ * using value of \param CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW of
409
+ * cuptiDeviceGetAttribute.
410
+ */
411
+ CUPTI_METRIC_PROPERTY_GPU_CPU_NVLINK_BANDWIDTH,
412
+ } CUpti_MetricPropertyID;
413
+
414
+ /**
415
+ * \brief Get the total number of metrics available on any device.
416
+ *
417
+ * Returns the total number of metrics available on any CUDA-capable
418
+ * devices.
419
+ *
420
+ * \param numMetrics Returns the number of metrics
421
+ *
422
+ * \retval CUPTI_SUCCESS
423
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL
424
+ */
425
+ CUptiResult CUPTIAPI cuptiGetNumMetrics(uint32_t *numMetrics);
426
+
427
+ /**
428
+ * \brief Get all the metrics available on any device.
429
+ *
430
+ * Returns the metric IDs in \p metricArray for all CUDA-capable
431
+ * devices. The size of the \p metricArray buffer is given by \p
432
+ * *arraySizeBytes. The size of the \p metricArray buffer must be at
433
+ * least \p numMetrics * sizeof(CUpti_MetricID) or all metric IDs will
434
+ * not be returned. The value returned in \p *arraySizeBytes contains
435
+ * the number of bytes returned in \p metricArray.
436
+ *
437
+ * \param arraySizeBytes The size of \p metricArray in bytes, and
438
+ * returns the number of bytes written to \p metricArray
439
+ * \param metricArray Returns the IDs of the metrics
440
+ *
441
+ * \retval CUPTI_SUCCESS
442
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
443
+ * \p metricArray are NULL
444
+ */
445
+ CUptiResult CUPTIAPI cuptiEnumMetrics(size_t *arraySizeBytes,
446
+ CUpti_MetricID *metricArray);
447
+
448
+ /**
449
+ * \brief Get the number of metrics for a device.
450
+ *
451
+ * Returns the number of metrics available for a device.
452
+ *
453
+ * \param device The CUDA device
454
+ * \param numMetrics Returns the number of metrics available for the
455
+ * device
456
+ *
457
+ * \retval CUPTI_SUCCESS
458
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
459
+ * \retval CUPTI_ERROR_INVALID_DEVICE
460
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numMetrics is NULL
461
+ */
462
+ CUptiResult CUPTIAPI cuptiDeviceGetNumMetrics(CUdevice device,
463
+ uint32_t *numMetrics);
464
+
465
+ /**
466
+ * \brief Get the metrics for a device.
467
+ *
468
+ * Returns the metric IDs in \p metricArray for a device. The size of
469
+ * the \p metricArray buffer is given by \p *arraySizeBytes. The size
470
+ * of the \p metricArray buffer must be at least \p numMetrics *
471
+ * sizeof(CUpti_MetricID) or else all metric IDs will not be
472
+ * returned. The value returned in \p *arraySizeBytes contains the
473
+ * number of bytes returned in \p metricArray.
474
+ *
475
+ * \param device The CUDA device
476
+ * \param arraySizeBytes The size of \p metricArray in bytes, and
477
+ * returns the number of bytes written to \p metricArray
478
+ * \param metricArray Returns the IDs of the metrics for the device
479
+ *
480
+ * \retval CUPTI_SUCCESS
481
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
482
+ * \retval CUPTI_ERROR_INVALID_DEVICE
483
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
484
+ * \p metricArray are NULL
485
+ */
486
+ CUptiResult CUPTIAPI cuptiDeviceEnumMetrics(CUdevice device,
487
+ size_t *arraySizeBytes,
488
+ CUpti_MetricID *metricArray);
489
+
490
+ /**
491
+ * \brief Get a metric attribute.
492
+ *
493
+ * Returns a metric attribute in \p *value. The size of the \p
494
+ * value buffer is given by \p *valueSize. The value returned in \p
495
+ * *valueSize contains the number of bytes returned in \p value.
496
+ *
497
+ * If the attribute value is a c-string that is longer than \p
498
+ * *valueSize, then only the first \p *valueSize characters will be
499
+ * returned and there will be no terminating null byte.
500
+ *
501
+ * \param metric ID of the metric
502
+ * \param attrib The metric attribute to read
503
+ * \param valueSize The size of the \p value buffer in bytes, and
504
+ * returns the number of bytes written to \p value
505
+ * \param value Returns the attribute's value
506
+ *
507
+ * \retval CUPTI_SUCCESS
508
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
509
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
510
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
511
+ * is NULL, or if \p attrib is not a metric attribute
512
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
513
+ * attribute values, indicates that the \p value buffer is too small
514
+ * to hold the attribute value.
515
+ */
516
+ CUptiResult CUPTIAPI cuptiMetricGetAttribute(CUpti_MetricID metric,
517
+ CUpti_MetricAttribute attrib,
518
+ size_t *valueSize,
519
+ void *value);
520
+
521
+ /**
522
+ * \brief Find an metric by name.
523
+ *
524
+ * Find a metric by name and return the metric ID in \p *metric.
525
+ *
526
+ * \param device The CUDA device
527
+ * \param metricName The name of metric to find
528
+ * \param metric Returns the ID of the found metric or undefined if
529
+ * unable to find the metric
530
+ *
531
+ * \retval CUPTI_SUCCESS
532
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
533
+ * \retval CUPTI_ERROR_INVALID_DEVICE
534
+ * \retval CUPTI_ERROR_INVALID_METRIC_NAME if unable to find a metric
535
+ * with name \p metricName. In this case \p *metric is undefined
536
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricName or \p
537
+ * metric are NULL.
538
+ */
539
+ CUptiResult CUPTIAPI cuptiMetricGetIdFromName(CUdevice device,
540
+ const char *metricName,
541
+ CUpti_MetricID *metric);
542
+
543
+ /**
544
+ * \brief Get number of events required to calculate a metric.
545
+ *
546
+ * Returns the number of events in \p numEvents that are required to
547
+ * calculate a metric.
548
+ *
549
+ * \param metric ID of the metric
550
+ * \param numEvents Returns the number of events required for the metric
551
+ *
552
+ * \retval CUPTI_SUCCESS
553
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
554
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
555
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL
556
+ */
557
+ CUptiResult CUPTIAPI cuptiMetricGetNumEvents(CUpti_MetricID metric,
558
+ uint32_t *numEvents);
559
+
560
+ /**
561
+ * \brief Get the events required to calculating a metric.
562
+ *
563
+ * Gets the event IDs in \p eventIdArray required to calculate a \p
564
+ * metric. The size of the \p eventIdArray buffer is given by \p
565
+ * *eventIdArraySizeBytes and must be at least \p numEvents *
566
+ * sizeof(CUpti_EventID) or all events will not be returned. The value
567
+ * returned in \p *eventIdArraySizeBytes contains the number of bytes
568
+ * returned in \p eventIdArray.
569
+ *
570
+ * \param metric ID of the metric
571
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes,
572
+ * and returns the number of bytes written to \p eventIdArray
573
+ * \param eventIdArray Returns the IDs of the events required to
574
+ * calculate \p metric
575
+ *
576
+ * \retval CUPTI_SUCCESS
577
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
578
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
579
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArraySizeBytes or \p
580
+ * eventIdArray are NULL.
581
+ */
582
+ CUptiResult CUPTIAPI cuptiMetricEnumEvents(CUpti_MetricID metric,
583
+ size_t *eventIdArraySizeBytes,
584
+ CUpti_EventID *eventIdArray);
585
+
586
+ /**
587
+ * \brief Get number of properties required to calculate a metric.
588
+ *
589
+ * Returns the number of properties in \p numProp that are required to
590
+ * calculate a metric.
591
+ *
592
+ * \param metric ID of the metric
593
+ * \param numProp Returns the number of properties required for the
594
+ * metric
595
+ *
596
+ * \retval CUPTI_SUCCESS
597
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
598
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
599
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numProp is NULL
600
+ */
601
+ CUptiResult CUPTIAPI cuptiMetricGetNumProperties(CUpti_MetricID metric,
602
+ uint32_t *numProp);
603
+
604
+ /**
605
+ * \brief Get the properties required to calculating a metric.
606
+ *
607
+ * Gets the property IDs in \p propIdArray required to calculate a \p
608
+ * metric. The size of the \p propIdArray buffer is given by \p
609
+ * *propIdArraySizeBytes and must be at least \p numProp *
610
+ * sizeof(CUpti_DeviceAttribute) or all properties will not be
611
+ * returned. The value returned in \p *propIdArraySizeBytes contains
612
+ * the number of bytes returned in \p propIdArray.
613
+ *
614
+ * \param metric ID of the metric
615
+ * \param propIdArraySizeBytes The size of \p propIdArray in bytes,
616
+ * and returns the number of bytes written to \p propIdArray
617
+ * \param propIdArray Returns the IDs of the properties required to
618
+ * calculate \p metric
619
+ *
620
+ * \retval CUPTI_SUCCESS
621
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
622
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
623
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p propIdArraySizeBytes or \p
624
+ * propIdArray are NULL.
625
+ */
626
+ CUptiResult CUPTIAPI cuptiMetricEnumProperties(CUpti_MetricID metric,
627
+ size_t *propIdArraySizeBytes,
628
+ CUpti_MetricPropertyID *propIdArray);
629
+
630
+
631
+ /**
632
+ * \brief For a metric get the groups of events that must be collected
633
+ * in the same pass.
634
+ *
635
+ * For a metric get the groups of events that must be collected in the
636
+ * same pass to ensure that the metric is calculated correctly. If the
637
+ * events are not collected as specified then the metric value may be
638
+ * inaccurate.
639
+ *
640
+ * The function returns NULL if a metric does not have any required
641
+ * event group. In this case the events needed for the metric can be
642
+ * grouped in any manner for collection.
643
+ *
644
+ * \param context The context for event collection
645
+ * \param metric The metric ID
646
+ * \param eventGroupSets Returns a CUpti_EventGroupSets object that
647
+ * indicates the events that must be collected in the same pass to
648
+ * ensure the metric is calculated correctly. Returns NULL if no
649
+ * grouping is required for metric
650
+ * \retval CUPTI_SUCCESS
651
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
652
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
653
+ */
654
+ CUptiResult CUPTIAPI cuptiMetricGetRequiredEventGroupSets(CUcontext context,
655
+ CUpti_MetricID metric,
656
+ CUpti_EventGroupSets **eventGroupSets);
657
+
658
+ /**
659
+ * \brief For a set of metrics, get the grouping that indicates the
660
+ * number of passes and the event groups necessary to collect the
661
+ * events required for those metrics.
662
+ *
663
+ * For a set of metrics, get the grouping that indicates the number of
664
+ * passes and the event groups necessary to collect the events
665
+ * required for those metrics.
666
+ *
667
+ * \see cuptiEventGroupSetsCreate for details on event group set
668
+ * creation.
669
+ *
670
+ * \param context The context for event collection
671
+ * \param metricIdArraySizeBytes Size of the metricIdArray in bytes
672
+ * \param metricIdArray Array of metric IDs
673
+ * \param eventGroupPasses Returns a CUpti_EventGroupSets object that
674
+ * indicates the number of passes required to collect the events and
675
+ * the events to collect on each pass
676
+ *
677
+ * \retval CUPTI_SUCCESS
678
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
679
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
680
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
681
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricIdArray or
682
+ * \p eventGroupPasses is NULL
683
+ */
684
+ CUptiResult CUPTIAPI cuptiMetricCreateEventGroupSets(CUcontext context,
685
+ size_t metricIdArraySizeBytes,
686
+ CUpti_MetricID *metricIdArray,
687
+ CUpti_EventGroupSets **eventGroupPasses);
688
+
689
+ /**
690
+ * \brief Calculate the value for a metric.
691
+ *
692
+ * Use the events collected for a metric to calculate the metric
693
+ * value. Metric value evaluation depends on the evaluation mode
694
+ * \ref CUpti_MetricEvaluationMode that the metric supports.
695
+ * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE,
696
+ * then it assumes that the input event value is for one domain instance.
697
+ * If a metric has evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE,
698
+ * it assumes that input event values are
699
+ * normalized to represent all domain instances on a device. For the
700
+ * most accurate metric collection, the events required for the metric
701
+ * should be collected for all profiled domain instances. For example,
702
+ * to collect all instances of an event, set the
703
+ * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on
704
+ * the group containing the event to 1. The normalized value for the
705
+ * event is then: (\p sum_event_values * \p totalInstanceCount) / \p
706
+ * instanceCount, where \p sum_event_values is the summation of the
707
+ * event values across all profiled domain instances, \p
708
+ * totalInstanceCount is obtained from querying
709
+ * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount
710
+ * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or
711
+ * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT).
712
+ *
713
+ * \param device The CUDA device that the metric is being calculated for
714
+ * \param metric The metric ID
715
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes
716
+ * \param eventIdArray The event IDs required to calculate \p metric
717
+ * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes
718
+ * \param eventValueArray The normalized event values required to
719
+ * calculate \p metric. The values must be order to match the order of
720
+ * events in \p eventIdArray
721
+ * \param timeDuration The duration over which the events were
722
+ * collected, in ns
723
+ * \param metricValue Returns the value for the metric
724
+ *
725
+ * \retval CUPTI_SUCCESS
726
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
727
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
728
+ * \retval CUPTI_ERROR_INVALID_OPERATION
729
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the
730
+ * eventIdArray does not contain all the events needed for metric
731
+ * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the
732
+ * event values required for the metric is CUPTI_EVENT_OVERFLOW
733
+ * \retval CUPTI_ERROR_INVALID_METRIC_VALUE if the computed metric value
734
+ * cannot be represented in the metric's value type. For example,
735
+ * if the metric value type is unsigned and the computed metric value is negative
736
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue,
737
+ * \p eventIdArray or \p eventValueArray is NULL
738
+ */
739
+ CUptiResult CUPTIAPI cuptiMetricGetValue(CUdevice device,
740
+ CUpti_MetricID metric,
741
+ size_t eventIdArraySizeBytes,
742
+ CUpti_EventID *eventIdArray,
743
+ size_t eventValueArraySizeBytes,
744
+ uint64_t *eventValueArray,
745
+ uint64_t timeDuration,
746
+ CUpti_MetricValue *metricValue);
747
+
748
+ /**
749
+ * \brief Calculate the value for a metric.
750
+ *
751
+ * Use the events and properties collected for a metric to calculate
752
+ * the metric value. Metric value evaluation depends on the evaluation
753
+ * mode \ref CUpti_MetricEvaluationMode that the metric supports. If
754
+ * a metric has evaluation mode as
755
+ * CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE, then it assumes that the
756
+ * input event value is for one domain instance. If a metric has
757
+ * evaluation mode as CUPTI_METRIC_EVALUATION_MODE_AGGREGATE, it
758
+ * assumes that input event values are normalized to represent all
759
+ * domain instances on a device. For the most accurate metric
760
+ * collection, the events required for the metric should be collected
761
+ * for all profiled domain instances. For example, to collect all
762
+ * instances of an event, set the
763
+ * CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES attribute on
764
+ * the group containing the event to 1. The normalized value for the
765
+ * event is then: (\p sum_event_values * \p totalInstanceCount) / \p
766
+ * instanceCount, where \p sum_event_values is the summation of the
767
+ * event values across all profiled domain instances, \p
768
+ * totalInstanceCount is obtained from querying
769
+ * CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT and \p instanceCount
770
+ * is obtained from querying CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT (or
771
+ * CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT).
772
+ *
773
+ * \param metric The metric ID
774
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes
775
+ * \param eventIdArray The event IDs required to calculate \p metric
776
+ * \param eventValueArraySizeBytes The size of \p eventValueArray in bytes
777
+ * \param eventValueArray The normalized event values required to
778
+ * calculate \p metric. The values must be order to match the order of
779
+ * events in \p eventIdArray
780
+ * \param propIdArraySizeBytes The size of \p propIdArray in bytes
781
+ * \param propIdArray The metric property IDs required to calculate \p metric
782
+ * \param propValueArraySizeBytes The size of \p propValueArray in bytes
783
+ * \param propValueArray The metric property values required to
784
+ * calculate \p metric. The values must be order to match the order of
785
+ * metric properties in \p propIdArray
786
+ * \param metricValue Returns the value for the metric
787
+ *
788
+ * \retval CUPTI_SUCCESS
789
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
790
+ * \retval CUPTI_ERROR_INVALID_METRIC_ID
791
+ * \retval CUPTI_ERROR_INVALID_OPERATION
792
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if the
793
+ * eventIdArray does not contain all the events needed for metric
794
+ * \retval CUPTI_ERROR_INVALID_EVENT_VALUE if any of the
795
+ * event values required for the metric is CUPTI_EVENT_OVERFLOW
796
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if the computed metric value
797
+ * cannot be represented in the metric's value type. For example,
798
+ * if the metric value type is unsigned and the computed metric value is negative
799
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p metricValue,
800
+ * \p eventIdArray or \p eventValueArray is NULL
801
+ */
802
+ CUptiResult CUPTIAPI cuptiMetricGetValue2(CUpti_MetricID metric,
803
+ size_t eventIdArraySizeBytes,
804
+ CUpti_EventID *eventIdArray,
805
+ size_t eventValueArraySizeBytes,
806
+ uint64_t *eventValueArray,
807
+ size_t propIdArraySizeBytes,
808
+ CUpti_MetricPropertyID *propIdArray,
809
+ size_t propValueArraySizeBytes,
810
+ uint64_t *propValueArray,
811
+ CUpti_MetricValue *metricValue);
812
+
813
+ /** @} */ /* END CUPTI_METRIC_API */
814
+
815
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
816
+ #pragma GCC visibility pop
817
+ #endif
818
+
819
+ #if defined(__cplusplus)
820
+ }
821
+ #endif
822
+
823
+ #endif /*_CUPTI_METRIC_H_*/
824
+
825
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_result.h ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_RESULT_H_)
51
+ #define _CUPTI_RESULT_H_
52
+
53
+ #ifndef CUPTIAPI
54
+ #ifdef _WIN32
55
+ #define CUPTIAPI __stdcall
56
+ #else
57
+ #define CUPTIAPI
58
+ #endif
59
+ #endif
60
+
61
+ #if defined(__cplusplus)
62
+ extern "C" {
63
+ #endif
64
+
65
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
66
+ #pragma GCC visibility push(default)
67
+ #endif
68
+
69
+ /**
70
+ * \defgroup CUPTI_RESULT_API CUPTI Result Codes
71
+ * Error and result codes returned by CUPTI functions.
72
+ * @{
73
+ */
74
+
75
+ /**
76
+ * \brief CUPTI result codes.
77
+ *
78
+ * Error and result codes returned by CUPTI functions.
79
+ */
80
+ typedef enum {
81
+ /**
82
+ * No error.
83
+ */
84
+ CUPTI_SUCCESS = 0,
85
+ /**
86
+ * One or more of the parameters is invalid.
87
+ */
88
+ CUPTI_ERROR_INVALID_PARAMETER = 1,
89
+ /**
90
+ * The device does not correspond to a valid CUDA device.
91
+ */
92
+ CUPTI_ERROR_INVALID_DEVICE = 2,
93
+ /**
94
+ * The context is NULL or not valid.
95
+ */
96
+ CUPTI_ERROR_INVALID_CONTEXT = 3,
97
+ /**
98
+ * The event domain id is invalid.
99
+ */
100
+ CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID = 4,
101
+ /**
102
+ * The event id is invalid.
103
+ */
104
+ CUPTI_ERROR_INVALID_EVENT_ID = 5,
105
+ /**
106
+ * The event name is invalid.
107
+ */
108
+ CUPTI_ERROR_INVALID_EVENT_NAME = 6,
109
+ /**
110
+ * The current operation cannot be performed due to dependency on
111
+ * other factors.
112
+ */
113
+ CUPTI_ERROR_INVALID_OPERATION = 7,
114
+ /**
115
+ * Unable to allocate enough memory to perform the requested
116
+ * operation.
117
+ */
118
+ CUPTI_ERROR_OUT_OF_MEMORY = 8,
119
+ /**
120
+ * An error occurred on the performance monitoring hardware.
121
+ */
122
+ CUPTI_ERROR_HARDWARE = 9,
123
+ /**
124
+ * The output buffer size is not sufficient to return all
125
+ * requested data.
126
+ */
127
+ CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT = 10,
128
+ /**
129
+ * API is not implemented.
130
+ */
131
+ CUPTI_ERROR_API_NOT_IMPLEMENTED = 11,
132
+ /**
133
+ * The maximum limit is reached.
134
+ */
135
+ CUPTI_ERROR_MAX_LIMIT_REACHED = 12,
136
+ /**
137
+ * The object is not yet ready to perform the requested operation.
138
+ */
139
+ CUPTI_ERROR_NOT_READY = 13,
140
+ /**
141
+ * The current operation is not compatible with the current state
142
+ * of the object
143
+ */
144
+ CUPTI_ERROR_NOT_COMPATIBLE = 14,
145
+ /**
146
+ * CUPTI is unable to initialize its connection to the CUDA
147
+ * driver.
148
+ */
149
+ CUPTI_ERROR_NOT_INITIALIZED = 15,
150
+ /**
151
+ * The metric id is invalid.
152
+ */
153
+ CUPTI_ERROR_INVALID_METRIC_ID = 16,
154
+ /**
155
+ * The metric name is invalid.
156
+ */
157
+ CUPTI_ERROR_INVALID_METRIC_NAME = 17,
158
+ /**
159
+ * The queue is empty.
160
+ */
161
+ CUPTI_ERROR_QUEUE_EMPTY = 18,
162
+ /**
163
+ * Invalid handle (internal?).
164
+ */
165
+ CUPTI_ERROR_INVALID_HANDLE = 19,
166
+ /**
167
+ * Invalid stream.
168
+ */
169
+ CUPTI_ERROR_INVALID_STREAM = 20,
170
+ /**
171
+ * Invalid kind.
172
+ */
173
+ CUPTI_ERROR_INVALID_KIND = 21,
174
+ /**
175
+ * Invalid event value.
176
+ */
177
+ CUPTI_ERROR_INVALID_EVENT_VALUE = 22,
178
+ /**
179
+ * CUPTI is disabled due to conflicts with other enabled profilers
180
+ */
181
+ CUPTI_ERROR_DISABLED = 23,
182
+ /**
183
+ * Invalid module.
184
+ */
185
+ CUPTI_ERROR_INVALID_MODULE = 24,
186
+ /**
187
+ * Invalid metric value.
188
+ */
189
+ CUPTI_ERROR_INVALID_METRIC_VALUE = 25,
190
+ /**
191
+ * The performance monitoring hardware is in use by other client.
192
+ */
193
+ CUPTI_ERROR_HARDWARE_BUSY = 26,
194
+ /**
195
+ * The attempted operation is not supported on the current
196
+ * system or device.
197
+ */
198
+ CUPTI_ERROR_NOT_SUPPORTED = 27,
199
+ /**
200
+ * Unified memory profiling is not supported on the system.
201
+ * Potential reason could be unsupported OS or architecture.
202
+ */
203
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED = 28,
204
+ /**
205
+ * Unified memory profiling is not supported on the device
206
+ */
207
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE = 29,
208
+ /**
209
+ * Unified memory profiling is not supported on a multi-GPU
210
+ * configuration without P2P support between any pair of devices
211
+ */
212
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES = 30,
213
+ /**
214
+ * Unified memory profiling is not supported under the
215
+ * Multi-Process Service (MPS) environment. CUDA 7.5 removes this
216
+ * restriction.
217
+ */
218
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_WITH_MPS = 31,
219
+ /**
220
+ * In CUDA 9.0, devices with compute capability 7.0 don't
221
+ * support CDP tracing
222
+ */
223
+ CUPTI_ERROR_CDP_TRACING_NOT_SUPPORTED = 32,
224
+ /**
225
+ * Profiling on virtualized GPU is not supported.
226
+ */
227
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 33,
228
+ /**
229
+ * Profiling results might be incorrect for CUDA applications
230
+ * compiled with nvcc version older than 9.0 for devices with
231
+ * compute capability 6.0 and 6.1.
232
+ * Profiling session will continue and CUPTI will notify it using this error code.
233
+ * User is advised to recompile the application code with nvcc version 9.0 or later.
234
+ * Ignore this warning if code is already compiled with the recommended nvcc version.
235
+ */
236
+ CUPTI_ERROR_CUDA_COMPILER_NOT_COMPATIBLE = 34,
237
+ /**
238
+ * User doesn't have sufficient privileges which are required to
239
+ * start the profiling session.
240
+ * One possible reason for this may be that the NVIDIA driver or your system
241
+ * administrator may have restricted access to the NVIDIA GPU performance counters.
242
+ * To learn how to resolve this issue and find more information, please visit
243
+ * https://developer.nvidia.com/CUPTI_ERROR_INSUFFICIENT_PRIVILEGES
244
+ */
245
+ CUPTI_ERROR_INSUFFICIENT_PRIVILEGES = 35,
246
+ /**
247
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
248
+ * metric API from the header cupti_metrics.h are not compatible with the
249
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
250
+ * in the headers nvperf_host.h and nvperf_target.h.
251
+ */
252
+ CUPTI_ERROR_OLD_PROFILER_API_INITIALIZED = 36,
253
+ /**
254
+ * Missing definition of the OpenACC API routine in the linked OpenACC library.
255
+ *
256
+ * One possible reason is that OpenACC library is linked statically in the
257
+ * user application, which might not have the definition of all the OpenACC
258
+ * API routines needed for the OpenACC profiling, as compiler might ignore
259
+ * definitions for the functions not used in the application. This issue
260
+ * can be mitigated by linking the OpenACC library dynamically.
261
+ */
262
+ CUPTI_ERROR_OPENACC_UNDEFINED_ROUTINE = 37,
263
+ /**
264
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
265
+ * metric API from the header cupti_metrics.h are not supported on devices with
266
+ * compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
267
+ * These API will be deprecated in a future CUDA release. These are replaced by
268
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
269
+ * in the headers nvperf_host.h and nvperf_target.h.
270
+ */
271
+ CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED = 38,
272
+ /**
273
+ * CUPTI doesn't allow multiple callback subscribers. Only a single subscriber
274
+ * can be registered at a time.
275
+ * Same error code is used when application is launched using NVIDIA tools
276
+ * like nvprof, Visual Profiler, Nsight Systems, Nsight Compute, cuda-gdb and
277
+ * cuda-memcheck.
278
+ */
279
+ CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED = 39,
280
+ /**
281
+ * Profiling on virtualized GPU is not allowed by hypervisor.
282
+ */
283
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_INSUFFICIENT_PRIVILEGES = 40,
284
+ /**
285
+ * Profiling and tracing are not allowed when confidential computing mode
286
+ * is enabled.
287
+ */
288
+ CUPTI_ERROR_CONFIDENTIAL_COMPUTING_NOT_SUPPORTED = 41,
289
+ /**
290
+ * CUPTI does not support NVIDIA Crypto Mining Processors (CMP).
291
+ * For more information, please visit https://developer.nvidia.com/ERR_NVCMPGPU
292
+ */
293
+ CUPTI_ERROR_CMP_DEVICE_NOT_SUPPORTED = 42,
294
+ /**
295
+ * An unknown internal error has occurred.
296
+ */
297
+ CUPTI_ERROR_UNKNOWN = 999,
298
+ CUPTI_ERROR_FORCE_INT = 0x7fffffff
299
+ } CUptiResult;
300
+
301
+ /**
302
+ * \brief Get the descriptive string for a CUptiResult.
303
+ *
304
+ * Return the descriptive string for a CUptiResult in \p *str.
305
+ * \note \b Thread-safety: this function is thread safe.
306
+ *
307
+ * \param result The result to get the string for
308
+ * \param str Returns the string
309
+ *
310
+ * \retval CUPTI_SUCCESS on success
311
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p str is NULL or \p
312
+ * result is not a valid CUptiResult
313
+ */
314
+ CUptiResult CUPTIAPI cuptiGetResultString(CUptiResult result, const char **str);
315
+
316
+ /** @} */ /* END CUPTI_RESULT_API */
317
+
318
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
319
+ #pragma GCC visibility pop
320
+ #endif
321
+
322
+ #if defined(__cplusplus)
323
+ }
324
+ #endif
325
+
326
+ #endif /*_CUPTI_RESULT_H_*/
327
+
328
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_runtime_cbid.h ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ typedef enum CUpti_runtime_api_trace_cbid_enum {
10
+ CUPTI_RUNTIME_TRACE_CBID_INVALID = 0,
11
+ CUPTI_RUNTIME_TRACE_CBID_cudaDriverGetVersion_v3020 = 1,
12
+ CUPTI_RUNTIME_TRACE_CBID_cudaRuntimeGetVersion_v3020 = 2,
13
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceCount_v3020 = 3,
14
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v3020 = 4,
15
+ CUPTI_RUNTIME_TRACE_CBID_cudaChooseDevice_v3020 = 5,
16
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetChannelDesc_v3020 = 6,
17
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateChannelDesc_v3020 = 7,
18
+ CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020 = 8,
19
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetupArgument_v3020 = 9,
20
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetLastError_v3020 = 10,
21
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeekAtLastError_v3020 = 11,
22
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorString_v3020 = 12,
23
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 = 13,
24
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetCacheConfig_v3020 = 14,
25
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetAttributes_v3020 = 15,
26
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020 = 16,
27
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDevice_v3020 = 17,
28
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetValidDevices_v3020 = 18,
29
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDeviceFlags_v3020 = 19,
30
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020 = 20,
31
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocPitch_v3020 = 21,
32
+ CUPTI_RUNTIME_TRACE_CBID_cudaFree_v3020 = 22,
33
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocArray_v3020 = 23,
34
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeArray_v3020 = 24,
35
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocHost_v3020 = 25,
36
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeHost_v3020 = 26,
37
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostAlloc_v3020 = 27,
38
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostGetDevicePointer_v3020 = 28,
39
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostGetFlags_v3020 = 29,
40
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemGetInfo_v3020 = 30,
41
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020 = 31,
42
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_v3020 = 32,
43
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_v3020 = 33,
44
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_v3020 = 34,
45
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_v3020 = 35,
46
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_v3020 = 36,
47
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_v3020 = 37,
48
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_v3020 = 38,
49
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_v3020 = 39,
50
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_v3020 = 40,
51
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020 = 41,
52
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_v3020 = 42,
53
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_v3020 = 43,
54
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_v3020 = 44,
55
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_v3020 = 45,
56
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_v3020 = 46,
57
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_v3020 = 47,
58
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_v3020 = 48,
59
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset_v3020 = 49,
60
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_v3020 = 50,
61
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_v3020 = 51,
62
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_v3020 = 52,
63
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolAddress_v3020 = 53,
64
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolSize_v3020 = 54,
65
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture_v3020 = 55,
66
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture2D_v3020 = 56,
67
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToArray_v3020 = 57,
68
+ CUPTI_RUNTIME_TRACE_CBID_cudaUnbindTexture_v3020 = 58,
69
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureAlignmentOffset_v3020 = 59,
70
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureReference_v3020 = 60,
71
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindSurfaceToArray_v3020 = 61,
72
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceReference_v3020 = 62,
73
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLSetGLDevice_v3020 = 63,
74
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLRegisterBufferObject_v3020 = 64,
75
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObject_v3020 = 65,
76
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObject_v3020 = 66,
77
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnregisterBufferObject_v3020 = 67,
78
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLSetBufferObjectMapFlags_v3020 = 68,
79
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObjectAsync_v3020 = 69,
80
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObjectAsync_v3020 = 70,
81
+ CUPTI_RUNTIME_TRACE_CBID_cudaWGLGetDevice_v3020 = 71,
82
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterImage_v3020 = 72,
83
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterBuffer_v3020 = 73,
84
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnregisterResource_v3020 = 74,
85
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceSetMapFlags_v3020 = 75,
86
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsMapResources_v3020 = 76,
87
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnmapResources_v3020 = 77,
88
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedPointer_v3020 = 78,
89
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsSubResourceGetMappedArray_v3020 = 79,
90
+ CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUGetDevice_v3020 = 80,
91
+ CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUSetVDPAUDevice_v3020 = 81,
92
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterVideoSurface_v3020 = 82,
93
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterOutputSurface_v3020 = 83,
94
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevice_v3020 = 84,
95
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevices_v3020 = 85,
96
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11SetDirect3DDevice_v3020 = 86,
97
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D11RegisterResource_v3020 = 87,
98
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevice_v3020 = 88,
99
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevices_v3020 = 89,
100
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10SetDirect3DDevice_v3020 = 90,
101
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D10RegisterResource_v3020 = 91,
102
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10RegisterResource_v3020 = 92,
103
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnregisterResource_v3020 = 93,
104
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10MapResources_v3020 = 94,
105
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnmapResources_v3020 = 95,
106
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceSetMapFlags_v3020 = 96,
107
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetSurfaceDimensions_v3020 = 97,
108
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedArray_v3020 = 98,
109
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPointer_v3020 = 99,
110
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedSize_v3020 = 100,
111
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPitch_v3020 = 101,
112
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevice_v3020 = 102,
113
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevices_v3020 = 103,
114
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9SetDirect3DDevice_v3020 = 104,
115
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDirect3DDevice_v3020 = 105,
116
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D9RegisterResource_v3020 = 106,
117
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterResource_v3020 = 107,
118
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterResource_v3020 = 108,
119
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapResources_v3020 = 109,
120
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapResources_v3020 = 110,
121
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceSetMapFlags_v3020 = 111,
122
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetSurfaceDimensions_v3020 = 112,
123
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedArray_v3020 = 113,
124
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPointer_v3020 = 114,
125
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedSize_v3020 = 115,
126
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPitch_v3020 = 116,
127
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9Begin_v3020 = 117,
128
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9End_v3020 = 118,
129
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterVertexBuffer_v3020 = 119,
130
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterVertexBuffer_v3020 = 120,
131
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapVertexBuffer_v3020 = 121,
132
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapVertexBuffer_v3020 = 122,
133
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadExit_v3020 = 123,
134
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForDevice_v3020 = 124,
135
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForHost_v3020 = 125,
136
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSynchronize_v3020 = 126,
137
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetLimit_v3020 = 127,
138
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetLimit_v3020 = 128,
139
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreate_v3020 = 129,
140
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v3020 = 130,
141
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_v3020 = 131,
142
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_v3020 = 132,
143
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreate_v3020 = 133,
144
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateWithFlags_v3020 = 134,
145
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_v3020 = 135,
146
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventDestroy_v3020 = 136,
147
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventSynchronize_v3020 = 137,
148
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventQuery_v3020 = 138,
149
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v3020 = 139,
150
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3D_v3020 = 140,
151
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3DArray_v3020 = 141,
152
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_v3020 = 142,
153
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_v3020 = 143,
154
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_v3020 = 144,
155
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_v3020 = 145,
156
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetCacheConfig_v3020 = 146,
157
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_v3020 = 147,
158
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDirect3DDevice_v3020 = 148,
159
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDirect3DDevice_v3020 = 149,
160
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetCacheConfig_v3020 = 150,
161
+ CUPTI_RUNTIME_TRACE_CBID_cudaPointerGetAttributes_v4000 = 151,
162
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostRegister_v4000 = 152,
163
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostUnregister_v4000 = 153,
164
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceCanAccessPeer_v4000 = 154,
165
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceEnablePeerAccess_v4000 = 155,
166
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceDisablePeerAccess_v4000 = 156,
167
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerRegister_v4000 = 157,
168
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerUnregister_v4000 = 158,
169
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerGetDevicePointer_v4000 = 159,
170
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeer_v4000 = 160,
171
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeerAsync_v4000 = 161,
172
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_v4000 = 162,
173
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_v4000 = 163,
174
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceReset_v3020 = 164,
175
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020 = 165,
176
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetLimit_v3020 = 166,
177
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetLimit_v3020 = 167,
178
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetCacheConfig_v3020 = 168,
179
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetCacheConfig_v3020 = 169,
180
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerInitialize_v4000 = 170,
181
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStart_v4000 = 171,
182
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStop_v4000 = 172,
183
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetByPCIBusId_v4010 = 173,
184
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetPCIBusId_v4010 = 174,
185
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLGetDevices_v4010 = 175,
186
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetEventHandle_v4010 = 176,
187
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenEventHandle_v4010 = 177,
188
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetMemHandle_v4010 = 178,
189
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenMemHandle_v4010 = 179,
190
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcCloseMemHandle_v4010 = 180,
191
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetInfo_v4010 = 181,
192
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetSharedMemConfig_v4020 = 182,
193
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetSharedMemConfig_v4020 = 183,
194
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetSharedMemConfig_v4020 = 184,
195
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v5000 = 185,
196
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyTextureObject_v5000 = 186,
197
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceDesc_v5000 = 187,
198
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v5000 = 188,
199
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateSurfaceObject_v5000 = 189,
200
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroySurfaceObject_v5000 = 190,
201
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceObjectResourceDesc_v5000 = 191,
202
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocMipmappedArray_v5000 = 192,
203
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetMipmappedArrayLevel_v5000 = 193,
204
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeMipmappedArray_v5000 = 194,
205
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToMipmappedArray_v5000 = 195,
206
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedMipmappedArray_v5000 = 196,
207
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_v5000 = 197,
208
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithFlags_v5000 = 198,
209
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceViewDesc_v5000 = 199,
210
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetAttribute_v5000 = 200,
211
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v5050 = 201,
212
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithPriority_v5050 = 202,
213
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_v5050 = 203,
214
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_v5050 = 204,
215
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetStreamPriorityRange_v5050 = 205,
216
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocManaged_v6000 = 206,
217
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000 = 207,
218
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_v6000 = 208,
219
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorName_v6050 = 209,
220
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050 = 210,
221
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000 = 211,
222
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceFlags_v7000 = 212,
223
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_ptsz_v7000 = 213,
224
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_ptsz_v7000 = 214,
225
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_ptds_v7000 = 215,
226
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_ptds_v7000 = 216,
227
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_ptds_v7000 = 217,
228
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_ptds_v7000 = 218,
229
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_ptds_v7000 = 219,
230
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_ptds_v7000 = 220,
231
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_ptds_v7000 = 221,
232
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_ptds_v7000 = 222,
233
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_ptds_v7000 = 223,
234
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_ptds_v7000 = 224,
235
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_ptsz_v7000 = 225,
236
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_ptsz_v7000 = 226,
237
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_ptsz_v7000 = 227,
238
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_ptsz_v7000 = 228,
239
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_ptsz_v7000 = 229,
240
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_ptsz_v7000 = 230,
241
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_ptsz_v7000 = 231,
242
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_ptsz_v7000 = 232,
243
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset_ptds_v7000 = 233,
244
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_ptds_v7000 = 234,
245
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_ptsz_v7000 = 235,
246
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_ptsz_v7000 = 236,
247
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_ptsz_v7000 = 237,
248
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_ptsz_v7000 = 238,
249
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_ptsz_v7000 = 239,
250
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_ptsz_v7000 = 240,
251
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_ptsz_v7000 = 241,
252
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_ptsz_v7000 = 242,
253
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_ptds_v7000 = 243,
254
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_ptsz_v7000 = 244,
255
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_ptds_v7000 = 245,
256
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_ptsz_v7000 = 246,
257
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_ptsz_v7000 = 247,
258
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_ptsz_v7000 = 248,
259
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_ptds_v7000 = 249,
260
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_ptsz_v7000 = 250,
261
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000 = 251,
262
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v8000 = 252,
263
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_ptsz_v8000 = 253,
264
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v8000 = 254,
265
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetP2PAttribute_v8000 = 255,
266
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsEGLRegisterImage_v7000 = 256,
267
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnect_v7000 = 257,
268
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerDisconnect_v7000 = 258,
269
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerAcquireFrame_v7000 = 259,
270
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerReleaseFrame_v7000 = 260,
271
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerConnect_v7000 = 261,
272
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerDisconnect_v7000 = 262,
273
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerPresentFrame_v7000 = 263,
274
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerReturnFrame_v7000 = 264,
275
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedEglFrame_v7000 = 265,
276
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttribute_v8000 = 266,
277
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttributes_v8000 = 267,
278
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnectWithFlags_v7000 = 268,
279
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_v9000 = 269,
280
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_ptsz_v9000 = 270,
281
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateFromEGLSync_v9000 = 271,
282
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernelMultiDevice_v9000 = 272,
283
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetAttribute_v9000 = 273,
284
+ CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalMemory_v10000 = 274,
285
+ CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedBuffer_v10000 = 275,
286
+ CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedMipmappedArray_v10000 = 276,
287
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalMemory_v10000 = 277,
288
+ CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalSemaphore_v10000 = 278,
289
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v10000 = 279,
290
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_ptsz_v10000 = 280,
291
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v10000 = 281,
292
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_ptsz_v10000 = 282,
293
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalSemaphore_v10000 = 283,
294
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_v10000 = 284,
295
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_ptsz_v10000 = 285,
296
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphCreate_v10000 = 286,
297
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetParams_v10000 = 287,
298
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetParams_v10000 = 288,
299
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddKernelNode_v10000 = 289,
300
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode_v10000 = 290,
301
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeGetParams_v10000 = 291,
302
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams_v10000 = 292,
303
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemsetNode_v10000 = 293,
304
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeGetParams_v10000 = 294,
305
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeSetParams_v10000 = 295,
306
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddHostNode_v10000 = 296,
307
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeGetParams_v10000 = 297,
308
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddChildGraphNode_v10000 = 298,
309
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphChildGraphNodeGetGraph_v10000 = 299,
310
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEmptyNode_v10000 = 300,
311
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphClone_v10000 = 301,
312
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeFindInClone_v10000 = 302,
313
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetType_v10000 = 303,
314
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetRootNodes_v10000 = 304,
315
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v10000 = 305,
316
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v10000 = 306,
317
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v10000 = 307,
318
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v10000 = 308,
319
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroyNode_v10000 = 309,
320
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v10000 = 310,
321
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_v10000 = 311,
322
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_ptsz_v10000 = 312,
323
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecDestroy_v10000 = 313,
324
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroy_v10000 = 314,
325
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_v10000 = 315,
326
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_ptsz_v10000 = 316,
327
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_v10000 = 317,
328
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_ptsz_v10000 = 318,
329
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_v10000 = 319,
330
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_ptsz_v10000 = 320,
331
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeSetParams_v10000 = 321,
332
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetNodes_v10000 = 322,
333
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v10000 = 323,
334
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v10010 = 324,
335
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_ptsz_v10010 = 325,
336
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecKernelNodeSetParams_v10010 = 326,
337
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadExchangeStreamCaptureMode_v10010 = 327,
338
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetNvSciSyncAttributes_v10020 = 328,
339
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyAvailableDynamicSMemPerBlock_v10200 = 329,
340
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_v10200 = 330,
341
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_ptsz_v10200 = 331,
342
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams_v10020 = 332,
343
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemsetNodeSetParams_v10020 = 333,
344
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecHostNodeSetParams_v10020 = 334,
345
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecUpdate_v10020 = 335,
346
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetFuncBySymbol_v11000 = 336,
347
+ CUPTI_RUNTIME_TRACE_CBID_cudaCtxResetPersistingL2Cache_v11000 = 337,
348
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeCopyAttributes_v11000 = 338,
349
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetAttribute_v11000 = 339,
350
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetAttribute_v11000 = 340,
351
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_v11000 = 341,
352
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_ptsz_v11000 = 342,
353
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_v11000 = 343,
354
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_ptsz_v11000 = 344,
355
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_v11000 = 345,
356
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_ptsz_v11000 = 346,
357
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetTexture1DLinearMaxWidth_v11010 = 347,
358
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_v10000 = 348,
359
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_ptsz_v10000 = 349,
360
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeToSymbol_v11010 = 350,
361
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeFromSymbol_v11010 = 351,
362
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode1D_v11010 = 352,
363
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsToSymbol_v11010 = 353,
364
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010 = 354,
365
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams1D_v11010 = 355,
366
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010 = 356,
367
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010 = 357,
368
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams1D_v11010 = 358,
369
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetSparseProperties_v11010 = 359,
370
+ CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetSparseProperties_v11010 = 360,
371
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecChildGraphNodeSetParams_v11010 = 361,
372
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventRecordNode_v11010 = 362,
373
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeGetEvent_v11010 = 363,
374
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeSetEvent_v11010 = 364,
375
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventWaitNode_v11010 = 365,
376
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeGetEvent_v11010 = 366,
377
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeSetEvent_v11010 = 367,
378
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventRecordNodeSetEvent_v11010 = 368,
379
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventWaitNodeSetEvent_v11010 = 369,
380
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_v11010 = 370,
381
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_ptsz_v11010 = 371,
382
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetDefaultMemPool_v11020 = 372,
383
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_v11020 = 373,
384
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_ptsz_v11020 = 374,
385
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_v11020 = 375,
386
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_ptsz_v11020 = 376,
387
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolTrimTo_v11020 = 377,
388
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAttribute_v11020 = 378,
389
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAttribute_v11020 = 379,
390
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAccess_v11020 = 380,
391
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetPlane_v11020 = 381,
392
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAccess_v11020 = 382,
393
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolCreate_v11020 = 383,
394
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolDestroy_v11020 = 384,
395
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetMemPool_v11020 = 385,
396
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetMemPool_v11020 = 386,
397
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportToShareableHandle_v11020 = 387,
398
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportFromShareableHandle_v11020 = 388,
399
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportPointer_v11020 = 389,
400
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportPointer_v11020 = 390,
401
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_v11020 = 391,
402
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_ptsz_v11020 = 392,
403
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_v11020 = 393,
404
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020 = 394,
405
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_v11020 = 395,
406
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020 = 396,
407
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresSignalNode_v11020 = 397,
408
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020 = 398,
409
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020 = 399,
410
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresWaitNode_v11020 = 400,
411
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020 = 401,
412
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020 = 402,
413
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020 = 403,
414
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020 = 404,
415
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceFlushGPUDirectRDMAWrites_v11030 = 405,
416
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_v11030 = 406,
417
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_ptsz_v11030 = 407,
418
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDebugDotPrint_v11030 = 408,
419
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_v11030 = 409,
420
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_ptsz_v11030 = 410,
421
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v11030 = 411,
422
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_ptsz_v11030 = 412,
423
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectCreate_v11030 = 413,
424
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRetain_v11030 = 414,
425
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRelease_v11030 = 415,
426
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRetainUserObject_v11030 = 416,
427
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphReleaseUserObject_v11030 = 417,
428
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithFlags_v11040 = 418,
429
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemAllocNode_v11040 = 419,
430
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemAllocNodeGetParams_v11040 = 420,
431
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemFreeNode_v11040 = 421,
432
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemFreeNodeGetParams_v11040 = 422,
433
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGraphMemTrim_v11040 = 423,
434
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetGraphMemAttribute_v11040 = 424,
435
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetGraphMemAttribute_v11040 = 425,
436
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetEnabled_v11060 = 426,
437
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetEnabled_v11060 = 427,
438
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetMemoryRequirements_v11060 = 428,
439
+ CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetMemoryRequirements_v11060 = 429,
440
+ CUPTI_RUNTIME_TRACE_CBID_SIZE = 430,
441
+ CUPTI_RUNTIME_TRACE_CBID_FORCE_INT = 0x7fffffff
442
+ } CUpti_runtime_api_trace_cbid;
443
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/cupti_version.h ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_VERSION_H_)
51
+ #define _CUPTI_VERSION_H_
52
+
53
+ #include <cuda_stdint.h>
54
+ #include <cupti_result.h>
55
+
56
+ #ifndef CUPTIAPI
57
+ #ifdef _WIN32
58
+ #define CUPTIAPI __stdcall
59
+ #else
60
+ #define CUPTIAPI
61
+ #endif
62
+ #endif
63
+
64
+ #if defined(__cplusplus)
65
+ extern "C" {
66
+ #endif
67
+
68
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
69
+ #pragma GCC visibility push(default)
70
+ #endif
71
+
72
+ /**
73
+ * \defgroup CUPTI_VERSION_API CUPTI Version
74
+ * Function and macro to determine the CUPTI version.
75
+ * @{
76
+ */
77
+
78
+ /**
79
+ * \brief The API version for this implementation of CUPTI.
80
+ *
81
+ * The API version for this implementation of CUPTI. This define along
82
+ * with \ref cuptiGetVersion can be used to dynamically detect if the
83
+ * version of CUPTI compiled against matches the version of the loaded
84
+ * CUPTI library.
85
+ *
86
+ * v1 : CUDAToolsSDK 4.0
87
+ * v2 : CUDAToolsSDK 4.1
88
+ * v3 : CUDA Toolkit 5.0
89
+ * v4 : CUDA Toolkit 5.5
90
+ * v5 : CUDA Toolkit 6.0
91
+ * v6 : CUDA Toolkit 6.5
92
+ * v7 : CUDA Toolkit 6.5(with sm_52 support)
93
+ * v8 : CUDA Toolkit 7.0
94
+ * v9 : CUDA Toolkit 8.0
95
+ * v10 : CUDA Toolkit 9.0
96
+ * v11 : CUDA Toolkit 9.1
97
+ * v12 : CUDA Toolkit 10.0, 10.1 and 10.2
98
+ * v13 : CUDA Toolkit 11.0
99
+ * v14 : CUDA Toolkit 11.1
100
+ * v15 : CUDA Toolkit 11.2, 11.3 and 11.4
101
+ * v16 : CUDA Toolkit 11.5
102
+ * v17 : CUDA Toolkit 11.6
103
+ */
104
+ #define CUPTI_API_VERSION 17
105
+
106
+ /**
107
+ * \brief Get the CUPTI API version.
108
+ *
109
+ * Return the API version in \p *version.
110
+ *
111
+ * \param version Returns the version
112
+ *
113
+ * \retval CUPTI_SUCCESS on success
114
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p version is NULL
115
+ * \sa CUPTI_API_VERSION
116
+ */
117
+ CUptiResult CUPTIAPI cuptiGetVersion(uint32_t *version);
118
+
119
+ /** @} */ /* END CUPTI_VERSION_API */
120
+
121
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
122
+ #pragma GCC visibility pop
123
+ #endif
124
+
125
+ #if defined(__cplusplus)
126
+ }
127
+ #endif
128
+
129
+ #endif /*_CUPTI_VERSION_H_*/
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/generated_nvtx_meta.h ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2013-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
51
+ #pragma GCC visibility push(default)
52
+ #endif
53
+
54
+ // *************************************************************************
55
+ // Definitions of structs to hold parameters for each function
56
+ // *************************************************************************
57
+
58
+ typedef struct nvtxMarkEx_params_st {
59
+ const nvtxEventAttributes_t* eventAttrib;
60
+ } nvtxMarkEx_params;
61
+
62
+ typedef struct nvtxMarkA_params_st {
63
+ const char* message;
64
+ } nvtxMarkA_params;
65
+
66
+ typedef struct nvtxMarkW_params_st {
67
+ const wchar_t* message;
68
+ } nvtxMarkW_params;
69
+
70
+ typedef struct nvtxRangeStartEx_params_st {
71
+ const nvtxEventAttributes_t* eventAttrib;
72
+ } nvtxRangeStartEx_params;
73
+
74
+ typedef struct nvtxRangeStartA_params_st {
75
+ const char* message;
76
+ } nvtxRangeStartA_params;
77
+
78
+ typedef struct nvtxRangeStartW_params_st {
79
+ const wchar_t* message;
80
+ } nvtxRangeStartW_params;
81
+
82
+ typedef struct nvtxRangeEnd_params_st {
83
+ nvtxRangeId_t id;
84
+ } nvtxRangeEnd_params;
85
+
86
+ typedef struct nvtxRangePushEx_params_st {
87
+ const nvtxEventAttributes_t* eventAttrib;
88
+ } nvtxRangePushEx_params;
89
+
90
+ typedef struct nvtxRangePushA_params_st {
91
+ const char* message;
92
+ } nvtxRangePushA_params;
93
+
94
+ typedef struct nvtxRangePushW_params_st {
95
+ const wchar_t* message;
96
+ } nvtxRangePushW_params;
97
+
98
+ typedef struct nvtxRangePop_params_st {
99
+ /* WAR: Windows compiler doesn't allow empty structs */
100
+ /* This field shouldn't be used */
101
+ void *dummy;
102
+ } nvtxRangePop_params;
103
+
104
+ typedef struct nvtxNameCategoryA_params_st {
105
+ uint32_t category;
106
+ const char* name;
107
+ } nvtxNameCategoryA_params;
108
+
109
+ typedef struct nvtxNameCategoryW_params_st {
110
+ uint32_t category;
111
+ const wchar_t* name;
112
+ } nvtxNameCategoryW_params;
113
+
114
+ typedef struct nvtxNameOsThreadA_params_st {
115
+ uint32_t threadId;
116
+ const char* name;
117
+ } nvtxNameOsThreadA_params;
118
+
119
+ typedef struct nvtxNameOsThreadW_params_st {
120
+ uint32_t threadId;
121
+ const wchar_t* name;
122
+ } nvtxNameOsThreadW_params;
123
+
124
+ typedef struct nvtxNameCuDeviceA_params_st {
125
+ CUdevice device;
126
+ const char* name;
127
+ } nvtxNameCuDeviceA_params;
128
+
129
+ typedef struct nvtxNameCuDeviceW_params_st {
130
+ CUdevice device;
131
+ const wchar_t* name;
132
+ } nvtxNameCuDeviceW_params;
133
+
134
+ typedef struct nvtxNameCuContextA_params_st {
135
+ CUcontext context;
136
+ const char* name;
137
+ } nvtxNameCuContextA_params;
138
+
139
+ typedef struct nvtxNameCuContextW_params_st {
140
+ CUcontext context;
141
+ const wchar_t* name;
142
+ } nvtxNameCuContextW_params;
143
+
144
+ typedef struct nvtxNameCuStreamA_params_st {
145
+ CUstream stream;
146
+ const char* name;
147
+ } nvtxNameCuStreamA_params;
148
+
149
+ typedef struct nvtxNameCuStreamW_params_st {
150
+ CUstream stream;
151
+ const wchar_t* name;
152
+ } nvtxNameCuStreamW_params;
153
+
154
+ typedef struct nvtxNameCuEventA_params_st {
155
+ CUevent event;
156
+ const char* name;
157
+ } nvtxNameCuEventA_params;
158
+
159
+ typedef struct nvtxNameCuEventW_params_st {
160
+ CUevent event;
161
+ const wchar_t* name;
162
+ } nvtxNameCuEventW_params;
163
+
164
+ typedef struct nvtxNameCudaDeviceA_params_st {
165
+ int device;
166
+ const char* name;
167
+ } nvtxNameCudaDeviceA_params;
168
+
169
+ typedef struct nvtxNameCudaDeviceW_params_st {
170
+ int device;
171
+ const wchar_t* name;
172
+ } nvtxNameCudaDeviceW_params;
173
+
174
+ typedef struct nvtxNameCudaStreamA_params_st {
175
+ cudaStream_t stream;
176
+ const char* name;
177
+ } nvtxNameCudaStreamA_params;
178
+
179
+ typedef struct nvtxNameCudaStreamW_params_st {
180
+ cudaStream_t stream;
181
+ const wchar_t* name;
182
+ } nvtxNameCudaStreamW_params;
183
+
184
+ typedef struct nvtxNameCudaEventA_params_st {
185
+ cudaEvent_t event;
186
+ const char* name;
187
+ } nvtxNameCudaEventA_params;
188
+
189
+ typedef struct nvtxNameCudaEventW_params_st {
190
+ cudaEvent_t event;
191
+ const wchar_t* name;
192
+ } nvtxNameCudaEventW_params;
193
+
194
+ typedef struct nvtxDomainCreateA_params_st {
195
+ const char* name;
196
+ } nvtxDomainCreateA_params;
197
+
198
+ typedef struct nvtxDomainDestroy_params_st {
199
+ nvtxDomainHandle_t domain;
200
+ } nvtxDomainDestroy_params;
201
+
202
+ typedef struct nvtxDomainMarkEx_params_st {
203
+ nvtxDomainHandle_t domain;
204
+ nvtxMarkEx_params core;
205
+ } nvtxDomainMarkEx_params;
206
+
207
+ typedef struct nvtxDomainRangeStartEx_params_st {
208
+ nvtxDomainHandle_t domain;
209
+ nvtxRangeStartEx_params core;
210
+ } nvtxDomainRangeStartEx_params;
211
+
212
+ typedef struct nvtxDomainRangeEnd_params_st {
213
+ nvtxDomainHandle_t domain;
214
+ nvtxRangeEnd_params core;
215
+ } nvtxDomainRangeEnd_params;
216
+
217
+ typedef struct nvtxDomainRangePushEx_params_st {
218
+ nvtxDomainHandle_t domain;
219
+ nvtxRangePushEx_params core;
220
+ } nvtxDomainRangePushEx_params;
221
+
222
+ typedef struct nvtxDomainRangePop_params_st {
223
+ nvtxDomainHandle_t domain;
224
+ } nvtxDomainRangePop_params;
225
+
226
+ typedef struct nvtxSyncUserCreate_params_st {
227
+ nvtxDomainHandle_t domain;
228
+ const nvtxSyncUserAttributes_t* attribs;
229
+ } nvtxSyncUserCreate_params;
230
+
231
+ typedef struct nvtxSyncUserCommon_params_st {
232
+ nvtxSyncUser_t handle;
233
+ } nvtxSyncUserCommon_params;
234
+
235
+ typedef struct nvtxDomainRegisterStringA_params_st {
236
+ nvtxDomainHandle_t domain;
237
+ const char* string;
238
+ } nvtxDomainRegisterStringA_params;
239
+
240
+ typedef struct nvtxDomainRegisterStringW_params_st {
241
+ nvtxDomainHandle_t domain;
242
+ const char* string;
243
+ } nvtxDomainRegisterStringW_params;
244
+
245
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
246
+ #pragma GCC visibility pop
247
+ #endif
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/extras/CUPTI/include/nvperf_common.h ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_COMMON_H
2
+ #define NVPERF_COMMON_H
3
+
4
+ /*
5
+ * Copyright 2014-2022 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
+ #include <stddef.h>
42
+ #include <stdint.h>
43
+
44
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
45
+ #pragma GCC visibility push(default)
46
+ #if !defined(NVPW_LOCAL)
47
+ #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
48
+ #endif
49
+ #else
50
+ #if !defined(NVPW_LOCAL)
51
+ #define NVPW_LOCAL
52
+ #endif
53
+ #endif
54
+
55
+ #ifdef __cplusplus
56
+ extern "C" {
57
+ #endif
58
+
59
+ /**
60
+ * @file nvperf_common.h
61
+ */
62
+
63
+ #ifndef NVPERF_NVPA_STATUS_DEFINED
64
+ #define NVPERF_NVPA_STATUS_DEFINED
65
+
66
+ /// Error codes.
67
+ typedef enum NVPA_Status
68
+ {
69
+ /// Success
70
+ NVPA_STATUS_SUCCESS = 0,
71
+ /// Generic error.
72
+ NVPA_STATUS_ERROR = 1,
73
+ /// Internal error. Please file a bug!
74
+ NVPA_STATUS_INTERNAL_ERROR = 2,
75
+ /// NVPW_InitializeTarget() has not been called yet.
76
+ NVPA_STATUS_NOT_INITIALIZED = 3,
77
+ /// The NvPerf DLL/DSO could not be loaded during NVPW_Initialize*.
78
+ NVPA_STATUS_NOT_LOADED = 4,
79
+ /// The function was not found in this version of the NvPerf DLL/DSO.
80
+ NVPA_STATUS_FUNCTION_NOT_FOUND = 5,
81
+ /// The request was intentionally not supported.
82
+ NVPA_STATUS_NOT_SUPPORTED = 6,
83
+ /// The request was not implemented by this version.
84
+ NVPA_STATUS_NOT_IMPLEMENTED = 7,
85
+ /// Invalid argument.
86
+ NVPA_STATUS_INVALID_ARGUMENT = 8,
87
+ /// UNUSED
88
+ NVPA_STATUS_INVALID_METRIC_ID = 9,
89
+ /// No driver has been loaded via NVPW_*_LoadDriver().
90
+ NVPA_STATUS_DRIVER_NOT_LOADED = 10,
91
+ /// Failed memory allocation.
92
+ NVPA_STATUS_OUT_OF_MEMORY = 11,
93
+ /// UNUSED
94
+ NVPA_STATUS_INVALID_THREAD_STATE = 12,
95
+ /// UNUSED
96
+ NVPA_STATUS_FAILED_CONTEXT_ALLOC = 13,
97
+ /// The specified GPU is not supported.
98
+ NVPA_STATUS_UNSUPPORTED_GPU = 14,
99
+ /// The installed NVIDIA driver is too old.
100
+ NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION = 15,
101
+ /// UNUSED
102
+ NVPA_STATUS_OBJECT_NOT_REGISTERED = 16,
103
+ /// Profiling permission not granted; see https://developer.nvidia.com/nvidia-development-tools-solutions-
104
+ /// ERR_NVGPUCTRPERM-permission-issue-performance-counters
105
+ NVPA_STATUS_INSUFFICIENT_PRIVILEGE = 17,
106
+ /// UNUSED
107
+ NVPA_STATUS_INVALID_CONTEXT_STATE = 18,
108
+ /// UNUSED
109
+ NVPA_STATUS_INVALID_OBJECT_STATE = 19,
110
+ /// The request could not be fulfilled because a system resource is already in use.
111
+ NVPA_STATUS_RESOURCE_UNAVAILABLE = 20,
112
+ /// UNUSED
113
+ NVPA_STATUS_DRIVER_LOADED_TOO_LATE = 21,
114
+ /// The provided buffer is not large enough.
115
+ NVPA_STATUS_INSUFFICIENT_SPACE = 22,
116
+ /// UNUSED
117
+ NVPA_STATUS_OBJECT_MISMATCH = 23,
118
+ /// Virtualized GPU (vGPU) is not supported.
119
+ NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 24,
120
+ /// Profiling permission on a vGPU was not granted.
121
+ NVPA_STATUS_PROFILING_NOT_ALLOWED = 25,
122
+ NVPA_STATUS__COUNT
123
+ } NVPA_Status;
124
+
125
+
126
+ #endif // NVPERF_NVPA_STATUS_DEFINED
127
+
128
+
129
+ #ifndef NVPERF_NVPA_ACTIVITY_KIND_DEFINED
130
+ #define NVPERF_NVPA_ACTIVITY_KIND_DEFINED
131
+
132
+ /// The configuration's activity-kind dictates which types of data may be collected.
133
+ typedef enum NVPA_ActivityKind
134
+ {
135
+ /// Invalid value.
136
+ NVPA_ACTIVITY_KIND_INVALID = 0,
137
+ /// A workload-centric activity for serialized and pipelined collection.
138
+ ///
139
+ /// Profiler is capable of collecting both serialized and pipelined metrics. The library introduces any
140
+ /// synchronization required to collect serialized metrics.
141
+ NVPA_ACTIVITY_KIND_PROFILER,
142
+ /// A realtime activity for sampling counters from the CPU or GPU.
143
+ NVPA_ACTIVITY_KIND_REALTIME_SAMPLED,
144
+ /// A realtime activity for profiling counters from the CPU or GPU without CPU/GPU synchronizations.
145
+ NVPA_ACTIVITY_KIND_REALTIME_PROFILER,
146
+ NVPA_ACTIVITY_KIND__COUNT
147
+ } NVPA_ActivityKind;
148
+
149
+
150
+ #endif // NVPERF_NVPA_ACTIVITY_KIND_DEFINED
151
+
152
+
153
+ #ifndef NVPERF_NVPA_BOOL_DEFINED
154
+ #define NVPERF_NVPA_BOOL_DEFINED
155
+ /// The type used for boolean values.
156
+ typedef uint8_t NVPA_Bool;
157
+ #endif // NVPERF_NVPA_BOOL_DEFINED
158
+
159
+ #ifndef NVPA_STRUCT_SIZE
160
+ #define NVPA_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
161
+ #endif // NVPA_STRUCT_SIZE
162
+
163
+ #ifndef NVPW_FIELD_EXISTS
164
+ #define NVPW_FIELD_EXISTS(pParams_, name_) \
165
+ ((pParams_)->structSize >= (size_t)((const uint8_t*)(&(pParams_)->name_) + sizeof(pParams_)->name_ - (const uint8_t*)(pParams_)))
166
+ #endif // NVPW_FIELD_EXISTS
167
+
168
+
169
+ #ifndef NVPERF_NVPA_GETPROCADDRESS_DEFINED
170
+ #define NVPERF_NVPA_GETPROCADDRESS_DEFINED
171
+
172
+ typedef NVPA_Status (*NVPA_GenericFn)(void);
173
+
174
+
175
+ ///
176
+ /// Gets the address of an NvPerf API function.
177
+ ///
178
+ /// \return A function pointer to the function, or NULL if the function is not available.
179
+ ///
180
+ /// \param pFunctionName [in] Name of the function to retrieve.
181
+ NVPA_GenericFn NVPA_GetProcAddress(const char* pFunctionName);
182
+
183
+ #endif
184
+
185
+ #ifndef NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED
186
+ #define NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED
187
+
188
+
189
+ typedef struct NVPW_SetLibraryLoadPaths_Params
190
+ {
191
+ /// [in]
192
+ size_t structSize;
193
+ /// [in] assign to NULL
194
+ void* pPriv;
195
+ /// [in] number of paths in ppPaths
196
+ size_t numPaths;
197
+ /// [in] array of null-terminated paths
198
+ const char** ppPaths;
199
+ } NVPW_SetLibraryLoadPaths_Params;
200
+ #define NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPaths_Params, ppPaths)
201
+
202
+ /// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget().
203
+ /// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO. This function sets
204
+ /// ordered paths that will be searched with the LoadLibrary() or dlopen() call.
205
+ /// If load paths are set by this function, the default set of load paths
206
+ /// will not be attempted.
207
+ /// Each path must point at a directory (not a file name).
208
+ /// This function is not thread-safe.
209
+ /// Example Usage:
210
+ /// \code
211
+ /// const char* paths[] = {
212
+ /// "path1", "path2", etc
213
+ /// };
214
+ /// NVPW_SetLibraryLoadPaths_Params params{NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE};
215
+ /// params.numPaths = sizeof(paths)/sizeof(paths[0]);
216
+ /// params.ppPaths = paths;
217
+ /// NVPW_SetLibraryLoadPaths(&params);
218
+ /// NVPW_InitializeHost();
219
+ /// params.numPaths = 0;
220
+ /// params.ppPaths = NULL;
221
+ /// NVPW_SetLibraryLoadPaths(&params);
222
+ /// \endcode
223
+ NVPA_Status NVPW_SetLibraryLoadPaths(NVPW_SetLibraryLoadPaths_Params* pParams);
224
+
225
+ typedef struct NVPW_SetLibraryLoadPathsW_Params
226
+ {
227
+ /// [in]
228
+ size_t structSize;
229
+ /// [in] assign to NULL
230
+ void* pPriv;
231
+ /// [in] number of paths in ppwPaths
232
+ size_t numPaths;
233
+ /// [in] array of null-terminated paths
234
+ const wchar_t** ppwPaths;
235
+ } NVPW_SetLibraryLoadPathsW_Params;
236
+ #define NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPathsW_Params, ppwPaths)
237
+
238
+ /// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget().
239
+ /// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO. This function sets
240
+ /// ordered paths that will be searched with the LoadLibrary() or dlopen() call.
241
+ /// If load paths are set by this function, the default set of load paths
242
+ /// will not be attempted.
243
+ /// Each path must point at a directory (not a file name).
244
+ /// This function is not thread-safe.
245
+ /// Example Usage:
246
+ /// \code
247
+ /// const wchar_t* wpaths[] = {
248
+ /// L"path1", L"path2", etc
249
+ /// };
250
+ /// NVPW_SetLibraryLoadPathsW_Params params{NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE};
251
+ /// params.numPaths = sizeof(wpaths)/sizeof(wpaths[0]);
252
+ /// params.ppwPaths = wpaths;
253
+ /// NVPW_SetLibraryLoadPathsW(&params);
254
+ /// NVPW_InitializeHost();
255
+ /// params.numPaths = 0;
256
+ /// params.ppwPaths = NULL;
257
+ /// NVPW_SetLibraryLoadPathsW(&params);
258
+ /// \endcode
259
+ NVPA_Status NVPW_SetLibraryLoadPathsW(NVPW_SetLibraryLoadPathsW_Params* pParams);
260
+
261
+ #endif
262
+
263
+
264
+
265
+ #ifdef __cplusplus
266
+ } // extern "C"
267
+ #endif
268
+
269
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
270
+ #pragma GCC visibility pop
271
+ #endif
272
+
273
+ #endif // NVPERF_COMMON_H
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #include <cuda_stdint.h>
51
+ #include "Openmp/omp-tools.h"
52
+
53
+ #if !defined(_CUPTI_OPENMP_H_)
54
+ #define _CUPTI_OPENMP_H_
55
+
56
+ #ifndef CUPTIAPI
57
+ #ifdef _WIN32
58
+ #define CUPTIAPI __stdcall
59
+ #else
60
+ #define CUPTIAPI
61
+ #endif
62
+ #endif
63
+
64
+ #if defined(__LP64__)
65
+ #define CUPTILP64 1
66
+ #elif defined(_WIN64)
67
+ #define CUPTILP64 1
68
+ #else
69
+ #undef CUPTILP64
70
+ #endif
71
+
72
+ #if defined(__cplusplus)
73
+ extern "C" {
74
+ #endif
75
+
76
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
77
+ #pragma GCC visibility push(default)
78
+ #endif
79
+
80
+ /**
81
+ * \brief Initialize OPENMP support (deprecated, used before OpenMP 5.0)
82
+ *
83
+ */
84
+ int CUPTIAPI cuptiOpenMpInitialize(ompt_function_lookup_t ompt_fn_lookup, const char *runtime_version, unsigned int ompt_version);
85
+
86
+ /**
87
+ * \brief Initialize OPENMP support
88
+ *
89
+ */
90
+ int CUPTIAPI cuptiOpenMpInitialize_v2(ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data);
91
+
92
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
93
+ #pragma GCC visibility pop
94
+ #endif
95
+
96
+ #if defined(__cplusplus)
97
+ }
98
+ #endif
99
+
100
+ #endif /*_CUPTI_OPENMP_H_*/
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py ADDED
File without changes
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions
6
+ * are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of NVIDIA CORPORATION nor the names of its
13
+ * contributors may be used to endorse or promote products derived
14
+ * from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ */
28
+
29
+ #ifndef __cuda_stdint_h__
30
+ #define __cuda_stdint_h__
31
+
32
+ // Compiler-specific treatment for C99's stdint.h
33
+ //
34
+ // By default, this header will use the standard headers (so it
35
+ // is your responsibility to make sure they are available), except
36
+ // on MSVC before Visual Studio 2010, when they were not provided.
37
+ // To support old MSVC, a few of the commonly-used definitions are
38
+ // provided here. If more definitions are needed, add them here,
39
+ // or replace these definitions with a complete implementation,
40
+ // such as the ones available from Google, Boost, or MSVC10. You
41
+ // can prevent the definition of any of these types (in order to
42
+ // use your own) by #defining CU_STDINT_TYPES_ALREADY_DEFINED.
43
+
44
+ #if !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
45
+
46
+ // In VS including stdint.h forces the C++ runtime dep - provide an opt-out
47
+ // (CU_STDINT_VS_FORCE_NO_STDINT_H) for users that care (notably static
48
+ // cudart).
49
+ #if defined(_MSC_VER) && ((_MSC_VER < 1600) || defined(CU_STDINT_VS_FORCE_NO_STDINT_H))
50
+
51
+ // These definitions can be used with MSVC 8 and 9,
52
+ // which don't ship with stdint.h:
53
+
54
+ typedef unsigned char uint8_t;
55
+
56
+ typedef short int16_t;
57
+ typedef unsigned short uint16_t;
58
+
59
+ // To keep it consistent with all MSVC build. define those types
60
+ // in the exact same way they are defined with the MSVC headers
61
+ #if defined(_MSC_VER)
62
+ typedef signed char int8_t;
63
+
64
+ typedef int int32_t;
65
+ typedef unsigned int uint32_t;
66
+
67
+ typedef long long int64_t;
68
+ typedef unsigned long long uint64_t;
69
+ #else
70
+ typedef char int8_t;
71
+
72
+ typedef long int32_t;
73
+ typedef unsigned long uint32_t;
74
+
75
+ typedef __int64 int64_t;
76
+ typedef unsigned __int64 uint64_t;
77
+ #endif
78
+
79
+ #elif defined(__DJGPP__)
80
+
81
+ // These definitions can be used when compiling
82
+ // C code with DJGPP, which only provides stdint.h
83
+ // when compiling C++ code with TR1 enabled.
84
+
85
+ typedef char int8_t;
86
+ typedef unsigned char uint8_t;
87
+
88
+ typedef short int16_t;
89
+ typedef unsigned short uint16_t;
90
+
91
+ typedef long int32_t;
92
+ typedef unsigned long uint32_t;
93
+
94
+ typedef long long int64_t;
95
+ typedef unsigned long long uint64_t;
96
+
97
+ #else
98
+
99
+ // Use standard headers, as specified by C99 and C++ TR1.
100
+ // Known to be provided by:
101
+ // - gcc/glibc, supported by all versions of glibc
102
+ // - djgpp, supported since 2001
103
+ // - MSVC, supported by Visual Studio 2010 and later
104
+
105
+ #include <stdint.h>
106
+
107
+ #endif
108
+
109
+ #endif // !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
110
+
111
+
112
+ #endif // file guard
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti.h ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_H_)
51
+ #define _CUPTI_H_
52
+
53
+ #ifdef _WIN32
54
+ #ifndef WIN32_LEAN_AND_MEAN
55
+ #define WIN32_LEAN_AND_MEAN
56
+ #endif
57
+ #ifdef NOMINMAX
58
+ #include <windows.h>
59
+ #else
60
+ #define NOMINMAX
61
+ #include <windows.h>
62
+ #undef NOMINMAX
63
+ #endif
64
+ #endif
65
+
66
+ #include <cuda.h>
67
+ #include <cupti_result.h>
68
+ #include <cupti_version.h>
69
+
70
+ /* Activity, callback, event and metric APIs */
71
+ #include <cupti_activity.h>
72
+ #include <cupti_callbacks.h>
73
+ #include <cupti_events.h>
74
+ #include <cupti_metrics.h>
75
+
76
+ /* Runtime, driver, and nvtx function identifiers */
77
+ #include <cupti_driver_cbid.h>
78
+ #include <cupti_runtime_cbid.h>
79
+ #include <cupti_nvtx_cbid.h>
80
+
81
+ /* To support function parameter structures for obsoleted API. See
82
+ cuda.h for the actual definition of these structures. */
83
+ typedef unsigned int CUdeviceptr_v1;
84
+ typedef struct CUDA_MEMCPY2D_v1_st { int dummy; } CUDA_MEMCPY2D_v1;
85
+ typedef struct CUDA_MEMCPY3D_v1_st { int dummy; } CUDA_MEMCPY3D_v1;
86
+ typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY_DESCRIPTOR_v1;
87
+ typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY3D_DESCRIPTOR_v1;
88
+
89
+ /* Function parameter structures */
90
+ #include <generated_cuda_runtime_api_meta.h>
91
+ #include <generated_cuda_meta.h>
92
+
93
+ /* The following parameter structures cannot be included unless a
94
+ header that defines GL_VERSION is included before including them.
95
+ If these are needed then make sure such a header is included
96
+ already. */
97
+ #ifdef GL_VERSION
98
+ #include <generated_cuda_gl_interop_meta.h>
99
+ #include <generated_cudaGL_meta.h>
100
+ #endif
101
+
102
+ //#include <generated_nvtx_meta.h>
103
+
104
+ /* The following parameter structures cannot be included by default as
105
+ they are not guaranteed to be available on all systems. Uncomment
106
+ the includes that are available, or use the include explicitly. */
107
+ #if defined(__linux__)
108
+ //#include <generated_cuda_vdpau_interop_meta.h>
109
+ //#include <generated_cudaVDPAU_meta.h>
110
+ #endif
111
+
112
+ #ifdef _WIN32
113
+ //#include <generated_cuda_d3d9_interop_meta.h>
114
+ //#include <generated_cuda_d3d10_interop_meta.h>
115
+ //#include <generated_cuda_d3d11_interop_meta.h>
116
+ //#include <generated_cudaD3D9_meta.h>
117
+ //#include <generated_cudaD3D10_meta.h>
118
+ //#include <generated_cudaD3D11_meta.h>
119
+ #endif
120
+
121
+ #endif /*_CUPTI_H_*/
122
+
123
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h ADDED
The diff for this file is too large to render. See raw diff
 
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity_deprecated.h ADDED
The diff for this file is too large to render. See raw diff
 
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h ADDED
@@ -0,0 +1,860 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUPTI_CALLBACKS_H__)
51
+ #define __CUPTI_CALLBACKS_H__
52
+
53
+ #include <cuda.h>
54
+ #include <builtin_types.h>
55
+ #include <string.h>
56
+ #include <cuda_stdint.h>
57
+ #include <cupti_result.h>
58
+
59
+ #ifndef CUPTIAPI
60
+ #ifdef _WIN32
61
+ #define CUPTIAPI __stdcall
62
+ #else
63
+ #define CUPTIAPI
64
+ #endif
65
+ #endif
66
+
67
+ #if defined(__cplusplus)
68
+ extern "C" {
69
+ #endif
70
+
71
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
72
+ #pragma GCC visibility push(default)
73
+ #endif
74
+
75
+ /**
76
+ * \defgroup CUPTI_CALLBACK_API CUPTI Callback API
77
+ * Functions, types, and enums that implement the CUPTI Callback API.
78
+ * @{
79
+ */
80
+
81
+ /**
82
+ * \brief Specifies the point in an API call that a callback is issued.
83
+ *
84
+ * Specifies the point in an API call that a callback is issued. This
85
+ * value is communicated to the callback function via \ref
86
+ * CUpti_CallbackData::callbackSite.
87
+ */
88
+ typedef enum {
89
+ /**
90
+ * The callback is at the entry of the API call.
91
+ */
92
+ CUPTI_API_ENTER = 0,
93
+ /**
94
+ * The callback is at the exit of the API call.
95
+ */
96
+ CUPTI_API_EXIT = 1,
97
+ CUPTI_API_CBSITE_FORCE_INT = 0x7fffffff
98
+ } CUpti_ApiCallbackSite;
99
+
100
+ /**
101
+ * \brief Callback domains.
102
+ *
103
+ * Callback domains. Each domain represents callback points for a
104
+ * group of related API functions or CUDA driver activity.
105
+ */
106
+ typedef enum {
107
+ /**
108
+ * Invalid domain.
109
+ */
110
+ CUPTI_CB_DOMAIN_INVALID = 0,
111
+ /**
112
+ * Domain containing callback points for all driver API functions.
113
+ */
114
+ CUPTI_CB_DOMAIN_DRIVER_API = 1,
115
+ /**
116
+ * Domain containing callback points for all runtime API
117
+ * functions.
118
+ */
119
+ CUPTI_CB_DOMAIN_RUNTIME_API = 2,
120
+ /**
121
+ * Domain containing callback points for CUDA resource tracking.
122
+ */
123
+ CUPTI_CB_DOMAIN_RESOURCE = 3,
124
+ /**
125
+ * Domain containing callback points for CUDA synchronization.
126
+ */
127
+ CUPTI_CB_DOMAIN_SYNCHRONIZE = 4,
128
+ /**
129
+ * Domain containing callback points for NVTX API functions.
130
+ */
131
+ CUPTI_CB_DOMAIN_NVTX = 5,
132
+ /**
133
+ * Domain containing callback points for various states.
134
+ */
135
+ CUPTI_CB_DOMAIN_STATE,
136
+ CUPTI_CB_DOMAIN_SIZE,
137
+
138
+ CUPTI_CB_DOMAIN_FORCE_INT = 0x7fffffff
139
+ } CUpti_CallbackDomain;
140
+
141
+ /**
142
+ * \brief Callback IDs for resource domain.
143
+ *
144
+ * Callback IDs for resource domain, CUPTI_CB_DOMAIN_RESOURCE. This
145
+ * value is communicated to the callback function via the \p cbid
146
+ * parameter.
147
+ */
148
+ typedef enum {
149
+ /**
150
+ * Invalid resource callback ID.
151
+ */
152
+ CUPTI_CBID_RESOURCE_INVALID = 0,
153
+ /**
154
+ * A new context has been created.
155
+ */
156
+ CUPTI_CBID_RESOURCE_CONTEXT_CREATED = 1,
157
+ /**
158
+ * A context is about to be destroyed.
159
+ */
160
+ CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING = 2,
161
+ /**
162
+ * A new stream has been created.
163
+ */
164
+ CUPTI_CBID_RESOURCE_STREAM_CREATED = 3,
165
+ /**
166
+ * A stream is about to be destroyed.
167
+ */
168
+ CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING = 4,
169
+ /**
170
+ * The driver has finished initializing.
171
+ */
172
+ CUPTI_CBID_RESOURCE_CU_INIT_FINISHED = 5,
173
+ /**
174
+ * A module has been loaded.
175
+ */
176
+ CUPTI_CBID_RESOURCE_MODULE_LOADED = 6,
177
+ /**
178
+ * A module is about to be unloaded.
179
+ */
180
+ CUPTI_CBID_RESOURCE_MODULE_UNLOAD_STARTING = 7,
181
+ /**
182
+ * The current module which is being profiled.
183
+ */
184
+ CUPTI_CBID_RESOURCE_MODULE_PROFILED = 8,
185
+ /**
186
+ * CUDA graph has been created.
187
+ */
188
+ CUPTI_CBID_RESOURCE_GRAPH_CREATED = 9,
189
+ /**
190
+ * CUDA graph is about to be destroyed.
191
+ */
192
+ CUPTI_CBID_RESOURCE_GRAPH_DESTROY_STARTING = 10,
193
+ /**
194
+ * CUDA graph is cloned.
195
+ */
196
+ CUPTI_CBID_RESOURCE_GRAPH_CLONED = 11,
197
+ /**
198
+ * CUDA graph node is about to be created
199
+ */
200
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATE_STARTING = 12,
201
+ /**
202
+ * CUDA graph node is created.
203
+ */
204
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATED = 13,
205
+ /**
206
+ * CUDA graph node is about to be destroyed.
207
+ */
208
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DESTROY_STARTING = 14,
209
+ /**
210
+ * Dependency on a CUDA graph node is created.
211
+ */
212
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_CREATED = 15,
213
+ /**
214
+ * Dependency on a CUDA graph node is destroyed.
215
+ */
216
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_DESTROY_STARTING = 16,
217
+ /**
218
+ * An executable CUDA graph is about to be created.
219
+ */
220
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATE_STARTING = 17,
221
+ /**
222
+ * An executable CUDA graph is created.
223
+ */
224
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATED = 18,
225
+ /**
226
+ * An executable CUDA graph is about to be destroyed.
227
+ */
228
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_DESTROY_STARTING = 19,
229
+ /**
230
+ * CUDA graph node is cloned.
231
+ */
232
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CLONED = 20,
233
+ /**
234
+ * CUDA stream attribute is changed.
235
+ */
236
+ CUPTI_CBID_RESOURCE_STREAM_ATTRIBUTE_CHANGED = 21,
237
+
238
+ CUPTI_CBID_RESOURCE_SIZE,
239
+ CUPTI_CBID_RESOURCE_FORCE_INT = 0x7fffffff
240
+ } CUpti_CallbackIdResource;
241
+
242
+ /**
243
+ * \brief Callback IDs for synchronization domain.
244
+ *
245
+ * Callback IDs for synchronization domain,
246
+ * CUPTI_CB_DOMAIN_SYNCHRONIZE. This value is communicated to the
247
+ * callback function via the \p cbid parameter.
248
+ */
249
+ typedef enum {
250
+ /**
251
+ * Invalid synchronize callback ID.
252
+ */
253
+ CUPTI_CBID_SYNCHRONIZE_INVALID = 0,
254
+ /**
255
+ * Stream synchronization has completed for the stream.
256
+ */
257
+ CUPTI_CBID_SYNCHRONIZE_STREAM_SYNCHRONIZED = 1,
258
+ /**
259
+ * Context synchronization has completed for the context.
260
+ */
261
+ CUPTI_CBID_SYNCHRONIZE_CONTEXT_SYNCHRONIZED = 2,
262
+ CUPTI_CBID_SYNCHRONIZE_SIZE,
263
+ CUPTI_CBID_SYNCHRONIZE_FORCE_INT = 0x7fffffff
264
+ } CUpti_CallbackIdSync;
265
+
266
+ /**
267
+ * \brief Callback IDs for state domain.
268
+ *
269
+ * Callback IDs for state domain,
270
+ * CUPTI_CB_DOMAIN_STATE. This value is communicated to the
271
+ * callback function via the \p cbid parameter.
272
+ */
273
+ typedef enum {
274
+ /**
275
+ * Invalid state callback ID.
276
+ */
277
+ CUPTI_CBID_STATE_INVALID = 0,
278
+ /**
279
+ * Notification of fatal errors - high impact, non-recoverable
280
+ * When encountered, CUPTI automatically invokes cuptiFinalize()
281
+ * User can control behavior of the application in future from
282
+ * receiving this callback - such as continuing without profiling, or
283
+ * terminating the whole application.
284
+ */
285
+ CUPTI_CBID_STATE_FATAL_ERROR = 1,
286
+ /**
287
+ * Notification of non fatal errors - high impact, but recoverable
288
+ * This notification is not issued in the current release.
289
+ */
290
+ CUPTI_CBID_STATE_ERROR = 2,
291
+ /**
292
+ * Notification of warnings - low impact, recoverable
293
+ * This notification is not issued in the current release.
294
+ */
295
+ CUPTI_CBID_STATE_WARNING = 3,
296
+
297
+ CUPTI_CBID_STATE_SIZE,
298
+ CUPTI_CBID_STATE_FORCE_INT = 0x7fffffff
299
+ } CUpti_CallbackIdState;
300
+
301
+ /**
302
+ * \brief Data passed into a runtime or driver API callback function.
303
+ *
304
+ * Data passed into a runtime or driver API callback function as the
305
+ * \p cbdata argument to \ref CUpti_CallbackFunc. The \p cbdata will
306
+ * be this type for \p domain equal to CUPTI_CB_DOMAIN_DRIVER_API or
307
+ * CUPTI_CB_DOMAIN_RUNTIME_API. The callback data is valid only within
308
+ * the invocation of the callback function that is passed the data. If
309
+ * you need to retain some data for use outside of the callback, you
310
+ * must make a copy of that data. For example, if you make a shallow
311
+ * copy of CUpti_CallbackData within a callback, you cannot
312
+ * dereference \p functionParams outside of that callback to access
313
+ * the function parameters. \p functionName is an exception: the
314
+ * string pointed to by \p functionName is a global constant and so
315
+ * may be accessed outside of the callback.
316
+ */
317
+ typedef struct {
318
+ /**
319
+ * Point in the runtime or driver function from where the callback
320
+ * was issued.
321
+ */
322
+ CUpti_ApiCallbackSite callbackSite;
323
+
324
+ /**
325
+ * Name of the runtime or driver API function which issued the
326
+ * callback. This string is a global constant and so may be
327
+ * accessed outside of the callback.
328
+ */
329
+ const char *functionName;
330
+
331
+ /**
332
+ * Pointer to the arguments passed to the runtime or driver API
333
+ * call. See generated_cuda_runtime_api_meta.h and
334
+ * generated_cuda_meta.h for structure definitions for the
335
+ * parameters for each runtime and driver API function.
336
+ */
337
+ const void *functionParams;
338
+
339
+ /**
340
+ * Pointer to the return value of the runtime or driver API
341
+ * call. This field is only valid within the exit::CUPTI_API_EXIT
342
+ * callback. For a runtime API \p functionReturnValue points to a
343
+ * \p cudaError_t. For a driver API \p functionReturnValue points
344
+ * to a \p CUresult.
345
+ */
346
+ void *functionReturnValue;
347
+
348
+ /**
349
+ * Name of the symbol operated on by the runtime or driver API
350
+ * function which issued the callback. This entry is valid only for
351
+ * driver and runtime launch callbacks, where it returns the name of
352
+ * the kernel.
353
+ */
354
+ const char *symbolName;
355
+
356
+ /**
357
+ * Driver context current to the thread, or null if no context is
358
+ * current. This value can change from the entry to exit callback
359
+ * of a runtime API function if the runtime initializes a context.
360
+ */
361
+ CUcontext context;
362
+
363
+ /**
364
+ * Unique ID for the CUDA context associated with the thread. The
365
+ * UIDs are assigned sequentially as contexts are created and are
366
+ * unique within a process.
367
+ */
368
+ uint32_t contextUid;
369
+
370
+ /**
371
+ * Pointer to data shared between the entry and exit callbacks of
372
+ * a given runtime or drive API function invocation. This field
373
+ * can be used to pass 64-bit values from the entry callback to
374
+ * the corresponding exit callback.
375
+ */
376
+ uint64_t *correlationData;
377
+
378
+ /**
379
+ * The activity record correlation ID for this callback. For a
380
+ * driver domain callback (i.e. \p domain
381
+ * CUPTI_CB_DOMAIN_DRIVER_API) this ID will equal the correlation ID
382
+ * in the CUpti_ActivityAPI record corresponding to the CUDA driver
383
+ * function call. For a runtime domain callback (i.e. \p domain
384
+ * CUPTI_CB_DOMAIN_RUNTIME_API) this ID will equal the correlation
385
+ * ID in the CUpti_ActivityAPI record corresponding to the CUDA
386
+ * runtime function call. Within the callback, this ID can be
387
+ * recorded to correlate user data with the activity record. This
388
+ * field is new in 4.1.
389
+ */
390
+ uint32_t correlationId;
391
+
392
+ } CUpti_CallbackData;
393
+
394
+ /**
395
+ * \brief Data passed into a resource callback function.
396
+ *
397
+ * Data passed into a resource callback function as the \p cbdata
398
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
399
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The callback
400
+ * data is valid only within the invocation of the callback function
401
+ * that is passed the data. If you need to retain some data for use
402
+ * outside of the callback, you must make a copy of that data.
403
+ */
404
+ typedef struct {
405
+ /**
406
+ * For CUPTI_CBID_RESOURCE_CONTEXT_CREATED and
407
+ * CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING, the context being
408
+ * created or destroyed. For CUPTI_CBID_RESOURCE_STREAM_CREATED and
409
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the context
410
+ * containing the stream being created or destroyed.
411
+ */
412
+ CUcontext context;
413
+
414
+ union {
415
+ /**
416
+ * For CUPTI_CBID_RESOURCE_STREAM_CREATED and
417
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the stream being
418
+ * created or destroyed.
419
+ */
420
+ CUstream stream;
421
+ } resourceHandle;
422
+
423
+ /**
424
+ * Reserved for future use.
425
+ */
426
+ void *resourceDescriptor;
427
+ } CUpti_ResourceData;
428
+
429
+
430
+ /**
431
+ * \brief Module data passed into a resource callback function.
432
+ *
433
+ * CUDA module data passed into a resource callback function as the \p cbdata
434
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
435
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The module
436
+ * data is valid only within the invocation of the callback function
437
+ * that is passed the data. If you need to retain some data for use
438
+ * outside of the callback, you must make a copy of that data.
439
+ */
440
+
441
+ typedef struct {
442
+ /**
443
+ * Identifier to associate with the CUDA module.
444
+ */
445
+ uint32_t moduleId;
446
+
447
+ /**
448
+ * The size of the cubin.
449
+ */
450
+ size_t cubinSize;
451
+
452
+ /**
453
+ * Pointer to the associated cubin.
454
+ */
455
+ const char *pCubin;
456
+ } CUpti_ModuleResourceData;
457
+
458
+ /**
459
+ * \brief CUDA graphs data passed into a resource callback function.
460
+ *
461
+ * CUDA graphs data passed into a resource callback function as the \p cbdata
462
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
463
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The graph
464
+ * data is valid only within the invocation of the callback function
465
+ * that is passed the data. If you need to retain some data for use
466
+ * outside of the callback, you must make a copy of that data.
467
+ */
468
+
469
+ typedef struct {
470
+ /**
471
+ * CUDA graph
472
+ */
473
+ CUgraph graph;
474
+ /**
475
+ * The original CUDA graph from which \param graph is cloned
476
+ */
477
+ CUgraph originalGraph;
478
+ /**
479
+ * CUDA graph node
480
+ */
481
+ CUgraphNode node;
482
+ /**
483
+ * The original CUDA graph node from which \param node is cloned
484
+ */
485
+ CUgraphNode originalNode;
486
+ /**
487
+ * Type of the \param node
488
+ */
489
+ CUgraphNodeType nodeType;
490
+ /**
491
+ * The dependent graph node
492
+ * The size of the array is \param numDependencies.
493
+ */
494
+ CUgraphNode dependency;
495
+ /**
496
+ * CUDA executable graph
497
+ */
498
+ CUgraphExec graphExec;
499
+ } CUpti_GraphData;
500
+
501
+ /**
502
+ * \brief Data passed into a synchronize callback function.
503
+ *
504
+ * Data passed into a synchronize callback function as the \p cbdata
505
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
506
+ * type for \p domain equal to CUPTI_CB_DOMAIN_SYNCHRONIZE. The
507
+ * callback data is valid only within the invocation of the callback
508
+ * function that is passed the data. If you need to retain some data
509
+ * for use outside of the callback, you must make a copy of that data.
510
+ */
511
+ typedef struct {
512
+ /**
513
+ * The context of the stream being synchronized.
514
+ */
515
+ CUcontext context;
516
+ /**
517
+ * The stream being synchronized.
518
+ */
519
+ CUstream stream;
520
+ } CUpti_SynchronizeData;
521
+
522
+ /**
523
+ * \brief Data passed into a NVTX callback function.
524
+ *
525
+ * Data passed into a NVTX callback function as the \p cbdata argument
526
+ * to \ref CUpti_CallbackFunc. The \p cbdata will be this type for \p
527
+ * domain equal to CUPTI_CB_DOMAIN_NVTX. Unless otherwise notes, the
528
+ * callback data is valid only within the invocation of the callback
529
+ * function that is passed the data. If you need to retain some data
530
+ * for use outside of the callback, you must make a copy of that data.
531
+ */
532
+ typedef struct {
533
+ /**
534
+ * Name of the NVTX API function which issued the callback. This
535
+ * string is a global constant and so may be accessed outside of the
536
+ * callback.
537
+ */
538
+ const char *functionName;
539
+
540
+ /**
541
+ * Pointer to the arguments passed to the NVTX API call. See
542
+ * generated_nvtx_meta.h for structure definitions for the
543
+ * parameters for each NVTX API function.
544
+ */
545
+ const void *functionParams;
546
+
547
+ /**
548
+ * Pointer to the return value of the NVTX API call. See
549
+ * nvToolsExt.h for each NVTX API function's return value.
550
+ */
551
+ const void *functionReturnValue;
552
+ } CUpti_NvtxData;
553
+
554
+ /**
555
+ * \brief Stream attribute data passed into a resource callback function
556
+ * for CUPTI_CBID_RESOURCE_STREAM_ATTRIBUTE_CHANGED callback
557
+
558
+ * Data passed into a resource callback function as the \p cbdata
559
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
560
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The
561
+ * stream attribute data is valid only within the invocation of the callback
562
+ * function that is passed the data. If you need to retain some data
563
+ * for use outside of the callback, you must make a copy of that data.
564
+ */
565
+ typedef struct {
566
+ /**
567
+ * The CUDA stream handle for the attribute
568
+ */
569
+ CUstream stream;
570
+
571
+ /**
572
+ * The type of the CUDA stream attribute
573
+ */
574
+ CUstreamAttrID attr;
575
+
576
+ /**
577
+ * The value of the CUDA stream attribute
578
+ */
579
+ const CUstreamAttrValue *value;
580
+ } CUpti_StreamAttrData;
581
+
582
+ /**
583
+ * \brief Data passed into a State callback function.
584
+ *
585
+ * Data passed into a State callback function as the \p cbdata argument
586
+ * to \ref CUpti_CallbackFunc. The \p cbdata will be this type for \p
587
+ * domain equal to CUPTI_CB_DOMAIN_STATE and callback Ids belonging to CUpti_CallbackIdState.
588
+ * Unless otherwise noted, the callback data is valid only within the invocation of the callback
589
+ * function that is passed the data. If you need to retain some data
590
+ * for use outside of the callback, you must make a copy of that data.
591
+ */
592
+ typedef struct {
593
+ union {
594
+ /**
595
+ * Data passed along with the callback Ids
596
+ * Enum CUpti_CallbackIdState used to denote callback ids
597
+ */
598
+ struct {
599
+ /**
600
+ * Error code
601
+ */
602
+ CUptiResult result;
603
+ /**
604
+ * String containing more details. It can be NULL.
605
+ */
606
+ const char *message;
607
+ } notification;
608
+ };
609
+ } CUpti_StateData;
610
+ /**
611
+ * \brief An ID for a driver API, runtime API, resource or
612
+ * synchronization callback.
613
+ *
614
+ * An ID for a driver API, runtime API, resource or synchronization
615
+ * callback. Within a driver API callback this should be interpreted
616
+ * as a CUpti_driver_api_trace_cbid value (these values are defined in
617
+ * cupti_driver_cbid.h). Within a runtime API callback this should be
618
+ * interpreted as a CUpti_runtime_api_trace_cbid value (these values
619
+ * are defined in cupti_runtime_cbid.h). Within a resource API
620
+ * callback this should be interpreted as a \ref
621
+ * CUpti_CallbackIdResource value. Within a synchronize API callback
622
+ * this should be interpreted as a \ref CUpti_CallbackIdSync value.
623
+ */
624
+ typedef uint32_t CUpti_CallbackId;
625
+
626
+ /**
627
+ * \brief Function type for a callback.
628
+ *
629
+ * Function type for a callback. The type of the data passed to the
630
+ * callback in \p cbdata depends on the \p domain. If \p domain is
631
+ * CUPTI_CB_DOMAIN_DRIVER_API or CUPTI_CB_DOMAIN_RUNTIME_API the type
632
+ * of \p cbdata will be CUpti_CallbackData. If \p domain is
633
+ * CUPTI_CB_DOMAIN_RESOURCE the type of \p cbdata will be
634
+ * CUpti_ResourceData. If \p domain is CUPTI_CB_DOMAIN_SYNCHRONIZE the
635
+ * type of \p cbdata will be CUpti_SynchronizeData. If \p domain is
636
+ * CUPTI_CB_DOMAIN_NVTX the type of \p cbdata will be CUpti_NvtxData.
637
+ *
638
+ * \param userdata User data supplied at subscription of the callback
639
+ * \param domain The domain of the callback
640
+ * \param cbid The ID of the callback
641
+ * \param cbdata Data passed to the callback.
642
+ */
643
+ typedef void (CUPTIAPI *CUpti_CallbackFunc)(
644
+ void *userdata,
645
+ CUpti_CallbackDomain domain,
646
+ CUpti_CallbackId cbid,
647
+ const void *cbdata);
648
+
649
+ /**
650
+ * \brief A callback subscriber.
651
+ */
652
+ typedef struct CUpti_Subscriber_st *CUpti_SubscriberHandle;
653
+
654
+ /**
655
+ * \brief Pointer to an array of callback domains.
656
+ */
657
+ typedef CUpti_CallbackDomain *CUpti_DomainTable;
658
+
659
+ /**
660
+ * \brief Get the available callback domains.
661
+ *
662
+ * Returns in \p *domainTable an array of size \p *domainCount of all
663
+ * the available callback domains.
664
+ * \note \b Thread-safety: this function is thread safe.
665
+ *
666
+ * \param domainCount Returns number of callback domains
667
+ * \param domainTable Returns pointer to array of available callback domains
668
+ *
669
+ * \retval CUPTI_SUCCESS on success
670
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
671
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p domainCount or \p domainTable are NULL
672
+ */
673
+ CUptiResult CUPTIAPI cuptiSupportedDomains(size_t *domainCount,
674
+ CUpti_DomainTable *domainTable);
675
+
676
+ /**
677
+ * \brief Initialize a callback subscriber with a callback function
678
+ * and user data.
679
+ *
680
+ * Initializes a callback subscriber with a callback function and
681
+ * (optionally) a pointer to user data. The returned subscriber handle
682
+ * can be used to enable and disable the callback for specific domains
683
+ * and callback IDs.
684
+ * \note Only a single subscriber can be registered at a time. To ensure
685
+ * that no other CUPTI client interrupts the profiling session, it's the
686
+ * responsibility of all the CUPTI clients to call this function before
687
+ * starting the profling session. In case profiling session is already
688
+ * started by another CUPTI client, this function returns the error code
689
+ * CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED.
690
+ * Note that this function returns the same error when application is
691
+ * launched using NVIDIA tools like nvprof, Visual Profiler, Nsight Systems,
692
+ * Nsight Compute, cuda-gdb and cuda-memcheck.
693
+ * \note This function does not enable any callbacks.
694
+ * \note \b Thread-safety: this function is thread safe.
695
+ *
696
+ * \param subscriber Returns handle to initialize subscriber
697
+ * \param callback The callback function
698
+ * \param userdata A pointer to user data. This data will be passed to
699
+ * the callback function via the \p userdata parameter.
700
+ *
701
+ * \retval CUPTI_SUCCESS on success
702
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
703
+ * \retval CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED if there is already a CUPTI subscriber
704
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL
705
+ */
706
+ CUptiResult CUPTIAPI cuptiSubscribe(CUpti_SubscriberHandle *subscriber,
707
+ CUpti_CallbackFunc callback,
708
+ void *userdata);
709
+
710
+ /**
711
+ * \brief Unregister a callback subscriber.
712
+ *
713
+ * Removes a callback subscriber so that no future callbacks will be
714
+ * issued to that subscriber.
715
+ * \note \b Thread-safety: this function is thread safe.
716
+ *
717
+ * \param subscriber Handle to the initialize subscriber
718
+ *
719
+ * \retval CUPTI_SUCCESS on success
720
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
721
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL or not initialized
722
+ */
723
+ CUptiResult CUPTIAPI cuptiUnsubscribe(CUpti_SubscriberHandle subscriber);
724
+
725
+ /**
726
+ * \brief Get the current enabled/disabled state of a callback for a specific
727
+ * domain and function ID.
728
+ *
729
+ * Returns non-zero in \p *enable if the callback for a domain and
730
+ * callback ID is enabled, and zero if not enabled.
731
+ *
732
+ * \note \b Thread-safety: a subscriber must serialize access to
733
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
734
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
735
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
736
+ * the results are undefined.
737
+ *
738
+ * \param enable Returns non-zero if callback enabled, zero if not enabled
739
+ * \param subscriber Handle to the initialize subscriber
740
+ * \param domain The domain of the callback
741
+ * \param cbid The ID of the callback
742
+ *
743
+ * \retval CUPTI_SUCCESS on success
744
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
745
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p enabled is NULL, or if \p
746
+ * subscriber, \p domain or \p cbid is invalid.
747
+ */
748
+ CUptiResult CUPTIAPI cuptiGetCallbackState(uint32_t *enable,
749
+ CUpti_SubscriberHandle subscriber,
750
+ CUpti_CallbackDomain domain,
751
+ CUpti_CallbackId cbid);
752
+
753
+ /**
754
+ * \brief Enable or disabled callbacks for a specific domain and
755
+ * callback ID.
756
+ *
757
+ * Enable or disabled callbacks for a subscriber for a specific domain
758
+ * and callback ID.
759
+ *
760
+ * \note \b Thread-safety: a subscriber must serialize access to
761
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
762
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
763
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
764
+ * the results are undefined.
765
+ *
766
+ * \param enable New enable state for the callback. Zero disables the
767
+ * callback, non-zero enables the callback.
768
+ * \param subscriber - Handle to callback subscription
769
+ * \param domain The domain of the callback
770
+ * \param cbid The ID of the callback
771
+ *
772
+ * \retval CUPTI_SUCCESS on success
773
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
774
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber, \p domain or \p
775
+ * cbid is invalid.
776
+ */
777
+ CUptiResult CUPTIAPI cuptiEnableCallback(uint32_t enable,
778
+ CUpti_SubscriberHandle subscriber,
779
+ CUpti_CallbackDomain domain,
780
+ CUpti_CallbackId cbid);
781
+
782
+ /**
783
+ * \brief Enable or disabled all callbacks for a specific domain.
784
+ *
785
+ * Enable or disabled all callbacks for a specific domain.
786
+ *
787
+ * \note \b Thread-safety: a subscriber must serialize access to
788
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
789
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackEnabled(sub,
790
+ * d, *) and cuptiEnableDomain(sub, d) are called concurrently, the
791
+ * results are undefined.
792
+ *
793
+ * \param enable New enable state for all callbacks in the
794
+ * domain. Zero disables all callbacks, non-zero enables all
795
+ * callbacks.
796
+ * \param subscriber - Handle to callback subscription
797
+ * \param domain The domain of the callback
798
+ *
799
+ * \retval CUPTI_SUCCESS on success
800
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
801
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber or \p domain is invalid
802
+ */
803
+ CUptiResult CUPTIAPI cuptiEnableDomain(uint32_t enable,
804
+ CUpti_SubscriberHandle subscriber,
805
+ CUpti_CallbackDomain domain);
806
+
807
+ /**
808
+ * \brief Enable or disable all callbacks in all domains.
809
+ *
810
+ * Enable or disable all callbacks in all domains.
811
+ *
812
+ * \note \b Thread-safety: a subscriber must serialize access to
813
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
814
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
815
+ * d, *) and cuptiEnableAllDomains(sub) are called concurrently, the
816
+ * results are undefined.
817
+ *
818
+ * \param enable New enable state for all callbacks in all
819
+ * domain. Zero disables all callbacks, non-zero enables all
820
+ * callbacks.
821
+ * \param subscriber - Handle to callback subscription
822
+ *
823
+ * \retval CUPTI_SUCCESS on success
824
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
825
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is invalid
826
+ */
827
+ CUptiResult CUPTIAPI cuptiEnableAllDomains(uint32_t enable,
828
+ CUpti_SubscriberHandle subscriber);
829
+
830
+ /**
831
+ * \brief Get the name of a callback for a specific domain and callback ID.
832
+ *
833
+ * Returns a pointer to the name c_string in \p **name.
834
+ *
835
+ * \note \b Names are available only for the DRIVER and RUNTIME domains.
836
+ *
837
+ * \param domain The domain of the callback
838
+ * \param cbid The ID of the callback
839
+ * \param name Returns pointer to the name string on success, NULL otherwise
840
+ *
841
+ * \retval CUPTI_SUCCESS on success
842
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p name is NULL, or if
843
+ * \p domain or \p cbid is invalid.
844
+ */
845
+ CUptiResult CUPTIAPI cuptiGetCallbackName(CUpti_CallbackDomain domain,
846
+ uint32_t cbid,
847
+ const char **name);
848
+
849
+ /** @} */ /* END CUPTI_CALLBACK_API */
850
+
851
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
852
+ #pragma GCC visibility pop
853
+ #endif
854
+
855
+ #if defined(__cplusplus)
856
+ }
857
+ #endif
858
+
859
+ #endif // file guard
860
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <cupti_result.h>
5
+
6
+ #include <stddef.h>
7
+ #include <stdint.h>
8
+
9
+ namespace NV { namespace Cupti { namespace Checkpoint {
10
+
11
+ #ifdef __cplusplus
12
+ extern "C"
13
+ {
14
+ #endif
15
+
16
+ /**
17
+ * \defgroup CUPTI_CHECKPOINT_API CUPTI Checkpoint API
18
+ * Functions, types, and enums that implement the CUPTI Checkpoint API.
19
+ * @{
20
+ */
21
+
22
+ /**
23
+ * \brief Specifies optimization options for a checkpoint, may be OR'd together to specify multiple options.
24
+ */
25
+ typedef enum
26
+ {
27
+ CUPTI_CHECKPOINT_OPT_NONE = 0, //!< Default behavior
28
+ CUPTI_CHECKPOINT_OPT_TRANSFER = 1, //!< Determine which mem blocks have changed, and only restore those. This optimization is cached, which means cuptiCheckpointRestore must always be called at the same point in the application when this option is enabled, or the result may be incorrect.
29
+ } CUpti_CheckpointOptimizations;
30
+
31
+ /**
32
+ * \brief Configuration and handle for a CUPTI Checkpoint
33
+ *
34
+ * A CUptiCheckpoint object should be initialized with desired options prior to passing into any
35
+ * CUPTI Checkpoint API function. The first call into a Checkpoint API function will initialize internal
36
+ * state based on these options. Subsequent changes to these options will not have any effect.
37
+ *
38
+ * Checkpoint data is saved in device, host, and filesystem space. There are options to reserve memory
39
+ * at each level (device, host, filesystem) which are intended to allow a guarantee that a certain amount
40
+ * of memory will remain free for use after the checkpoint is saved.
41
+ * Note, however, that falling back to slower levels of memory (host, and then filesystem) to save the checkpoint
42
+ * will result in performance degradation.
43
+ * Currently, the filesystem limitation is not implemented. Note that falling back to filesystem storage may
44
+ * significantly impact the performance for saving and restoring a checkpoint.
45
+ */
46
+ typedef struct
47
+ {
48
+ size_t structSize; //!< [in] Must be set to CUpti_Checkpoint_STRUCT_SIZE
49
+
50
+ CUcontext ctx; //!< [in] Set to context to save from, or will use current context if NULL
51
+
52
+ size_t reserveDeviceMB; //!< [in] Restrict checkpoint from using last N MB of device memory (-1 = use no device memory)
53
+ size_t reserveHostMB; //!< [in] Restrict checkpoint from using last N MB of host memory (-1 = use no host memory)
54
+ uint8_t allowOverwrite; //!< [in] Boolean, Allow checkpoint to save over existing checkpoint
55
+ uint8_t optimizations; //!< [in] Mask of CUpti_CheckpointOptimizations flags for this checkpoint
56
+
57
+ void * pPriv; //!< [in] Assign to NULL
58
+ } CUpti_Checkpoint;
59
+
60
+ #define CUpti_Checkpoint_STRUCT_SIZE \
61
+ (offsetof(CUpti_Checkpoint, pPriv) + \
62
+ sizeof(((CUpti_Checkpoint*)(nullptr))->pPriv))
63
+
64
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
65
+ #pragma GCC visibility push(default)
66
+ #endif
67
+
68
+ /**
69
+ * \brief Initialize and save a checkpoint of the device state associated with the handle context
70
+ *
71
+ * Uses the handle options to configure and save a checkpoint of the device state associated with the specified context.
72
+ *
73
+ * \param handle A pointer to a CUpti_Checkpoint object
74
+ *
75
+ * \retval CUPTI_SUCCESS if a checkpoint was successfully initialized and saved
76
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p handle does not appear to refer to a valid CUpti_Checkpoint
77
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
78
+ * \retval CUPTI_ERROR_INVALID_DEVICE if device associated with context is not compatible with checkpoint API
79
+ * \retval CUPTI_ERROR_INVALID_OPERATION if Save is requested over an existing checkpoint, but \p allowOverwrite was not originally specified
80
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if as configured, not enough backing storage space to save the checkpoint
81
+ */
82
+ CUptiResult cuptiCheckpointSave(CUpti_Checkpoint * const handle);
83
+
84
+ /**
85
+ * \brief Restore a checkpoint to the device associated with its context
86
+ *
87
+ * Restores device, pinned, and allocated memory to the state when the checkpoint was saved
88
+ *
89
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
90
+ *
91
+ * \retval CUTPI_SUCCESS if the checkpoint was successfully restored
92
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if the checkpoint was not previously initialized
93
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
94
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle appears invalid
95
+ * \retval CUPTI_ERROR_UNKNOWN if the restore or optimization operation fails
96
+ */
97
+ CUptiResult cuptiCheckpointRestore(CUpti_Checkpoint * const handle);
98
+
99
+ /**
100
+ * \brief Free the backing data for a checkpoint
101
+ *
102
+ * Frees all associated device, host memory and filesystem storage used for this context.
103
+ * After freeing a handle, it may be re-used as if it was new - options may be re-configured and will
104
+ * take effect on the next call to \p cuptiCheckpointSave.
105
+ *
106
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
107
+ *
108
+ * \retval CUPTI_SUCCESS if the handle was successfully freed
109
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle was already freed or appears invalid
110
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if the context is no longer valid
111
+ */
112
+ CUptiResult cuptiCheckpointFree(CUpti_Checkpoint * const handle);
113
+
114
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
115
+ #pragma GCC visibility pop
116
+ #endif
117
+
118
+ /**
119
+ * @}
120
+ */
121
+
122
+ #ifdef __cplusplus
123
+ }
124
+ #endif
125
+
126
+ // Exit namespace NV::Cupti::Checkpoint
127
+ }}}
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_common.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUPTI_COMMON_H__)
51
+ #define __CUPTI_COMMON_H__
52
+
53
+ #ifndef CUPTIAPI
54
+ #ifdef _WIN32
55
+ #define CUPTIAPI __stdcall
56
+ #else
57
+ #define CUPTIAPI
58
+ #endif
59
+ #endif
60
+
61
+ #ifndef CUPTIUTILAPI
62
+ #ifdef _WIN32
63
+ #define CUPTIUTILAPI __stdcall
64
+ #else
65
+ #define CUPTIUTILAPI
66
+ #endif
67
+ #endif
68
+
69
+ #if defined(__LP64__)
70
+ #define CUPTILP64 1
71
+ #elif defined(_WIN64)
72
+ #define CUPTILP64 1
73
+ #else
74
+ #undef CUPTILP64
75
+ #endif
76
+
77
+ #define ACTIVITY_RECORD_ALIGNMENT 8
78
+ #if defined(_WIN32) // Windows 32- and 64-bit
79
+ #define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding
80
+ #define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT))
81
+ #define END_PACKED_ALIGNMENT __pragma(pack(pop))
82
+ #elif defined(__GNUC__) // GCC
83
+ #define START_PACKED_ALIGNMENT
84
+ #define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT)))
85
+ #define END_PACKED_ALIGNMENT
86
+ #else // all other compilers
87
+ #define START_PACKED_ALIGNMENT
88
+ #define PACKED_ALIGNMENT
89
+ #define END_PACKED_ALIGNMENT
90
+ #endif
91
+
92
+ #endif /*__CUPTI_COMMON_H__*/
93
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ typedef enum CUpti_driver_api_trace_cbid_enum {
10
+ CUPTI_DRIVER_TRACE_CBID_INVALID = 0,
11
+ CUPTI_DRIVER_TRACE_CBID_cuInit = 1,
12
+ CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion = 2,
13
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGet = 3,
14
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount = 4,
15
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName = 5,
16
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability = 6,
17
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem = 7,
18
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties = 8,
19
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute = 9,
20
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate = 10,
21
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy = 11,
22
+ CUPTI_DRIVER_TRACE_CBID_cuCtxAttach = 12,
23
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDetach = 13,
24
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent = 14,
25
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent = 15,
26
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice = 16,
27
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize = 17,
28
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoad = 18,
29
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData = 19,
30
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx = 20,
31
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary = 21,
32
+ CUPTI_DRIVER_TRACE_CBID_cuModuleUnload = 22,
33
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction = 23,
34
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal = 24,
35
+ CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal = 25,
36
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef = 26,
37
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo = 27,
38
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo = 28,
39
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc = 29,
40
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc = 30,
41
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch = 31,
42
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch = 32,
43
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree = 33,
44
+ CUPTI_DRIVER_TRACE_CBID_cu64MemFree = 34,
45
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange = 35,
46
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange = 36,
47
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost = 37,
48
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost = 38,
49
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc = 39,
50
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer = 40,
51
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer = 41,
52
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags = 42,
53
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD = 43,
54
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD = 44,
55
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH = 45,
56
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH = 46,
57
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD = 47,
58
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD = 48,
59
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA = 49,
60
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA = 50,
61
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD = 51,
62
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD = 52,
63
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA = 53,
64
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH = 54,
65
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA = 55,
66
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D = 56,
67
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned = 57,
68
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D = 58,
69
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D = 59,
70
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync = 60,
71
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync = 61,
72
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync = 62,
73
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync = 63,
74
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync = 64,
75
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync = 65,
76
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync = 66,
77
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync = 67,
78
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync = 68,
79
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync = 69,
80
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync = 70,
81
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8 = 71,
82
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8 = 72,
83
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16 = 73,
84
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16 = 74,
85
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32 = 75,
86
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32 = 76,
87
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8 = 77,
88
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8 = 78,
89
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16 = 79,
90
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16 = 80,
91
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32 = 81,
92
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32 = 82,
93
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape = 83,
94
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize = 84,
95
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute = 85,
96
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig = 86,
97
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate = 87,
98
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor = 88,
99
+ CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy = 89,
100
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate = 90,
101
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor = 91,
102
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate = 92,
103
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy = 93,
104
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray = 94,
105
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress = 95,
106
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress = 96,
107
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D = 97,
108
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D = 98,
109
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat = 99,
110
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode = 100,
111
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode = 101,
112
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags = 102,
113
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress = 103,
114
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress = 104,
115
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray = 105,
116
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode = 106,
117
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode = 107,
118
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat = 108,
119
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags = 109,
120
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetSize = 110,
121
+ CUPTI_DRIVER_TRACE_CBID_cuParamSeti = 111,
122
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetf = 112,
123
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetv = 113,
124
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef = 114,
125
+ CUPTI_DRIVER_TRACE_CBID_cuLaunch = 115,
126
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid = 116,
127
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync = 117,
128
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreate = 118,
129
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord = 119,
130
+ CUPTI_DRIVER_TRACE_CBID_cuEventQuery = 120,
131
+ CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize = 121,
132
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy = 122,
133
+ CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime = 123,
134
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreate = 124,
135
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery = 125,
136
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize = 126,
137
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy = 127,
138
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource = 128,
139
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray = 129,
140
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer = 130,
141
+ CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer = 131,
142
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags = 132,
143
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources = 133,
144
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources = 134,
145
+ CUPTI_DRIVER_TRACE_CBID_cuGetExportTable = 135,
146
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit = 136,
147
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit = 137,
148
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice = 138,
149
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate = 139,
150
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource = 140,
151
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource = 141,
152
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource = 142,
153
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources = 143,
154
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources = 144,
155
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags = 145,
156
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray = 146,
157
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer = 147,
158
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize = 148,
159
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch = 149,
160
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions = 150,
161
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice = 151,
162
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate = 152,
163
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource = 153,
164
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice = 154,
165
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate = 155,
166
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource = 156,
167
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice = 157,
168
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource = 158,
169
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource = 159,
170
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources = 160,
171
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources = 161,
172
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags = 162,
173
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions = 163,
174
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray = 164,
175
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer = 165,
176
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize = 166,
177
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch = 167,
178
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin = 168,
179
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9End = 169,
180
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer = 170,
181
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer = 171,
182
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer = 172,
183
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer = 173,
184
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate = 174,
185
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer = 175,
186
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage = 176,
187
+ CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice = 177,
188
+ CUPTI_DRIVER_TRACE_CBID_cuGLInit = 178,
189
+ CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject = 179,
190
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject = 180,
191
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject = 181,
192
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject = 182,
193
+ CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags = 183,
194
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync = 184,
195
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync = 185,
196
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice = 186,
197
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate = 187,
198
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface = 188,
199
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface = 189,
200
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef = 190,
201
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate = 191,
202
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy = 192,
203
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat = 193,
204
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray = 194,
205
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat = 195,
206
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray = 196,
207
+ CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem = 197,
208
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer = 198,
209
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize = 199,
210
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch = 200,
211
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions = 201,
212
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions = 202,
213
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer = 203,
214
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize = 204,
215
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch = 205,
216
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer = 206,
217
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject = 207,
218
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync = 208,
219
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices = 209,
220
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice = 210,
221
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices = 211,
222
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice = 212,
223
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices = 213,
224
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice = 214,
225
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc = 215,
226
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async = 216,
227
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async = 217,
228
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async = 218,
229
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async = 219,
230
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async = 220,
231
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async = 221,
232
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async = 222,
233
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async = 223,
234
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async = 224,
235
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async = 225,
236
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async = 226,
237
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async = 227,
238
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate = 228,
239
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor = 229,
240
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate = 230,
241
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor = 231,
242
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D = 232,
243
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned = 233,
244
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync = 234,
245
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2 = 235,
246
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2 = 236,
247
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2 = 237,
248
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2 = 238,
249
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2 = 239,
250
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2 = 240,
251
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2 = 241,
252
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2 = 242,
253
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2 = 243,
254
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2 = 244,
255
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2 = 245,
256
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2 = 246,
257
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2 = 247,
258
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2 = 248,
259
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2 = 249,
260
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2 = 250,
261
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2 = 251,
262
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2 = 252,
263
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2 = 253,
264
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2 = 254,
265
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2 = 255,
266
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2 = 256,
267
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2 = 257,
268
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2 = 258,
269
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2 = 259,
270
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2 = 260,
271
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2 = 261,
272
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2 = 262,
273
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2 = 263,
274
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2 = 264,
275
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2 = 265,
276
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2 = 266,
277
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2 = 267,
278
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2 = 268,
279
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2 = 269,
280
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2 = 270,
281
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2 = 271,
282
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2 = 272,
283
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2 = 273,
284
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2 = 274,
285
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2 = 275,
286
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2 = 276,
287
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2 = 277,
288
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2 = 278,
289
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2 = 279,
290
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2 = 280,
291
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2 = 281,
292
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2 = 282,
293
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2 = 283,
294
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2 = 284,
295
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2 = 285,
296
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2 = 286,
297
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2 = 287,
298
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2 = 288,
299
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2 = 289,
300
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2 = 290,
301
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2 = 291,
302
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2 = 292,
303
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2 = 293,
304
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2 = 294,
305
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent = 295,
306
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion = 296,
307
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice = 297,
308
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice = 298,
309
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig = 299,
310
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig = 300,
311
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister = 301,
312
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister = 302,
313
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent = 303,
314
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent = 304,
315
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy = 305,
316
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync = 306,
317
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel = 307,
318
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStart = 308,
319
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStop = 309,
320
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute = 310,
321
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize = 311,
322
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer = 312,
323
+ CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess = 313,
324
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess = 314,
325
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister = 315,
326
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister = 316,
327
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer = 317,
328
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer = 318,
329
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync = 319,
330
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer = 320,
331
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync = 321,
332
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2 = 322,
333
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2 = 323,
334
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2 = 324,
335
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2 = 325,
336
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2 = 326,
337
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3 = 327,
338
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle = 328,
339
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle = 329,
340
+ CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle = 330,
341
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId = 331,
342
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId = 332,
343
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices = 333,
344
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle = 334,
345
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle = 335,
346
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig = 336,
347
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig = 337,
348
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig = 338,
349
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate = 339,
350
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy = 340,
351
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc = 341,
352
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc = 342,
353
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate = 343,
354
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy = 344,
355
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc = 345,
356
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback = 346,
357
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate = 347,
358
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel = 348,
359
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy = 349,
360
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray = 350,
361
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode = 351,
362
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias = 352,
363
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp = 353,
364
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy = 354,
365
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray = 355,
366
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode = 356,
367
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias = 357,
368
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp = 358,
369
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy = 359,
370
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray = 360,
371
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc = 361,
372
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate = 362,
373
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData = 363,
374
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile = 364,
375
+ CUPTI_DRIVER_TRACE_CBID_cuLinkComplete = 365,
376
+ CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy = 366,
377
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority = 367,
378
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority = 368,
379
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags = 369,
380
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange = 370,
381
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged = 371,
382
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorString = 372,
383
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorName = 373,
384
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor = 374,
385
+ CUPTI_DRIVER_TRACE_CBID_cuCompilePtx = 375,
386
+ CUPTI_DRIVER_TRACE_CBID_cuBinaryFree = 376,
387
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync = 377,
388
+ CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute = 378,
389
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2 = 379,
390
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2 = 380,
391
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2 = 381,
392
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2 = 382,
393
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2 = 383,
394
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize = 384,
395
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2 = 385,
396
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain = 386,
397
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease = 387,
398
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags = 388,
399
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset = 389,
400
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage = 390,
401
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags = 391,
402
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState = 392,
403
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect = 393,
404
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect = 394,
405
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame = 395,
406
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame = 396,
407
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds = 397,
408
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds = 398,
409
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds = 399,
410
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds = 400,
411
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds = 401,
412
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds = 402,
413
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds = 403,
414
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds = 404,
415
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds = 405,
416
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds = 406,
417
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds = 407,
418
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds = 408,
419
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds = 409,
420
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds = 410,
421
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds = 411,
422
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds = 412,
423
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds = 413,
424
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds = 414,
425
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds = 415,
426
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds = 416,
427
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds = 417,
428
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz = 418,
429
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz = 419,
430
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz = 420,
431
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz = 421,
432
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz = 422,
433
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz = 423,
434
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz = 424,
435
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz = 425,
436
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz = 426,
437
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz = 427,
438
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz = 428,
439
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz = 429,
440
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz = 430,
441
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async_ptsz = 431,
442
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async_ptsz = 432,
443
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async_ptsz = 433,
444
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority_ptsz = 434,
445
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags_ptsz = 435,
446
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent_ptsz = 436,
447
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback_ptsz = 437,
448
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync_ptsz = 438,
449
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery_ptsz = 439,
450
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize_ptsz = 440,
451
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord_ptsz = 441,
452
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel_ptsz = 442,
453
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources_ptsz = 443,
454
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources_ptsz = 444,
455
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2_ptsz = 445,
456
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerConnect = 446,
457
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerDisconnect = 447,
458
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerPresentFrame = 448,
459
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedEglFrame = 449,
460
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttributes = 450,
461
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags = 451,
462
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSizeWithFlags = 452,
463
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerReturnFrame = 453,
464
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetP2PAttribute = 454,
465
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetBorderColor = 455,
466
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetBorderColor = 456,
467
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise = 457,
468
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32 = 458,
469
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_ptsz = 459,
470
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32 = 460,
471
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_ptsz = 461,
472
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp = 462,
473
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_ptsz = 463,
474
+ CUPTI_DRIVER_TRACE_CBID_cuNVNbufferGetPointer = 464,
475
+ CUPTI_DRIVER_TRACE_CBID_cuNVNtextureGetArray = 465,
476
+ CUPTI_DRIVER_TRACE_CBID_cuNNSetAllocator = 466,
477
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync = 467,
478
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_ptsz = 468,
479
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromNVNSync = 469,
480
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnectWithFlags = 470,
481
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttribute = 471,
482
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttributes = 472,
483
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64 = 473,
484
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_ptsz = 474,
485
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64 = 475,
486
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_ptsz = 476,
487
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel = 477,
488
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel_ptsz = 478,
489
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromEGLSync = 479,
490
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernelMultiDevice = 480,
491
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetAttribute = 481,
492
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid = 482,
493
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx = 483,
494
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_ptsz = 484,
495
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalMemory = 485,
496
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedBuffer = 486,
497
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedMipmappedArray = 487,
498
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalMemory = 488,
499
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalSemaphore = 489,
500
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync = 490,
501
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync_ptsz = 491,
502
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync = 492,
503
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync_ptsz = 493,
504
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalSemaphore = 494,
505
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture = 495,
506
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_ptsz = 496,
507
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture = 497,
508
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture_ptsz = 498,
509
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing = 499,
510
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing_ptsz = 500,
511
+ CUPTI_DRIVER_TRACE_CBID_cuGraphCreate = 501,
512
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode = 502,
513
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams = 503,
514
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemcpyNode = 504,
515
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeGetParams = 505,
516
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemsetNode = 506,
517
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeGetParams = 507,
518
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeSetParams = 508,
519
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetType = 509,
520
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetRootNodes = 510,
521
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies = 511,
522
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes = 512,
523
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate = 513,
524
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch = 514,
525
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch_ptsz = 515,
526
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecDestroy = 516,
527
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroy = 517,
528
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies = 518,
529
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies = 519,
530
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeSetParams = 520,
531
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams = 521,
532
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroyNode = 522,
533
+ CUPTI_DRIVER_TRACE_CBID_cuGraphClone = 523,
534
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeFindInClone = 524,
535
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddChildGraphNode = 525,
536
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEmptyNode = 526,
537
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc = 527,
538
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc_ptsz = 528,
539
+ CUPTI_DRIVER_TRACE_CBID_cuGraphChildGraphNodeGetGraph = 529,
540
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddHostNode = 530,
541
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeGetParams = 531,
542
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetLuid = 532,
543
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeSetParams = 533,
544
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetNodes = 534,
545
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges = 535,
546
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo = 536,
547
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_ptsz = 537,
548
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams = 538,
549
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2 = 539,
550
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2_ptsz = 540,
551
+ CUPTI_DRIVER_TRACE_CBID_cuThreadExchangeStreamCaptureMode = 541,
552
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetNvSciSyncAttributes = 542,
553
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyAvailableDynamicSMemPerBlock = 543,
554
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease_v2 = 544,
555
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset_v2 = 545,
556
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags_v2 = 546,
557
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressReserve = 547,
558
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressFree = 548,
559
+ CUPTI_DRIVER_TRACE_CBID_cuMemCreate = 549,
560
+ CUPTI_DRIVER_TRACE_CBID_cuMemRelease = 550,
561
+ CUPTI_DRIVER_TRACE_CBID_cuMemMap = 551,
562
+ CUPTI_DRIVER_TRACE_CBID_cuMemUnmap = 552,
563
+ CUPTI_DRIVER_TRACE_CBID_cuMemSetAccess = 553,
564
+ CUPTI_DRIVER_TRACE_CBID_cuMemExportToShareableHandle = 554,
565
+ CUPTI_DRIVER_TRACE_CBID_cuMemImportFromShareableHandle = 555,
566
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationGranularity = 556,
567
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationPropertiesFromHandle = 557,
568
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAccess = 558,
569
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags = 559,
570
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags_ptsz = 560,
571
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate = 561,
572
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemcpyNodeSetParams = 562,
573
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemsetNodeSetParams = 563,
574
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecHostNodeSetParams = 564,
575
+ CUPTI_DRIVER_TRACE_CBID_cuMemRetainAllocationHandle = 565,
576
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetModule = 566,
577
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle_v2 = 567,
578
+ CUPTI_DRIVER_TRACE_CBID_cuCtxResetPersistingL2Cache = 568,
579
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeCopyAttributes = 569,
580
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetAttribute = 570,
581
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetAttribute = 571,
582
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes = 572,
583
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes_ptsz = 573,
584
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute = 574,
585
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute_ptsz = 575,
586
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute = 576,
587
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute_ptsz = 577,
588
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate_v2 = 578,
589
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetTexture1DLinearMaxWidth = 579,
590
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload = 580,
591
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload_ptsz = 581,
592
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetSparseProperties = 582,
593
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetSparseProperties = 583,
594
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync = 584,
595
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync_ptsz = 585,
596
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecChildGraphNodeSetParams = 586,
597
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags = 587,
598
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags_ptsz = 588,
599
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventRecordNode = 589,
600
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventWaitNode = 590,
601
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeGetEvent = 591,
602
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeGetEvent = 592,
603
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeSetEvent = 593,
604
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeSetEvent = 594,
605
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventRecordNodeSetEvent = 595,
606
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventWaitNodeSetEvent = 596,
607
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetPlane = 597,
608
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync = 598,
609
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync_ptsz = 599,
610
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync = 600,
611
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync_ptsz = 601,
612
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolTrimTo = 602,
613
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAttribute = 603,
614
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAttribute = 604,
615
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAccess = 605,
616
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDefaultMemPool = 606,
617
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolCreate = 607,
618
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolDestroy = 608,
619
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetMemPool = 609,
620
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetMemPool = 610,
621
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync = 611,
622
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync_ptsz = 612,
623
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportToShareableHandle = 613,
624
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportFromShareableHandle = 614,
625
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportPointer = 615,
626
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportPointer = 616,
627
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAccess = 617,
628
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresSignalNode = 618,
629
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeGetParams = 619,
630
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeSetParams = 620,
631
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresWaitNode = 621,
632
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeGetParams = 622,
633
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeSetParams = 623,
634
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresSignalNodeSetParams = 624,
635
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresWaitNodeSetParams = 625,
636
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress = 626,
637
+ CUPTI_DRIVER_TRACE_CBID_cuFlushGPUDirectRDMAWrites = 627,
638
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDebugDotPrint = 628,
639
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2 = 629,
640
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2_ptsz = 630,
641
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies = 631,
642
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_ptsz = 632,
643
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectCreate = 633,
644
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRetain = 634,
645
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRelease = 635,
646
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRetainUserObject = 636,
647
+ CUPTI_DRIVER_TRACE_CBID_cuGraphReleaseUserObject = 637,
648
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemAllocNode = 638,
649
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemFreeNode = 639,
650
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGraphMemTrim = 640,
651
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetGraphMemAttribute = 641,
652
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetGraphMemAttribute = 642,
653
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithFlags = 643,
654
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetExecAffinitySupport = 644,
655
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v3 = 645,
656
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetExecAffinity = 646,
657
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid_v2 = 647,
658
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemAllocNodeGetParams = 648,
659
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemFreeNodeGetParams = 649,
660
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetEnabled = 650,
661
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetEnabled = 651,
662
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx = 652,
663
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx_ptsz = 653,
664
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetMemoryRequirements = 654,
665
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetMemoryRequirements = 655,
666
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams = 656,
667
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams_ptsz = 657,
668
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecGetFlags = 658,
669
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2 = 659,
670
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2_ptsz = 660,
671
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2 = 661,
672
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2_ptsz = 662,
673
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2 = 663,
674
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2_ptsz = 664,
675
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2 = 665,
676
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2_ptsz = 666,
677
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2 = 667,
678
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz = 668,
679
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode = 669,
680
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams = 670,
681
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams = 671,
682
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams = 672,
683
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode = 673,
684
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange = 674,
685
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize = 675,
686
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters = 676,
687
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2 = 677,
688
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData = 678,
689
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile = 679,
690
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload = 680,
691
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel = 681,
692
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule = 682,
693
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction = 683,
694
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal = 684,
695
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged = 685,
696
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute = 686,
697
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute = 687,
698
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig = 688,
699
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2 = 689,
700
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2 = 690,
701
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2 = 691,
702
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2 = 692,
703
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId = 693,
704
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz = 694,
705
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetId = 695,
706
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2 = 696,
707
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled = 697,
708
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col = 698,
709
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress = 699,
710
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction = 700,
711
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute = 701,
712
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal = 702,
713
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute = 703,
714
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal = 704,
715
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags = 705,
716
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate = 706,
717
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice = 707,
718
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem = 708,
719
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr = 709,
720
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind = 710,
721
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity = 711,
722
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode = 712,
723
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetParams = 713,
724
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecNodeSetParams = 714,
725
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise_v2 = 715,
726
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2 = 716,
727
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_v2_ptsz = 717,
728
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetName = 718,
729
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetName = 719,
730
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph = 720,
731
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCaptureToGraph_ptsz = 721,
732
+ CUPTI_DRIVER_TRACE_CBID_cuGraphConditionalHandleCreate = 722,
733
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddNode_v2 = 723,
734
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges_v2 = 724,
735
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies_v2 = 725,
736
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes_v2 = 726,
737
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies_v2 = 727,
738
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies_v2 = 728,
739
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3 = 729,
740
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v3_ptsz = 730,
741
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2 = 731,
742
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_v2_ptsz = 732,
743
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetParamInfo = 733,
744
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetParamInfo = 734,
745
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceRegisterAsyncNotification = 735,
746
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceUnregisterAsyncNotification = 736,
747
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunctionCount = 737,
748
+ CUPTI_DRIVER_TRACE_CBID_cuModuleEnumerateFunctions = 738,
749
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernelCount = 739,
750
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryEnumerateKernels = 740,
751
+ CUPTI_DRIVER_TRACE_CBID_cuFuncIsLoaded = 741,
752
+ CUPTI_DRIVER_TRACE_CBID_cuFuncLoad = 742,
753
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxCreate = 743,
754
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxDestroy = 744,
755
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDevResource = 745,
756
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevResource = 746,
757
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxGetDevResource = 747,
758
+ CUPTI_DRIVER_TRACE_CBID_cuDevResourceGenerateDesc = 748,
759
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxRecordEvent = 749,
760
+ CUPTI_DRIVER_TRACE_CBID_cuGreenCtxWaitEvent = 750,
761
+ CUPTI_DRIVER_TRACE_CBID_cuDevSmResourceSplitByCount = 751,
762
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetGreenCtx = 752,
763
+ CUPTI_DRIVER_TRACE_CBID_cuCtxFromGreenCtx = 753,
764
+ CUPTI_DRIVER_TRACE_CBID_SIZE = 754,
765
+ CUPTI_DRIVER_TRACE_CBID_FORCE_INT = 0x7fffffff
766
+ } CUpti_driver_api_trace_cbid;
767
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2013-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
51
+ #pragma GCC visibility push(default)
52
+ #endif
53
+
54
+ typedef enum {
55
+ CUPTI_CBID_NVTX_INVALID = 0,
56
+ CUPTI_CBID_NVTX_nvtxMarkA = 1,
57
+ CUPTI_CBID_NVTX_nvtxMarkW = 2,
58
+ CUPTI_CBID_NVTX_nvtxMarkEx = 3,
59
+ CUPTI_CBID_NVTX_nvtxRangeStartA = 4,
60
+ CUPTI_CBID_NVTX_nvtxRangeStartW = 5,
61
+ CUPTI_CBID_NVTX_nvtxRangeStartEx = 6,
62
+ CUPTI_CBID_NVTX_nvtxRangeEnd = 7,
63
+ CUPTI_CBID_NVTX_nvtxRangePushA = 8,
64
+ CUPTI_CBID_NVTX_nvtxRangePushW = 9,
65
+ CUPTI_CBID_NVTX_nvtxRangePushEx = 10,
66
+ CUPTI_CBID_NVTX_nvtxRangePop = 11,
67
+ CUPTI_CBID_NVTX_nvtxNameCategoryA = 12,
68
+ CUPTI_CBID_NVTX_nvtxNameCategoryW = 13,
69
+ CUPTI_CBID_NVTX_nvtxNameOsThreadA = 14,
70
+ CUPTI_CBID_NVTX_nvtxNameOsThreadW = 15,
71
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceA = 16,
72
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceW = 17,
73
+ CUPTI_CBID_NVTX_nvtxNameCuContextA = 18,
74
+ CUPTI_CBID_NVTX_nvtxNameCuContextW = 19,
75
+ CUPTI_CBID_NVTX_nvtxNameCuStreamA = 20,
76
+ CUPTI_CBID_NVTX_nvtxNameCuStreamW = 21,
77
+ CUPTI_CBID_NVTX_nvtxNameCuEventA = 22,
78
+ CUPTI_CBID_NVTX_nvtxNameCuEventW = 23,
79
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceA = 24,
80
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceW = 25,
81
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamA = 26,
82
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamW = 27,
83
+ CUPTI_CBID_NVTX_nvtxNameCudaEventA = 28,
84
+ CUPTI_CBID_NVTX_nvtxNameCudaEventW = 29,
85
+ CUPTI_CBID_NVTX_nvtxDomainMarkEx = 30,
86
+ CUPTI_CBID_NVTX_nvtxDomainRangeStartEx = 31,
87
+ CUPTI_CBID_NVTX_nvtxDomainRangeEnd = 32,
88
+ CUPTI_CBID_NVTX_nvtxDomainRangePushEx = 33,
89
+ CUPTI_CBID_NVTX_nvtxDomainRangePop = 34,
90
+ CUPTI_CBID_NVTX_nvtxDomainResourceCreate = 35,
91
+ CUPTI_CBID_NVTX_nvtxDomainResourceDestroy = 36,
92
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryA = 37,
93
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryW = 38,
94
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringA = 39,
95
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringW = 40,
96
+ CUPTI_CBID_NVTX_nvtxDomainCreateA = 41,
97
+ CUPTI_CBID_NVTX_nvtxDomainCreateW = 42,
98
+ CUPTI_CBID_NVTX_nvtxDomainDestroy = 43,
99
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserCreate = 44,
100
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserDestroy = 45,
101
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireStart = 46,
102
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireFailed = 47,
103
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireSuccess = 48,
104
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserReleasing = 49,
105
+ CUPTI_CBID_NVTX_SIZE,
106
+ CUPTI_CBID_NVTX_FORCE_INT = 0x7fffffff
107
+ } CUpti_nvtx_api_trace_cbid;
108
+
109
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
110
+ #pragma GCC visibility pop
111
+ #endif
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h ADDED
@@ -0,0 +1,936 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PCSAMPLING_H_)
51
+ #define _CUPTI_PCSAMPLING_H_
52
+
53
+ #include <cuda.h>
54
+ #include <stdint.h>
55
+ #include <stddef.h>
56
+ #include "cupti_result.h"
57
+ #include "cupti_common.h"
58
+
59
+
60
+ #if defined(__cplusplus)
61
+ extern "C" {
62
+ #endif
63
+
64
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
65
+ #pragma GCC visibility push(default)
66
+ #endif
67
+
68
+ /**
69
+ * \defgroup CUPTI_PCSAMPLING_API CUPTI PC Sampling API
70
+ * Functions, types, and enums that implement the CUPTI PC Sampling API.
71
+ * @{
72
+ */
73
+
74
+ #ifndef CUPTI_PCSAMPLING_STRUCT_SIZE
75
+ #define CUPTI_PCSAMPLING_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
76
+ #endif
77
+
78
+ #ifndef CUPTI_STALL_REASON_STRING_SIZE
79
+ #define CUPTI_STALL_REASON_STRING_SIZE 128
80
+ #endif
81
+
82
+ /**
83
+ * \brief PC Sampling collection mode
84
+ */
85
+ typedef enum
86
+ {
87
+ /**
88
+ * INVALID Value
89
+ */
90
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_INVALID = 0,
91
+ /**
92
+ * Continuous mode. Kernels are not serialized in this mode.
93
+ */
94
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS = 1,
95
+ /**
96
+ * Serialized mode. Kernels are serialized in this mode.
97
+ */
98
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED = 2,
99
+ } CUpti_PCSamplingCollectionMode;
100
+
101
+ /**
102
+ * \brief PC Sampling stall reasons
103
+ */
104
+ typedef struct PACKED_ALIGNMENT
105
+ {
106
+ /**
107
+ * [r] Collected stall reason index
108
+ */
109
+ uint32_t pcSamplingStallReasonIndex;
110
+ /**
111
+ * [r] Number of times the PC was sampled with the stallReason.
112
+ */
113
+ uint32_t samples;
114
+ } CUpti_PCSamplingStallReason;
115
+
116
+ /**
117
+ * \brief PC Sampling data
118
+ */
119
+ typedef struct PACKED_ALIGNMENT
120
+ {
121
+ /**
122
+ * [w] Size of the data structure.
123
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
124
+ * available in the structure. Used to preserve backward compatibility.
125
+ */
126
+ size_t size;
127
+ /**
128
+ * [r] Unique cubin id
129
+ */
130
+ uint64_t cubinCrc;
131
+ /**
132
+ * [r] PC offset
133
+ */
134
+ uint64_t pcOffset;
135
+ /**
136
+ * The function's unique symbol index in the module.
137
+ */
138
+ uint32_t functionIndex;
139
+ /**
140
+ * Padding
141
+ */
142
+ uint32_t pad;
143
+ /**
144
+ * [r] The function name. This name string might be shared across all the records
145
+ * including records from activity APIs representing the same function, and so it should not be
146
+ * modified or freed until post processing of all the records is done. Once done, it is user’s responsibility to
147
+ * free the memory using free() function.
148
+ */
149
+ char* functionName;
150
+ /**
151
+ * [r] Collected stall reason count
152
+ */
153
+ size_t stallReasonCount;
154
+ /**
155
+ * [r] Stall reason id
156
+ * Total samples
157
+ */
158
+ CUpti_PCSamplingStallReason *stallReason;
159
+ /**
160
+ * The correlation ID of the kernel to which this result is associated. Only valid for serialized mode of pc sampling collection.
161
+ * For continous mode of collection the correlationId will be set to 0.
162
+ */
163
+ uint32_t correlationId;
164
+ } CUpti_PCSamplingPCData;
165
+
166
+ /**
167
+ * \brief PC Sampling output data format
168
+ */
169
+ typedef enum
170
+ {
171
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_INVALID = 0,
172
+ /**
173
+ * HW buffer data will be parsed during collection of data
174
+ */
175
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED = 1,
176
+ } CUpti_PCSamplingOutputDataFormat;
177
+
178
+ /**
179
+ * \brief Collected PC Sampling data
180
+ *
181
+ */
182
+ typedef struct PACKED_ALIGNMENT
183
+ {
184
+ /**
185
+ * [w] Size of the data structure.
186
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
187
+ * available in the structure. Used to preserve backward compatibility.
188
+ */
189
+ size_t size;
190
+ /**
191
+ * [w] Number of PCs to be collected
192
+ */
193
+ size_t collectNumPcs;
194
+ /**
195
+ * [r] Number of samples collected across all PCs.
196
+ * It includes samples for user modules, samples for non-user kernels and dropped samples.
197
+ * It includes counts for all non selected stall reasons.
198
+ * CUPTI does not provide PC records for non-user kernels.
199
+ * CUPTI does not provide PC records for instructions for which all selected stall reason metrics counts are zero.
200
+ */
201
+ uint64_t totalSamples;
202
+ /**
203
+ * [r] Number of samples that were dropped by hardware due to backpressure/overflow.
204
+ */
205
+ uint64_t droppedSamples;
206
+ /**
207
+ * [r] Number of PCs collected
208
+ */
209
+ size_t totalNumPcs;
210
+ /**
211
+ * [r] Number of PCs available for collection
212
+ */
213
+ size_t remainingNumPcs;
214
+ /**
215
+ * [r] Unique identifier for each range.
216
+ * Data collected across multiple ranges in multiple buffers can be identified using range id.
217
+ */
218
+ uint64_t rangeId;
219
+ /**
220
+ * [r] Profiled PC data
221
+ * This data struct should have enough memory to collect number of PCs mentioned in \brief collectNumPcs
222
+ */
223
+ CUpti_PCSamplingPCData *pPcData;
224
+ /**
225
+ * [r] Number of samples collected across all non user kernels PCs.
226
+ * It includes samples for non-user kernels.
227
+ * It includes counts for all non selected stall reasons as well.
228
+ * CUPTI does not provide PC records for non-user kernels.
229
+ */
230
+ uint64_t nonUsrKernelsTotalSamples;
231
+
232
+ /**
233
+ * [r] Status of the hardware buffer.
234
+ * CUPTI returns the error code CUPTI_ERROR_OUT_OF_MEMORY when hardware buffer is full.
235
+ * When hardware buffer is full, user will get pc data as 0. To mitigate this issue, one or more of the below options can be tried:
236
+ * 1. Increase the hardware buffer size using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
237
+ * 2. Decrease the thread sleep span using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
238
+ * 3. Decrease the sampling frequency using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
239
+ */
240
+ uint8_t hardwareBufferFull;
241
+ } CUpti_PCSamplingData;
242
+
243
+ /**
244
+ * \brief PC Sampling configuration attributes
245
+ *
246
+ * PC Sampling configuration attribute types. These attributes can be read
247
+ * using \ref cuptiPCSamplingGetConfigurationAttribute and can be written
248
+ * using \ref cuptiPCSamplingSetConfigurationAttribute. Attributes marked
249
+ * [r] can only be read using \ref cuptiPCSamplingGetConfigurationAttribute
250
+ * [w] can only be written using \ref cuptiPCSamplingSetConfigurationAttribute
251
+ * [rw] can be read using \ref cuptiPCSamplingGetConfigurationAttribute and
252
+ * written using \ref cuptiPCSamplingSetConfigurationAttribute
253
+ */
254
+ typedef enum
255
+ {
256
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_INVALID = 0,
257
+ /**
258
+ * [rw] Sampling period for PC Sampling.
259
+ * DEFAULT - CUPTI defined value based on number of SMs
260
+ * Valid values for the sampling
261
+ * periods are between 5 to 31 both inclusive. This will set the
262
+ * sampling period to (2^samplingPeriod) cycles.
263
+ * For e.g. for sampling period = 5 to 31, cycles = 32, 64, 128,..., 2^31
264
+ * Value is a uint32_t
265
+ */
266
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD = 1,
267
+ /**
268
+ * [w] Number of stall reasons to collect.
269
+ * DEFAULT - All stall reasons will be collected
270
+ * Value is a size_t
271
+ * [w] Stall reasons to collect
272
+ * DEFAULT - All stall reasons will be collected
273
+ * Input value should be a pointer pointing to array of stall reason indexes
274
+ * containing all the stall reason indexes to collect.
275
+ */
276
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON = 2,
277
+ /**
278
+ * [rw] Size of SW buffer for raw PC counter data downloaded from HW buffer
279
+ * DEFAULT - 1 MB, which can accommodate approximately 5500 PCs
280
+ * with all stall reasons
281
+ * Approximately it takes 16 Bytes (and some fixed size memory)
282
+ * to accommodate one PC with one stall reason
283
+ * For e.g. 1 PC with 1 stall reason = 32 Bytes
284
+ * 1 PC with 2 stall reason = 48 Bytes
285
+ * 1 PC with 4 stall reason = 96 Bytes
286
+ * Value is a size_t
287
+ */
288
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE = 3,
289
+ /**
290
+ * [rw] Size of HW buffer in bytes
291
+ * DEFAULT - 512 MB
292
+ * If sampling period is too less, HW buffer can overflow
293
+ * and drop PC data
294
+ * Value is a size_t
295
+ */
296
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE = 4,
297
+ /**
298
+ * [rw] PC Sampling collection mode
299
+ * DEFAULT - CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS
300
+ * Input value should be of type \ref CUpti_PCSamplingCollectionMode.
301
+ */
302
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE = 5,
303
+ /**
304
+ * [rw] Control over PC Sampling data collection range
305
+ * Default - 0
306
+ * 1 - Allows user to start and stop PC Sampling using APIs -
307
+ * \ref cuptiPCSamplingStart() - Start PC Sampling
308
+ * \ref cuptiPCSamplingStop() - Stop PC Sampling
309
+ * Value is a uint32_t
310
+ */
311
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL = 6,
312
+ /**
313
+ * [w] Value for output data format
314
+ * Default - CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED
315
+ * Input value should be of type \ref CUpti_PCSamplingOutputDataFormat.
316
+ */
317
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT = 7,
318
+ /**
319
+ * [w] Data buffer to hold collected PC Sampling data PARSED_DATA
320
+ * Default - none.
321
+ * Buffer type is void * which can point to PARSED_DATA
322
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
323
+ */
324
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER = 8,
325
+ /**
326
+ * [rw] Control sleep time of the worker threads created by CUPTI for various PC sampling operations.
327
+ * CUPTI creates multiple worker threads to offload certain operations to these threads. This includes decoding of HW data to
328
+ * the CUPTI PC sampling data and correlating PC data to SASS instructions. CUPTI wakes up these threads periodically.
329
+ * Default - 100 milliseconds.
330
+ * Value is a uint32_t
331
+ */
332
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN = 9,
333
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_FORCE_INT = 0x7fffffff,
334
+ } CUpti_PCSamplingConfigurationAttributeType;
335
+
336
+ /**
337
+ * \brief PC sampling configuration information structure
338
+ *
339
+ * This structure provides \ref CUpti_PCSamplingConfigurationAttributeType which can be configured
340
+ * or queried for PC sampling configuration
341
+ */
342
+ typedef struct
343
+ {
344
+ /**
345
+ * Refer \ref CUpti_PCSamplingConfigurationAttributeType for all supported attribute types
346
+ */
347
+ CUpti_PCSamplingConfigurationAttributeType attributeType;
348
+ /*
349
+ * Configure or query status for \p attributeType
350
+ * CUPTI_SUCCESS for valid \p attributeType and \p attributeData
351
+ * CUPTI_ERROR_INVALID_OPERATION if \p attributeData is not valid
352
+ * CUPTI_ERROR_INVALID_PARAMETER if \p attributeType is not valid
353
+ */
354
+ CUptiResult attributeStatus;
355
+ union
356
+ {
357
+ /**
358
+ * Invalid Value
359
+ */
360
+ struct
361
+ {
362
+ uint64_t data[3];
363
+ } invalidData;
364
+ /**
365
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
366
+ */
367
+ struct
368
+ {
369
+ uint32_t samplingPeriod;
370
+ } samplingPeriodData;
371
+ /**
372
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON
373
+ */
374
+ struct
375
+ {
376
+ size_t stallReasonCount;
377
+ uint32_t *pStallReasonIndex;
378
+ } stallReasonData;
379
+ /**
380
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE
381
+ */
382
+ struct
383
+ {
384
+ size_t scratchBufferSize;
385
+ } scratchBufferSizeData;
386
+ /**
387
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
388
+ */
389
+ struct
390
+ {
391
+ size_t hardwareBufferSize;
392
+ } hardwareBufferSizeData;
393
+ /**
394
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE
395
+ */
396
+ struct
397
+ {
398
+ CUpti_PCSamplingCollectionMode collectionMode;
399
+ } collectionModeData;
400
+ /**
401
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
402
+ */
403
+ struct
404
+ {
405
+ uint32_t enableStartStopControl;
406
+ } enableStartStopControlData;
407
+ /**
408
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT
409
+ */
410
+ struct
411
+ {
412
+ CUpti_PCSamplingOutputDataFormat outputDataFormat;
413
+ } outputDataFormatData;
414
+ /**
415
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER
416
+ */
417
+ struct
418
+ {
419
+ void *samplingDataBuffer;
420
+ } samplingDataBufferData;
421
+ /**
422
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
423
+ */
424
+ struct
425
+ {
426
+ uint32_t workerThreadPeriodicSleepSpan;
427
+ } workerThreadPeriodicSleepSpanData;
428
+
429
+ } attributeData;
430
+ } CUpti_PCSamplingConfigurationInfo;
431
+
432
+ /**
433
+ * \brief PC sampling configuration structure
434
+ *
435
+ * This structure configures PC sampling using \ref cuptiPCSamplingSetConfigurationAttribute
436
+ * and queries PC sampling default configuration using \ref cuptiPCSamplingGetConfigurationAttribute
437
+ */
438
+ typedef struct
439
+ {
440
+ /**
441
+ * [w] Size of the data structure i.e. CUpti_PCSamplingConfigurationInfoParamsSize
442
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
443
+ * available in the structure. Used to preserve backward compatibility.
444
+ */
445
+ size_t size;
446
+ /**
447
+ * [w] Assign to NULL
448
+ */
449
+ void* pPriv;
450
+ /**
451
+ * [w] CUcontext
452
+ */
453
+ CUcontext ctx;
454
+ /**
455
+ * [w] Number of attributes to configure using \ref cuptiPCSamplingSetConfigurationAttribute or query
456
+ * using \ref cuptiPCSamplingGetConfigurationAttribute
457
+ */
458
+ size_t numAttributes;
459
+ /**
460
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
461
+ */
462
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
463
+ } CUpti_PCSamplingConfigurationInfoParams;
464
+ #define CUpti_PCSamplingConfigurationInfoParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingConfigurationInfoParams,pPCSamplingConfigurationInfo)
465
+
466
+ /**
467
+ * \brief Write PC Sampling configuration attribute.
468
+ *
469
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
470
+ * containing PC sampling configuration.
471
+ *
472
+ * \retval CUPTI_SUCCESS
473
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
474
+ * some invalid \p attrib.
475
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if attribute \p value is not valid
476
+ * or any \p pParams is not valid
477
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
478
+ * does not support the API
479
+ */
480
+ CUptiResult CUPTIAPI cuptiPCSamplingSetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
481
+
482
+ /**
483
+ * \brief Read PC Sampling configuration attribute.
484
+ *
485
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
486
+ * containing PC sampling configuration.
487
+ *
488
+ * \retval CUPTI_SUCCESS
489
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
490
+ * some invalid attribute.
491
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p attrib is not valid
492
+ * or any \p pParams is not valid
493
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT indicates that
494
+ * the \p value buffer is too small to hold the attribute value
495
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
496
+ * does not support the API
497
+ */
498
+ CUptiResult CUPTIAPI cuptiPCSamplingGetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
499
+
500
+ /**
501
+ * \brief Params for cuptiPCSamplingEnable
502
+ */
503
+ typedef struct
504
+ {
505
+ /**
506
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetDataParamsSize
507
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
508
+ * available in the structure. Used to preserve backward compatibility.
509
+ */
510
+ size_t size;
511
+ /**
512
+ * [w] Assign to NULL
513
+ */
514
+ void* pPriv;
515
+ /**
516
+ * [w] CUcontext
517
+ */
518
+ CUcontext ctx;
519
+ /**
520
+ * \param pcSamplingData Data buffer to hold collected PC Sampling data PARSED_DATA
521
+ * Buffer type is void * which can point to PARSED_DATA
522
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
523
+ */
524
+ void *pcSamplingData;
525
+ } CUpti_PCSamplingGetDataParams;
526
+ #define CUpti_PCSamplingGetDataParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetDataParams, pcSamplingData)
527
+ /**
528
+ * \brief Flush GPU PC sampling data periodically.
529
+ *
530
+ * Flushing of GPU PC Sampling data is required at following point to maintain uniqueness of PCs:
531
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, after every module load-unload-load
532
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED, after every kernel ends
533
+ * If configuration option \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
534
+ * is enabled, then after every range end i.e. \brief cuptiPCSamplingStop()
535
+ *
536
+ * If application is profiled in \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, with disabled
537
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL, and there is no module unload,
538
+ * user can collect data in two ways:
539
+ * Use \brief cuptiPCSamplingGetData() API periodically
540
+ * Use \brief cuptiPCSamplingDisable() on application exit and read GPU PC sampling data from sampling
541
+ * data buffer passed during configuration.
542
+ * Note: In case, \brief cuptiPCSamplingGetData() API is not called periodically, then sampling data buffer
543
+ * passed during configuration should be large enough to hold all PCs data.
544
+ * \brief cuptiPCSamplingGetData() API never does device synchronization.
545
+ * It is possible that when the API is called there is some unconsumed data from the HW buffer. In this case
546
+ * CUPTI provides only the data available with it at that moment.
547
+ *
548
+ * \param pParams A pointer to \ref CUpti_PCSamplingGetDataParams
549
+ *
550
+ * \retval CUPTI_SUCCESS
551
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called without
552
+ * enabling PC sampling.
553
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
554
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
555
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY indicates that the HW buffer is full
556
+ * does not support the API
557
+ */
558
+ CUptiResult CUPTIAPI cuptiPCSamplingGetData(CUpti_PCSamplingGetDataParams *pParams);
559
+
560
+ /**
561
+ * \brief Params for cuptiPCSamplingEnable
562
+ */
563
+ typedef struct
564
+ {
565
+ /**
566
+ * [w] Size of the data structure i.e. CUpti_PCSamplingEnableParamsSize
567
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
568
+ * available in the structure. Used to preserve backward compatibility.
569
+ */
570
+ size_t size;
571
+ /**
572
+ * [w] Assign to NULL
573
+ */
574
+ void* pPriv;
575
+ /**
576
+ * [w] CUcontext
577
+ */
578
+ CUcontext ctx;
579
+ } CUpti_PCSamplingEnableParams;
580
+ #define CUpti_PCSamplingEnableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingEnableParams, ctx)
581
+
582
+ /**
583
+ * \brief Enable PC sampling.
584
+ *
585
+ * \param pParams A pointer to \ref CUpti_PCSamplingEnableParams
586
+ *
587
+ * \retval CUPTI_SUCCESS
588
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
589
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
590
+ * does not support the API
591
+ */
592
+ CUptiResult CUPTIAPI cuptiPCSamplingEnable(CUpti_PCSamplingEnableParams *pParams);
593
+
594
+ /**
595
+ * \brief Params for cuptiPCSamplingDisable
596
+ */
597
+ typedef struct
598
+ {
599
+ /**
600
+ * [w] Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
601
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
602
+ * available in the structure. Used to preserve backward compatibility.
603
+ */
604
+ size_t size;
605
+ /**
606
+ * [w] Assign to NULL
607
+ */
608
+ void* pPriv;
609
+ /**
610
+ * [w] CUcontext
611
+ */
612
+ CUcontext ctx;
613
+ } CUpti_PCSamplingDisableParams;
614
+ #define CUpti_PCSamplingDisableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingDisableParams, ctx)
615
+
616
+ /**
617
+ * \brief Disable PC sampling.
618
+ *
619
+ * For application which doesn't destroy the CUDA context explicitly,
620
+ * this API does the PC Sampling tear-down, joins threads and copies PC records in the buffer provided
621
+ * during the PC sampling configuration. PC records which can't be accommodated in the buffer are discarded.
622
+ *
623
+ * \param pParams A pointer to \ref CUpti_PCSamplingDisableParams
624
+ *
625
+ * \retval CUPTI_SUCCESS
626
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
627
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
628
+ * does not support the API
629
+ */
630
+ CUptiResult CUPTIAPI cuptiPCSamplingDisable(CUpti_PCSamplingDisableParams *pParams);
631
+
632
+ /**
633
+ * \brief Params for cuptiPCSamplingStart
634
+ */
635
+ typedef struct
636
+ {
637
+ /**
638
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStartParamsSize
639
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
640
+ * available in the structure. Used to preserve backward compatibility.
641
+ */
642
+ size_t size;
643
+ /**
644
+ * [w] Assign to NULL
645
+ */
646
+ void* pPriv;
647
+ /**
648
+ * [w] CUcontext
649
+ */
650
+ CUcontext ctx;
651
+ } CUpti_PCSamplingStartParams;
652
+ #define CUpti_PCSamplingStartParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStartParams, ctx)
653
+
654
+ /**
655
+ * \brief Start PC sampling.
656
+ *
657
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
658
+ * This API can be used to mark starting of range. Set configuration option
659
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
660
+ *
661
+ * \param pParams A pointer to \ref CUpti_PCSamplingStartParams
662
+ *
663
+ * \retval CUPTI_SUCCESS
664
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
665
+ * incorrect PC Sampling configuration.
666
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
667
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
668
+ * does not support the API
669
+ */
670
+ CUptiResult CUPTIAPI cuptiPCSamplingStart(CUpti_PCSamplingStartParams *pParams);
671
+
672
+ /**
673
+ * \brief Params for cuptiPCSamplingStop
674
+ */
675
+ typedef struct
676
+ {
677
+ /**
678
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStopParamsSize
679
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
680
+ * available in the structure. Used to preserve backward compatibility.
681
+ */
682
+ size_t size;
683
+ /**
684
+ * [w] Assign to NULL
685
+ */
686
+ void* pPriv;
687
+ /**
688
+ * [w] CUcontext
689
+ */
690
+ CUcontext ctx;
691
+ } CUpti_PCSamplingStopParams;
692
+ #define CUpti_PCSamplingStopParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStopParams, ctx)
693
+
694
+ /**
695
+ * \brief Stop PC sampling.
696
+ *
697
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
698
+ * This API can be used to mark end of range. Set configuration option
699
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
700
+ *
701
+ * \param pParams A pointer to \ref CUpti_PCSamplingStopParams
702
+ *
703
+ * \retval CUPTI_SUCCESS
704
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
705
+ * incorrect PC Sampling configuration.
706
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
707
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
708
+ * does not support the API
709
+ */
710
+ CUptiResult CUPTIAPI cuptiPCSamplingStop(CUpti_PCSamplingStopParams *pParams);
711
+
712
+ /**
713
+ * \brief Params for cuptiPCSamplingGetNumStallReasons
714
+ */
715
+ typedef struct
716
+ {
717
+ /**
718
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetNumStallReasonsParamsSize
719
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
720
+ * available in the structure. Used to preserve backward compatibility.
721
+ */
722
+ size_t size;
723
+ /**
724
+ * [w] Assign to NULL
725
+ */
726
+ void* pPriv;
727
+ /**
728
+ * [w] CUcontext
729
+ */
730
+ CUcontext ctx;
731
+ /**
732
+ * [r] Number of stall reasons
733
+ */
734
+ size_t *numStallReasons;
735
+ } CUpti_PCSamplingGetNumStallReasonsParams;
736
+ #define CUpti_PCSamplingGetNumStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetNumStallReasonsParams, numStallReasons)
737
+
738
+ /**
739
+ * \brief Get PC sampling stall reason count.
740
+ *
741
+ * \param pParams A pointer to \ref CUpti_PCSamplingGetNumStallReasonsParams
742
+ *
743
+ * \retval CUPTI_SUCCESS
744
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
745
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
746
+ * does not support the API
747
+ */
748
+ CUptiResult CUPTIAPI cuptiPCSamplingGetNumStallReasons(CUpti_PCSamplingGetNumStallReasonsParams *pParams);
749
+
750
+ /**
751
+ * \brief Params for cuptiPCSamplingGetStallReasons
752
+ */
753
+ typedef struct
754
+ {
755
+ /**
756
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetStallReasonsParamsSize
757
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
758
+ * available in the structure. Used to preserve backward compatibility.
759
+ */
760
+ size_t size;
761
+ /**
762
+ * [w] Assign to NULL
763
+ */
764
+ void* pPriv;
765
+ /**
766
+ * [w] CUcontext
767
+ */
768
+ CUcontext ctx;
769
+ /**
770
+ * [w] Number of stall reasons
771
+ */
772
+ size_t numStallReasons;
773
+ /**
774
+ * [r] Stall reason index
775
+ */
776
+ uint32_t *stallReasonIndex;
777
+ /**
778
+ * [r] Stall reasons name
779
+ */
780
+ char **stallReasons;
781
+ } CUpti_PCSamplingGetStallReasonsParams;
782
+ #define CUpti_PCSamplingGetStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetStallReasonsParams, stallReasons)
783
+
784
+ /**
785
+ * \brief Get PC sampling stall reasons.
786
+ *
787
+ * \param pParams A pointer to \ref CUpti_PCSamplingGetStallReasonsParams
788
+ *
789
+ * \retval CUPTI_SUCCESS
790
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
791
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
792
+ * does not support the API
793
+ */
794
+ CUptiResult CUPTIAPI cuptiPCSamplingGetStallReasons(CUpti_PCSamplingGetStallReasonsParams *pParams);
795
+
796
+
797
+ /**
798
+ * \brief Params for cuptiGetSassToSourceCorrelation
799
+ */
800
+ typedef struct CUpti_GetSassToSourceCorrelationParams {
801
+ /**
802
+ * [w] Size of the data structure i.e. CUpti_GetSassToSourceCorrelationParamsSize
803
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
804
+ * available in the structure. Used to preserve backward compatibility.
805
+ */
806
+ size_t size;
807
+ /**
808
+ * [w] Pointer to cubin binary where function belongs.
809
+ */
810
+ const void* cubin;
811
+ /**
812
+ * [w] Function name to which PC belongs.
813
+ */
814
+ const char *functionName;
815
+ /**
816
+ * [w] Size of cubin binary.
817
+ */
818
+ size_t cubinSize;
819
+ /**
820
+ * [r] Line number in the source code.
821
+ */
822
+ uint32_t lineNumber;
823
+ /**
824
+ * [w] PC offset
825
+ */
826
+ uint64_t pcOffset;
827
+ /**
828
+ * [r] Path for the source file.
829
+ */
830
+ char *fileName;
831
+ /**
832
+ * [r] Path for the directory of source file.
833
+ */
834
+ char *dirName;
835
+ } CUpti_GetSassToSourceCorrelationParams;
836
+
837
+ #define CUpti_GetSassToSourceCorrelationParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetSassToSourceCorrelationParams, dirName)
838
+
839
+ /**
840
+ * \brief SASS to Source correlation.
841
+ *
842
+ * \param pParams A pointer to \ref CUpti_GetSassToSourceCorrelationParams
843
+ *
844
+ * It is expected from user to free allocated memory for fileName and dirName after use.
845
+ *
846
+ * \retval CUPTI_SUCCESS
847
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if either of the parameters cubin or functionName
848
+ * is NULL or cubinSize is zero or size field is not set correctly.
849
+ * \retval CUPTI_ERROR_INVALID_MODULE provided cubin is invalid.
850
+ * \retval CUPTI_ERROR_UNKNOWN an internal error occurred.
851
+ * This error code is also used for cases when the function is not present in the module.
852
+ * A better error code will be returned in the future release.
853
+ */
854
+ CUptiResult CUPTIAPI cuptiGetSassToSourceCorrelation(CUpti_GetSassToSourceCorrelationParams *pParams);
855
+
856
+ /**
857
+ * \brief Params for cuptiGetCubinCrc
858
+ */
859
+ typedef struct {
860
+ /**
861
+ * [w] Size of configuration structure.
862
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
863
+ * available in the structure. Used to preserve backward compatibility.
864
+ */
865
+ size_t size;
866
+ /**
867
+ * [w] Size of cubin binary.
868
+ */
869
+ size_t cubinSize;
870
+ /**
871
+ * [w] Pointer to cubin binary
872
+ */
873
+ const void* cubin;
874
+ /**
875
+ * [r] Computed CRC will be stored in it.
876
+ */
877
+ uint64_t cubinCrc;
878
+ } CUpti_GetCubinCrcParams;
879
+ #define CUpti_GetCubinCrcParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetCubinCrcParams, cubinCrc)
880
+
881
+ /**
882
+ * \brief Get the CRC of cubin.
883
+ *
884
+ * This function returns the CRC of provided cubin binary.
885
+ *
886
+ * \param pParams A pointer to \ref CUpti_GetCubinCrcParams
887
+ *
888
+ * \retval CUPTI_SUCCESS
889
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if parameter cubin is NULL or
890
+ * provided cubinSize is zero or size field is not set.
891
+ */
892
+ CUptiResult CUPTIAPI cuptiGetCubinCrc(CUpti_GetCubinCrcParams *pParams);
893
+
894
+ /**
895
+ * \brief Function type for callback used by CUPTI to request crc of
896
+ * loaded module.
897
+ *
898
+ * This callback function ask for crc of provided module in function.
899
+ * The provided crc will be stored in PC sampling records i.e. in the field 'cubinCrc' of the PC sampling
900
+ * struct CUpti_PCSamplingPCData. The CRC is uses during the offline source correlation to uniquely identify the module.
901
+ *
902
+ * \param cubin The pointer to cubin binary
903
+ * \param cubinSize The size of cubin binary.
904
+ * \param cubinCrc Returns the computed crc of cubin.
905
+ */
906
+ typedef void (CUPTIAPI *CUpti_ComputeCrcCallbackFunc)(
907
+ const void* cubin,
908
+ size_t cubinSize,
909
+ uint64_t *cubinCrc);
910
+
911
+ /**
912
+ * \brief Register callback function with CUPTI to use
913
+ * your own algorithm to compute cubin crc.
914
+ *
915
+ * This function registers a callback function and it gets called
916
+ * from CUPTI when a CUDA module is loaded.
917
+ *
918
+ * \param funcComputeCubinCrc callback is invoked when a CUDA module
919
+ * is loaded.
920
+ *
921
+ * \retval CUPTI_SUCCESS
922
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p funcComputeCubinCrc is NULL.
923
+ */
924
+ CUptiResult CUPTIAPI cuptiRegisterComputeCrcCallback(CUpti_ComputeCrcCallbackFunc funcComputeCubinCrc);
925
+
926
+ /** @} */ /* END CUPTI_PCSAMPLING_API */
927
+
928
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
929
+ #pragma GCC visibility pop
930
+ #endif
931
+
932
+ #if defined(__cplusplus)
933
+ }
934
+ #endif
935
+
936
+ #endif /*_CUPTI_PCSAMPLING_H_*/
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h ADDED
@@ -0,0 +1,402 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #if !defined(_CUPTI_PCSAMPLING_UTIL_H_)
2
+ #define _CUPTI_PCSAMPLING_UTIL_H_
3
+
4
+ #include <cupti_pcsampling.h>
5
+ #include <fstream>
6
+
7
+ #include <cupti_common.h>
8
+
9
+ #ifndef CUPTI_UTIL_STRUCT_SIZE
10
+ #define CUPTI_UTIL_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
11
+ #endif
12
+
13
+ #ifndef CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS
14
+ #define CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS(type, member, structSize) \
15
+ (offsetof(type, member) < structSize)
16
+ #endif
17
+
18
+ #if defined(__cplusplus)
19
+ extern "C" {
20
+ #endif
21
+
22
+ #if defined(__GNUC__)
23
+ #pragma GCC visibility push(default)
24
+ #endif
25
+
26
+ namespace CUPTI { namespace PcSamplingUtil {
27
+
28
+ /**
29
+ * \defgroup CUPTI_PCSAMPLING_UTILITY CUPTI PC Sampling Utility API
30
+ * Functions, types, and enums that implement the CUPTI PC Sampling Utility API.
31
+ * @{
32
+ */
33
+
34
+ /**
35
+ * \brief Header info will be stored in file.
36
+ */
37
+ typedef struct PACKED_ALIGNMENT {
38
+ /**
39
+ * Version of file format.
40
+ */
41
+ uint32_t version;
42
+ /**
43
+ * Total number of buffers present in the file.
44
+ */
45
+ uint32_t totalBuffers;
46
+ } Header;
47
+
48
+ /**
49
+ * \brief BufferInfo will be stored in the file for every buffer
50
+ * i.e for every call of UtilDumpPcSamplingBufferInFile() API.
51
+ */
52
+ typedef struct PACKED_ALIGNMENT {
53
+ /**
54
+ * Total number of PC records.
55
+ */
56
+ uint64_t recordCount;
57
+ /**
58
+ * Count of all stall reasons supported on the GPU
59
+ */
60
+ size_t numStallReasons;
61
+ /**
62
+ * Total number of stall reasons in single record.
63
+ */
64
+ uint64_t numSelectedStallReasons;
65
+ /**
66
+ * Buffer size in Bytes.
67
+ */
68
+ uint64_t bufferByteSize;
69
+ } BufferInfo;
70
+
71
+ /**
72
+ * \brief All available stall reasons name and respective indexes
73
+ * will be stored in it.
74
+ */
75
+ typedef struct PACKED_ALIGNMENT {
76
+ /**
77
+ * Number of all available stall reasons
78
+ */
79
+ size_t numStallReasons;
80
+ /**
81
+ * Stall reasons names of all available stall reasons
82
+ */
83
+ char **stallReasons;
84
+ /**
85
+ * Stall reason index of all available stall reasons
86
+ */
87
+ uint32_t *stallReasonIndex;
88
+ } PcSamplingStallReasons;
89
+
90
+ /**
91
+ * \brief CUPTI PC sampling buffer types.
92
+ *
93
+ */
94
+ typedef enum {
95
+ /**
96
+ * Invalid buffer type.
97
+ */
98
+ PC_SAMPLING_BUFFER_INVALID = 0,
99
+ /**
100
+ * Refers to CUpti_PCSamplingData buffer.
101
+ */
102
+ PC_SAMPLING_BUFFER_PC_TO_COUNTER_DATA = 1
103
+ } PcSamplingBufferType;
104
+
105
+ /**
106
+ * \brief CUPTI PC sampling utility API result codes.
107
+ *
108
+ * Error and result codes returned by CUPTI PC sampling utility API.
109
+ */
110
+ typedef enum {
111
+ /**
112
+ * No error
113
+ */
114
+ CUPTI_UTIL_SUCCESS = 0,
115
+ /**
116
+ * One or more of the parameters are invalid.
117
+ */
118
+ CUPTI_UTIL_ERROR_INVALID_PARAMETER = 1,
119
+ /**
120
+ * Unable to create a new file
121
+ */
122
+ CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE = 2,
123
+ /**
124
+ * Unable to open a file
125
+ */
126
+ CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE = 3,
127
+ /**
128
+ * Read or write operation failed
129
+ */
130
+ CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED = 4,
131
+ /**
132
+ * Provided file handle is corrupted.
133
+ */
134
+ CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED = 5,
135
+ /**
136
+ * seek operation failed.
137
+ */
138
+ CUPTI_UTIL_ERROR_SEEK_OPERATION_FAILED = 6,
139
+ /**
140
+ * Unable to allocate enough memory to perform the requested
141
+ * operation.
142
+ */
143
+ CUPTI_UTIL_ERROR_OUT_OF_MEMORY = 7,
144
+ /**
145
+ * An unknown internal error has occurred.
146
+ */
147
+ CUPTI_UTIL_ERROR_UNKNOWN = 999,
148
+ CUPTI_UTIL_ERROR_FORCE_INT = 0x7fffffff
149
+ } CUptiUtilResult;
150
+
151
+ /**
152
+ * \brief Params for \ref CuptiUtilPutPcSampData
153
+ */
154
+ typedef struct {
155
+ /**
156
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
157
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
158
+ * available in the structure. Used to preserve backward compatibility.
159
+ */
160
+ size_t size;
161
+ /**
162
+ * Type of buffer to store in file
163
+ */
164
+ PcSamplingBufferType bufferType;
165
+ /**
166
+ * PC sampling buffer.
167
+ */
168
+ void *pSamplingData;
169
+ /**
170
+ * Number of configured attributes
171
+ */
172
+ size_t numAttributes;
173
+ /**
174
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
175
+ * It is expected to provide configuration details of at least
176
+ * CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON attribute.
177
+ */
178
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
179
+ /**
180
+ * Refer \ref PcSamplingStallReasons.
181
+ */
182
+ PcSamplingStallReasons *pPcSamplingStallReasons;
183
+ /**
184
+ * File name to store buffer into it.
185
+ */
186
+ const char* fileName;
187
+ } CUptiUtil_PutPcSampDataParams;
188
+ #define CUptiUtil_PutPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_PutPcSampDataParams, fileName)
189
+
190
+ /**
191
+ * \brief Dump PC sampling data into the file.
192
+ *
193
+ * This API can be called multiple times.
194
+ * It will append buffer in the file.
195
+ * For every buffer it will store BufferInfo
196
+ * so that before retrieving data it will help to allocate buffer
197
+ * to store retrieved data.
198
+ * This API creates file if file does not present.
199
+ * If stallReasonIndex or stallReasons pointer of \ref CUptiUtil_PutPcSampDataParams is NULL
200
+ * then stall reasons data will not be stored in file.
201
+ * It is expected to store all available stall reason data at least once to refer it during
202
+ * offline correlation.
203
+ *
204
+ * \retval CUPTI_UTIL_SUCCESS
205
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
206
+ * or if either of pSamplingData, pParams pointer is NULL or stall reason configuration details not provided
207
+ * or filename is empty.
208
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE
209
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE
210
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
211
+ */
212
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilPutPcSampData(CUptiUtil_PutPcSampDataParams *pParams);
213
+
214
+ /**
215
+ * \brief Params for \ref CuptiUtilGetHeaderData
216
+ */
217
+ typedef struct {
218
+ /**
219
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
220
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
221
+ * available in the structure. Used to preserve backward compatibility.
222
+ */
223
+ size_t size;
224
+ /**
225
+ * File handle.
226
+ */
227
+ std::ifstream *fileHandler;
228
+ /**
229
+ * Header Info.
230
+ */
231
+ Header headerInfo;
232
+
233
+ } CUptiUtil_GetHeaderDataParams;
234
+ #define CUptiUtil_GetHeaderDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetHeaderDataParams, headerInfo)
235
+
236
+ /**
237
+ * \brief Get header data of file.
238
+ *
239
+ * This API must be called once initially while retrieving data from file.
240
+ * \ref Header structure, it gives info about total number
241
+ * of buffers present in the file.
242
+ *
243
+ * \retval CUPTI_UTIL_SUCCESS
244
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
245
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file
246
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
247
+ */
248
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetHeaderData(CUptiUtil_GetHeaderDataParams *pParams);
249
+
250
+ /**
251
+ * \brief Params for \ref CuptiUtilGetBufferInfo
252
+ */
253
+ typedef struct {
254
+ /**
255
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
256
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
257
+ * available in the structure. Used to preserve backward compatibility.
258
+ */
259
+ size_t size;
260
+ /**
261
+ * File handle.
262
+ */
263
+ std::ifstream *fileHandler;
264
+ /**
265
+ * Buffer Info.
266
+ */
267
+ BufferInfo bufferInfoData;
268
+ } CUptiUtil_GetBufferInfoParams;
269
+ #define CUptiUtil_GetBufferInfoParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetBufferInfoParams, bufferInfoData)
270
+
271
+ /**
272
+ * \brief Get buffer info data of file.
273
+ *
274
+ * This API must be called every time before calling CuptiUtilGetPcSampData API.
275
+ * \ref BufferInfo structure, it gives info about recordCount and stallReasonCount
276
+ * of every record in the buffer. This will help to allocate exact buffer to retrieve data into it.
277
+ *
278
+ * \retval CUPTI_UTIL_SUCCESS
279
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
280
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
281
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
282
+ */
283
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetBufferInfo(CUptiUtil_GetBufferInfoParams *pParams);
284
+
285
+ /**
286
+ * \brief Params for \ref CuptiUtilGetPcSampData
287
+ */
288
+ typedef struct {
289
+ /**
290
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
291
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
292
+ * available in the structure. Used to preserve backward compatibility.
293
+ */
294
+ size_t size;
295
+ /**
296
+ * File handle.
297
+ */
298
+ std::ifstream *fileHandler;
299
+ /**
300
+ * Type of buffer to store in file
301
+ */
302
+ PcSamplingBufferType bufferType;
303
+ /**
304
+ * Pointer to collected buffer info using \ref CuptiUtilGetBufferInfo
305
+ */
306
+ BufferInfo *pBufferInfoData;
307
+ /**
308
+ * Pointer to allocated memory to store retrieved data from file.
309
+ */
310
+ void *pSamplingData;
311
+ /**
312
+ * Number of configuration attributes
313
+ */
314
+ size_t numAttributes;
315
+ /**
316
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
317
+ */
318
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
319
+ /**
320
+ * Refer \ref PcSamplingStallReasons.
321
+ * For stallReasons field of \ref PcSamplingStallReasons it is expected to
322
+ * allocate memory for each string element of array.
323
+ */
324
+ PcSamplingStallReasons *pPcSamplingStallReasons;
325
+ } CUptiUtil_GetPcSampDataParams;
326
+ #define CUptiUtil_GetPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetPcSampDataParams, pPcSamplingStallReasons)
327
+
328
+ /**
329
+ * \brief Retrieve PC sampling data from file into allocated buffer.
330
+ *
331
+ * This API must be called after CuptiUtilGetBufferInfo API.
332
+ * It will retrieve data from file into allocated buffer.
333
+ *
334
+ * \retval CUPTI_UTIL_SUCCESS
335
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
336
+ * or if either of pSampData, pParams is NULL. If pPcSamplingStallReasons is not NULL then
337
+ * error out if either of stallReasonIndex, stallReasons or stallReasons array element pointer is NULL.
338
+ * or filename is empty.
339
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
340
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
341
+ */
342
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetPcSampData(CUptiUtil_GetPcSampDataParams *pParams);
343
+
344
+ /**
345
+ * \brief Params for \ref CuptiUtilMergePcSampData
346
+ */
347
+ typedef struct
348
+ {
349
+ /**
350
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
351
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
352
+ * available in the structure. Used to preserve backward compatibility.
353
+ */
354
+ size_t size;
355
+ /**
356
+ * Number of buffers to merge.
357
+ */
358
+ size_t numberOfBuffers;
359
+ /**
360
+ * Pointer to array of buffers to merge
361
+ */
362
+ CUpti_PCSamplingData *PcSampDataBuffer;
363
+ /**
364
+ * Pointer to array of merged buffers as per the range id.
365
+ */
366
+ CUpti_PCSamplingData **MergedPcSampDataBuffers;
367
+ /**
368
+ * Number of merged buffers.
369
+ */
370
+ size_t *numMergedBuffer;
371
+ } CUptiUtil_MergePcSampDataParams;
372
+ #define CUptiUtil_MergePcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_MergePcSampDataParams, numMergedBuffer)
373
+
374
+ /**
375
+ * \brief Merge PC sampling data range id wise.
376
+ *
377
+ * This API merge PC sampling data range id wise.
378
+ * It allocates memory for merged data and fill data in it
379
+ * and provide buffer pointer in MergedPcSampDataBuffers field.
380
+ * It is expected from user to free merge data buffers after use.
381
+ *
382
+ * \retval CUPTI_UTIL_SUCCESS
383
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if param struct size is invalid
384
+ * or count of buffers to merge is invalid i.e less than 1
385
+ * or either of PcSampDataBuffer, MergedPcSampDataBuffers, numMergedBuffer is NULL
386
+ * \retval CUPTI_UTIL_ERROR_OUT_OF_MEMORY Unable to allocate memory for merged buffer.
387
+ */
388
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilMergePcSampData(CUptiUtil_MergePcSampDataParams *pParams);
389
+
390
+ /** @} */ /* END CUPTI_PCSAMPLING_UTILITY */
391
+
392
+ } }
393
+
394
+ #if defined(__GNUC__)
395
+ #pragma GCC visibility pop
396
+ #endif
397
+
398
+ #if defined(__cplusplus)
399
+ }
400
+ #endif
401
+
402
+ #endif
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_runtime_cbid.h ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ typedef enum CUpti_runtime_api_trace_cbid_enum {
10
+ CUPTI_RUNTIME_TRACE_CBID_INVALID = 0,
11
+ CUPTI_RUNTIME_TRACE_CBID_cudaDriverGetVersion_v3020 = 1,
12
+ CUPTI_RUNTIME_TRACE_CBID_cudaRuntimeGetVersion_v3020 = 2,
13
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceCount_v3020 = 3,
14
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v3020 = 4,
15
+ CUPTI_RUNTIME_TRACE_CBID_cudaChooseDevice_v3020 = 5,
16
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetChannelDesc_v3020 = 6,
17
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateChannelDesc_v3020 = 7,
18
+ CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020 = 8,
19
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetupArgument_v3020 = 9,
20
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetLastError_v3020 = 10,
21
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeekAtLastError_v3020 = 11,
22
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorString_v3020 = 12,
23
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 = 13,
24
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetCacheConfig_v3020 = 14,
25
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetAttributes_v3020 = 15,
26
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020 = 16,
27
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDevice_v3020 = 17,
28
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetValidDevices_v3020 = 18,
29
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDeviceFlags_v3020 = 19,
30
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020 = 20,
31
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocPitch_v3020 = 21,
32
+ CUPTI_RUNTIME_TRACE_CBID_cudaFree_v3020 = 22,
33
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocArray_v3020 = 23,
34
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeArray_v3020 = 24,
35
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocHost_v3020 = 25,
36
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeHost_v3020 = 26,
37
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostAlloc_v3020 = 27,
38
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostGetDevicePointer_v3020 = 28,
39
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostGetFlags_v3020 = 29,
40
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemGetInfo_v3020 = 30,
41
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020 = 31,
42
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_v3020 = 32,
43
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_v3020 = 33,
44
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_v3020 = 34,
45
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_v3020 = 35,
46
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_v3020 = 36,
47
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_v3020 = 37,
48
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_v3020 = 38,
49
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_v3020 = 39,
50
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_v3020 = 40,
51
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020 = 41,
52
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_v3020 = 42,
53
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_v3020 = 43,
54
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_v3020 = 44,
55
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_v3020 = 45,
56
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_v3020 = 46,
57
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_v3020 = 47,
58
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_v3020 = 48,
59
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset_v3020 = 49,
60
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_v3020 = 50,
61
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_v3020 = 51,
62
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_v3020 = 52,
63
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolAddress_v3020 = 53,
64
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSymbolSize_v3020 = 54,
65
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture_v3020 = 55,
66
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTexture2D_v3020 = 56,
67
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToArray_v3020 = 57,
68
+ CUPTI_RUNTIME_TRACE_CBID_cudaUnbindTexture_v3020 = 58,
69
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureAlignmentOffset_v3020 = 59,
70
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureReference_v3020 = 60,
71
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindSurfaceToArray_v3020 = 61,
72
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceReference_v3020 = 62,
73
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLSetGLDevice_v3020 = 63,
74
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLRegisterBufferObject_v3020 = 64,
75
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObject_v3020 = 65,
76
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObject_v3020 = 66,
77
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnregisterBufferObject_v3020 = 67,
78
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLSetBufferObjectMapFlags_v3020 = 68,
79
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLMapBufferObjectAsync_v3020 = 69,
80
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLUnmapBufferObjectAsync_v3020 = 70,
81
+ CUPTI_RUNTIME_TRACE_CBID_cudaWGLGetDevice_v3020 = 71,
82
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterImage_v3020 = 72,
83
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsGLRegisterBuffer_v3020 = 73,
84
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnregisterResource_v3020 = 74,
85
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceSetMapFlags_v3020 = 75,
86
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsMapResources_v3020 = 76,
87
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsUnmapResources_v3020 = 77,
88
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedPointer_v3020 = 78,
89
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsSubResourceGetMappedArray_v3020 = 79,
90
+ CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUGetDevice_v3020 = 80,
91
+ CUPTI_RUNTIME_TRACE_CBID_cudaVDPAUSetVDPAUDevice_v3020 = 81,
92
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterVideoSurface_v3020 = 82,
93
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterOutputSurface_v3020 = 83,
94
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevice_v3020 = 84,
95
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDevices_v3020 = 85,
96
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11SetDirect3DDevice_v3020 = 86,
97
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D11RegisterResource_v3020 = 87,
98
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevice_v3020 = 88,
99
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDevices_v3020 = 89,
100
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10SetDirect3DDevice_v3020 = 90,
101
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D10RegisterResource_v3020 = 91,
102
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10RegisterResource_v3020 = 92,
103
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnregisterResource_v3020 = 93,
104
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10MapResources_v3020 = 94,
105
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10UnmapResources_v3020 = 95,
106
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceSetMapFlags_v3020 = 96,
107
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetSurfaceDimensions_v3020 = 97,
108
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedArray_v3020 = 98,
109
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPointer_v3020 = 99,
110
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedSize_v3020 = 100,
111
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10ResourceGetMappedPitch_v3020 = 101,
112
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevice_v3020 = 102,
113
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDevices_v3020 = 103,
114
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9SetDirect3DDevice_v3020 = 104,
115
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9GetDirect3DDevice_v3020 = 105,
116
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsD3D9RegisterResource_v3020 = 106,
117
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterResource_v3020 = 107,
118
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterResource_v3020 = 108,
119
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapResources_v3020 = 109,
120
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapResources_v3020 = 110,
121
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceSetMapFlags_v3020 = 111,
122
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetSurfaceDimensions_v3020 = 112,
123
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedArray_v3020 = 113,
124
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPointer_v3020 = 114,
125
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedSize_v3020 = 115,
126
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9ResourceGetMappedPitch_v3020 = 116,
127
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9Begin_v3020 = 117,
128
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9End_v3020 = 118,
129
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9RegisterVertexBuffer_v3020 = 119,
130
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnregisterVertexBuffer_v3020 = 120,
131
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9MapVertexBuffer_v3020 = 121,
132
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D9UnmapVertexBuffer_v3020 = 122,
133
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadExit_v3020 = 123,
134
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForDevice_v3020 = 124,
135
+ CUPTI_RUNTIME_TRACE_CBID_cudaSetDoubleForHost_v3020 = 125,
136
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSynchronize_v3020 = 126,
137
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetLimit_v3020 = 127,
138
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetLimit_v3020 = 128,
139
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreate_v3020 = 129,
140
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v3020 = 130,
141
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_v3020 = 131,
142
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_v3020 = 132,
143
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreate_v3020 = 133,
144
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateWithFlags_v3020 = 134,
145
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_v3020 = 135,
146
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventDestroy_v3020 = 136,
147
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventSynchronize_v3020 = 137,
148
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventQuery_v3020 = 138,
149
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventElapsedTime_v3020 = 139,
150
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3D_v3020 = 140,
151
+ CUPTI_RUNTIME_TRACE_CBID_cudaMalloc3DArray_v3020 = 141,
152
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_v3020 = 142,
153
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_v3020 = 143,
154
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_v3020 = 144,
155
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_v3020 = 145,
156
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadSetCacheConfig_v3020 = 146,
157
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_v3020 = 147,
158
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D11GetDirect3DDevice_v3020 = 148,
159
+ CUPTI_RUNTIME_TRACE_CBID_cudaD3D10GetDirect3DDevice_v3020 = 149,
160
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadGetCacheConfig_v3020 = 150,
161
+ CUPTI_RUNTIME_TRACE_CBID_cudaPointerGetAttributes_v4000 = 151,
162
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostRegister_v4000 = 152,
163
+ CUPTI_RUNTIME_TRACE_CBID_cudaHostUnregister_v4000 = 153,
164
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceCanAccessPeer_v4000 = 154,
165
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceEnablePeerAccess_v4000 = 155,
166
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceDisablePeerAccess_v4000 = 156,
167
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerRegister_v4000 = 157,
168
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerUnregister_v4000 = 158,
169
+ CUPTI_RUNTIME_TRACE_CBID_cudaPeerGetDevicePointer_v4000 = 159,
170
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeer_v4000 = 160,
171
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyPeerAsync_v4000 = 161,
172
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_v4000 = 162,
173
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_v4000 = 163,
174
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceReset_v3020 = 164,
175
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020 = 165,
176
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetLimit_v3020 = 166,
177
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetLimit_v3020 = 167,
178
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetCacheConfig_v3020 = 168,
179
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetCacheConfig_v3020 = 169,
180
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerInitialize_v4000 = 170,
181
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStart_v4000 = 171,
182
+ CUPTI_RUNTIME_TRACE_CBID_cudaProfilerStop_v4000 = 172,
183
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetByPCIBusId_v4010 = 173,
184
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetPCIBusId_v4010 = 174,
185
+ CUPTI_RUNTIME_TRACE_CBID_cudaGLGetDevices_v4010 = 175,
186
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetEventHandle_v4010 = 176,
187
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenEventHandle_v4010 = 177,
188
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcGetMemHandle_v4010 = 178,
189
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcOpenMemHandle_v4010 = 179,
190
+ CUPTI_RUNTIME_TRACE_CBID_cudaIpcCloseMemHandle_v4010 = 180,
191
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetInfo_v4010 = 181,
192
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetSharedMemConfig_v4020 = 182,
193
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetSharedMemConfig_v4020 = 183,
194
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetSharedMemConfig_v4020 = 184,
195
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v5000 = 185,
196
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyTextureObject_v5000 = 186,
197
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceDesc_v5000 = 187,
198
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v5000 = 188,
199
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateSurfaceObject_v5000 = 189,
200
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroySurfaceObject_v5000 = 190,
201
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetSurfaceObjectResourceDesc_v5000 = 191,
202
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocMipmappedArray_v5000 = 192,
203
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetMipmappedArrayLevel_v5000 = 193,
204
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeMipmappedArray_v5000 = 194,
205
+ CUPTI_RUNTIME_TRACE_CBID_cudaBindTextureToMipmappedArray_v5000 = 195,
206
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedMipmappedArray_v5000 = 196,
207
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_v5000 = 197,
208
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithFlags_v5000 = 198,
209
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectResourceViewDesc_v5000 = 199,
210
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetAttribute_v5000 = 200,
211
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamDestroy_v5050 = 201,
212
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCreateWithPriority_v5050 = 202,
213
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_v5050 = 203,
214
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_v5050 = 204,
215
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetStreamPriorityRange_v5050 = 205,
216
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocManaged_v6000 = 206,
217
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000 = 207,
218
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_v6000 = 208,
219
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetErrorName_v6050 = 209,
220
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050 = 210,
221
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000 = 211,
222
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceFlags_v7000 = 212,
223
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_ptsz_v7000 = 213,
224
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_ptsz_v7000 = 214,
225
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_ptds_v7000 = 215,
226
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2D_ptds_v7000 = 216,
227
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArray_ptds_v7000 = 217,
228
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArray_ptds_v7000 = 218,
229
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArray_ptds_v7000 = 219,
230
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArray_ptds_v7000 = 220,
231
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyArrayToArray_ptds_v7000 = 221,
232
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DArrayToArray_ptds_v7000 = 222,
233
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbol_ptds_v7000 = 223,
234
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbol_ptds_v7000 = 224,
235
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_ptsz_v7000 = 225,
236
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToArrayAsync_ptsz_v7000 = 226,
237
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromArrayAsync_ptsz_v7000 = 227,
238
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DAsync_ptsz_v7000 = 228,
239
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DToArrayAsync_ptsz_v7000 = 229,
240
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy2DFromArrayAsync_ptsz_v7000 = 230,
241
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyToSymbolAsync_ptsz_v7000 = 231,
242
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyFromSymbolAsync_ptsz_v7000 = 232,
243
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset_ptds_v7000 = 233,
244
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2D_ptds_v7000 = 234,
245
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemsetAsync_ptsz_v7000 = 235,
246
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset2DAsync_ptsz_v7000 = 236,
247
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetPriority_ptsz_v7000 = 237,
248
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetFlags_ptsz_v7000 = 238,
249
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSynchronize_ptsz_v7000 = 239,
250
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamQuery_ptsz_v7000 = 240,
251
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAttachMemAsync_ptsz_v7000 = 241,
252
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecord_ptsz_v7000 = 242,
253
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3D_ptds_v7000 = 243,
254
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemset3DAsync_ptsz_v7000 = 244,
255
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3D_ptds_v7000 = 245,
256
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DAsync_ptsz_v7000 = 246,
257
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamWaitEvent_ptsz_v7000 = 247,
258
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamAddCallback_ptsz_v7000 = 248,
259
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeer_ptds_v7000 = 249,
260
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy3DPeerAsync_ptsz_v7000 = 250,
261
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000 = 251,
262
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v8000 = 252,
263
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_ptsz_v8000 = 253,
264
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v8000 = 254,
265
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetP2PAttribute_v8000 = 255,
266
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsEGLRegisterImage_v7000 = 256,
267
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnect_v7000 = 257,
268
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerDisconnect_v7000 = 258,
269
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerAcquireFrame_v7000 = 259,
270
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerReleaseFrame_v7000 = 260,
271
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerConnect_v7000 = 261,
272
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerDisconnect_v7000 = 262,
273
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerPresentFrame_v7000 = 263,
274
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamProducerReturnFrame_v7000 = 264,
275
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsResourceGetMappedEglFrame_v7000 = 265,
276
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttribute_v8000 = 266,
277
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemRangeGetAttributes_v8000 = 267,
278
+ CUPTI_RUNTIME_TRACE_CBID_cudaEGLStreamConsumerConnectWithFlags_v7000 = 268,
279
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_v9000 = 269,
280
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_ptsz_v9000 = 270,
281
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventCreateFromEGLSync_v9000 = 271,
282
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernelMultiDevice_v9000 = 272,
283
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncSetAttribute_v9000 = 273,
284
+ CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalMemory_v10000 = 274,
285
+ CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedBuffer_v10000 = 275,
286
+ CUPTI_RUNTIME_TRACE_CBID_cudaExternalMemoryGetMappedMipmappedArray_v10000 = 276,
287
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalMemory_v10000 = 277,
288
+ CUPTI_RUNTIME_TRACE_CBID_cudaImportExternalSemaphore_v10000 = 278,
289
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v10000 = 279,
290
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_ptsz_v10000 = 280,
291
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v10000 = 281,
292
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_ptsz_v10000 = 282,
293
+ CUPTI_RUNTIME_TRACE_CBID_cudaDestroyExternalSemaphore_v10000 = 283,
294
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_v10000 = 284,
295
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchHostFunc_ptsz_v10000 = 285,
296
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphCreate_v10000 = 286,
297
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetParams_v10000 = 287,
298
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetParams_v10000 = 288,
299
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddKernelNode_v10000 = 289,
300
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode_v10000 = 290,
301
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeGetParams_v10000 = 291,
302
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams_v10000 = 292,
303
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemsetNode_v10000 = 293,
304
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeGetParams_v10000 = 294,
305
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemsetNodeSetParams_v10000 = 295,
306
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddHostNode_v10000 = 296,
307
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeGetParams_v10000 = 297,
308
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddChildGraphNode_v10000 = 298,
309
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphChildGraphNodeGetGraph_v10000 = 299,
310
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEmptyNode_v10000 = 300,
311
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphClone_v10000 = 301,
312
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeFindInClone_v10000 = 302,
313
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetType_v10000 = 303,
314
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetRootNodes_v10000 = 304,
315
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v10000 = 305,
316
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v10000 = 306,
317
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v10000 = 307,
318
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v10000 = 308,
319
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroyNode_v10000 = 309,
320
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v10000 = 310,
321
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_v10000 = 311,
322
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphLaunch_ptsz_v10000 = 312,
323
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecDestroy_v10000 = 313,
324
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDestroy_v10000 = 314,
325
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_v10000 = 315,
326
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCapture_ptsz_v10000 = 316,
327
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_v10000 = 317,
328
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamIsCapturing_ptsz_v10000 = 318,
329
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_v10000 = 319,
330
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamEndCapture_ptsz_v10000 = 320,
331
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphHostNodeSetParams_v10000 = 321,
332
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetNodes_v10000 = 322,
333
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v10000 = 323,
334
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v10010 = 324,
335
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_ptsz_v10010 = 325,
336
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecKernelNodeSetParams_v10010 = 326,
337
+ CUPTI_RUNTIME_TRACE_CBID_cudaThreadExchangeStreamCaptureMode_v10010 = 327,
338
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetNvSciSyncAttributes_v10020 = 328,
339
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyAvailableDynamicSMemPerBlock_v10200 = 329,
340
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_v10200 = 330,
341
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetFlags_ptsz_v10200 = 331,
342
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams_v10020 = 332,
343
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemsetNodeSetParams_v10020 = 333,
344
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecHostNodeSetParams_v10020 = 334,
345
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecUpdate_v10020 = 335,
346
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetFuncBySymbol_v11000 = 336,
347
+ CUPTI_RUNTIME_TRACE_CBID_cudaCtxResetPersistingL2Cache_v11000 = 337,
348
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeCopyAttributes_v11000 = 338,
349
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeGetAttribute_v11000 = 339,
350
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphKernelNodeSetAttribute_v11000 = 340,
351
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_v11000 = 341,
352
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamCopyAttributes_ptsz_v11000 = 342,
353
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_v11000 = 343,
354
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetAttribute_ptsz_v11000 = 344,
355
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_v11000 = 345,
356
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamSetAttribute_ptsz_v11000 = 346,
357
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetTexture1DLinearMaxWidth_v11010 = 347,
358
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_v10000 = 348,
359
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphUpload_ptsz_v10000 = 349,
360
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeToSymbol_v11010 = 350,
361
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNodeFromSymbol_v11010 = 351,
362
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemcpyNode1D_v11010 = 352,
363
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsToSymbol_v11010 = 353,
364
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParamsFromSymbol_v11010 = 354,
365
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemcpyNodeSetParams1D_v11010 = 355,
366
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010 = 356,
367
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010 = 357,
368
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecMemcpyNodeSetParams1D_v11010 = 358,
369
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetSparseProperties_v11010 = 359,
370
+ CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetSparseProperties_v11010 = 360,
371
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecChildGraphNodeSetParams_v11010 = 361,
372
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventRecordNode_v11010 = 362,
373
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeGetEvent_v11010 = 363,
374
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventRecordNodeSetEvent_v11010 = 364,
375
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddEventWaitNode_v11010 = 365,
376
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeGetEvent_v11010 = 366,
377
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphEventWaitNodeSetEvent_v11010 = 367,
378
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventRecordNodeSetEvent_v11010 = 368,
379
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecEventWaitNodeSetEvent_v11010 = 369,
380
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_v11010 = 370,
381
+ CUPTI_RUNTIME_TRACE_CBID_cudaEventRecordWithFlags_ptsz_v11010 = 371,
382
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetDefaultMemPool_v11020 = 372,
383
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_v11020 = 373,
384
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocAsync_ptsz_v11020 = 374,
385
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_v11020 = 375,
386
+ CUPTI_RUNTIME_TRACE_CBID_cudaFreeAsync_ptsz_v11020 = 376,
387
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolTrimTo_v11020 = 377,
388
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAttribute_v11020 = 378,
389
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAttribute_v11020 = 379,
390
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolSetAccess_v11020 = 380,
391
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetPlane_v11020 = 381,
392
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolGetAccess_v11020 = 382,
393
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolCreate_v11020 = 383,
394
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolDestroy_v11020 = 384,
395
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetMemPool_v11020 = 385,
396
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetMemPool_v11020 = 386,
397
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportToShareableHandle_v11020 = 387,
398
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportFromShareableHandle_v11020 = 388,
399
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolExportPointer_v11020 = 389,
400
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPoolImportPointer_v11020 = 390,
401
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_v11020 = 391,
402
+ CUPTI_RUNTIME_TRACE_CBID_cudaMallocFromPoolAsync_ptsz_v11020 = 392,
403
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_v11020 = 393,
404
+ CUPTI_RUNTIME_TRACE_CBID_cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020 = 394,
405
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_v11020 = 395,
406
+ CUPTI_RUNTIME_TRACE_CBID_cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020 = 396,
407
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresSignalNode_v11020 = 397,
408
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeGetParams_v11020 = 398,
409
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresSignalNodeSetParams_v11020 = 399,
410
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddExternalSemaphoresWaitNode_v11020 = 400,
411
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeGetParams_v11020 = 401,
412
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExternalSemaphoresWaitNodeSetParams_v11020 = 402,
413
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020 = 403,
414
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020 = 404,
415
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceFlushGPUDirectRDMAWrites_v11030 = 405,
416
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_v11030 = 406,
417
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDriverEntryPoint_ptsz_v11030 = 407,
418
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphDebugDotPrint_v11030 = 408,
419
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_v11030 = 409,
420
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v2_ptsz_v11030 = 410,
421
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v11030 = 411,
422
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_ptsz_v11030 = 412,
423
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectCreate_v11030 = 413,
424
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRetain_v11030 = 414,
425
+ CUPTI_RUNTIME_TRACE_CBID_cudaUserObjectRelease_v11030 = 415,
426
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRetainUserObject_v11030 = 416,
427
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphReleaseUserObject_v11030 = 417,
428
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithFlags_v11040 = 418,
429
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemAllocNode_v11040 = 419,
430
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemAllocNodeGetParams_v11040 = 420,
431
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddMemFreeNode_v11040 = 421,
432
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphMemFreeNodeGetParams_v11040 = 422,
433
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGraphMemTrim_v11040 = 423,
434
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceGetGraphMemAttribute_v11040 = 424,
435
+ CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSetGraphMemAttribute_v11040 = 425,
436
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetEnabled_v11060 = 426,
437
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetEnabled_v11060 = 427,
438
+ CUPTI_RUNTIME_TRACE_CBID_cudaArrayGetMemoryRequirements_v11060 = 428,
439
+ CUPTI_RUNTIME_TRACE_CBID_cudaMipmappedArrayGetMemoryRequirements_v11060 = 429,
440
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_v11060 = 430,
441
+ CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernelExC_ptsz_v11060 = 431,
442
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxPotentialClusterSize_v11070 = 432,
443
+ CUPTI_RUNTIME_TRACE_CBID_cudaOccupancyMaxActiveClusters_v11070 = 433,
444
+ CUPTI_RUNTIME_TRACE_CBID_cudaCreateTextureObject_v2_v11080 = 434,
445
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetTextureObjectTextureDesc_v2_v11080 = 435,
446
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_v12000 = 436,
447
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiateWithParams_ptsz_v12000 = 437,
448
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecGetFlags_v12000 = 438,
449
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetKernel_v12000 = 439,
450
+ CUPTI_RUNTIME_TRACE_CBID_cudaGetDeviceProperties_v2_v12000 = 440,
451
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_v12000 = 441,
452
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetId_ptsz_v12000 = 442,
453
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphInstantiate_v12000 = 443,
454
+ CUPTI_RUNTIME_TRACE_CBID_cudaInitDevice_v12000 = 444,
455
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v12020 = 445,
456
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeSetParams_v12020 = 446,
457
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphExecNodeSetParams_v12020 = 447,
458
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemAdvise_v2_v12020 = 448,
459
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_v12020 = 449,
460
+ CUPTI_RUNTIME_TRACE_CBID_cudaMemPrefetchAsync_v2_ptsz_v12020 = 450,
461
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetName_v12030 = 451,
462
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_v12030 = 452,
463
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamBeginCaptureToGraph_ptsz_v12030 = 453,
464
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphConditionalHandleCreate_v12030 = 454,
465
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphGetEdges_v2_v12030 = 455,
466
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependencies_v2_v12030 = 456,
467
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphNodeGetDependentNodes_v2_v12030 = 457,
468
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddDependencies_v2_v12030 = 458,
469
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphRemoveDependencies_v2_v12030 = 459,
470
+ CUPTI_RUNTIME_TRACE_CBID_cudaGraphAddNode_v2_v12030 = 460,
471
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_v12030 = 461,
472
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamGetCaptureInfo_v3_ptsz_v12030 = 462,
473
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_v12030 = 463,
474
+ CUPTI_RUNTIME_TRACE_CBID_cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030 = 464,
475
+ CUPTI_RUNTIME_TRACE_CBID_cuda465_v12040 = 465,
476
+ CUPTI_RUNTIME_TRACE_CBID_cuda466_v12040 = 466,
477
+ CUPTI_RUNTIME_TRACE_CBID_cudaFuncGetParamInfo_v12040 = 467,
478
+ CUPTI_RUNTIME_TRACE_CBID_SIZE = 468,
479
+ CUPTI_RUNTIME_TRACE_CBID_FORCE_INT = 0x7fffffff
480
+ } CUpti_runtime_api_trace_cbid;
481
+
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_sass_metrics.h ADDED
@@ -0,0 +1,436 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_SASS_METRICS_H_)
51
+ #define _CUPTI_SASS_METRICS_H_
52
+
53
+ #include <cuda.h>
54
+ #include <cupti_result.h>
55
+ #include <cupti_profiler_target.h>
56
+
57
+ #ifdef __cplusplus
58
+ extern "C" {
59
+ #endif
60
+
61
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
62
+ #pragma GCC visibility push(default)
63
+ #endif
64
+
65
+ /**
66
+ * \defgroup CUPTI_SASS_METRICS_API CUPTI SASS Metrics API
67
+ * Functions, types, and enums that implement the CUPTI SASS Metrics API.
68
+ * @{
69
+ */
70
+
71
+ typedef enum
72
+ {
73
+ /// SASS metric data will be collected at GPU level.
74
+ /// In CUpti_SassMetricsGetDataProperties_Params struct the numOfInstances will be equal to 1
75
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_GPU = 0,
76
+
77
+ /// SASS metric data will be collected at SM level
78
+ /// In CUpti_SassMetricsGetDataProperties_Params struct the numOfInstances will be equal to number of SMs in the GPU
79
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_SM = 1,
80
+
81
+ /// SASS metric data will be collected at SM sub-partition level
82
+ /// In CUpti_SassMetricsGetDataProperties_Params struct the numOfInstances will be equal to number of SM sub-partitions in the GPU
83
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_SMSP = 2,
84
+
85
+ CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_INVALID
86
+ } CUpti_SassMetrics_OutputGranularity;
87
+
88
+ typedef struct CUpti_SassMetrics_MetricDetails
89
+ {
90
+ /// unique ID for the SASS metric
91
+ uint64_t metricId;
92
+ /// metric name
93
+ const char* pMetricName;
94
+ /// metric description
95
+ const char* pMetricDescription;
96
+ } CUpti_SassMetrics_MetricDetails;
97
+
98
+ /**
99
+ * \brief Params for cuptiSassMetricsGetNumOfMetrics
100
+ */
101
+ typedef struct CUpti_SassMetrics_GetNumOfMetrics_Params
102
+ {
103
+ /// [in] should be equal to CUpti_SassMetrics_GetNumOfMetrics_Params_STRUCT_SIZE
104
+ size_t structSize;
105
+ /// [in] assign to NULL
106
+ void* pPriv;
107
+ /// [in] chip name for which metrics will be queried
108
+ const char* pChipName;
109
+ /// [out] number of metrics supported for the queried chip
110
+ size_t numOfMetrics;
111
+ } CUpti_SassMetrics_GetNumOfMetrics_Params;
112
+
113
+ #define CUpti_SassMetrics_GetNumOfMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetNumOfMetrics_Params, numOfMetrics)
114
+
115
+ /**
116
+ * \brief Get the number of supported SASS metrics for the chip.
117
+ *
118
+ * \param pParams A pointer to \ref CUpti_SassMetrics_GetNumOfMetrics_Params
119
+ *
120
+ * \retval CUPTI_SUCCESS
121
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
122
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric collection
123
+ */
124
+ CUptiResult CUPTIAPI cuptiSassMetricsGetNumOfMetrics(CUpti_SassMetrics_GetNumOfMetrics_Params* pParams);
125
+
126
+ /**
127
+ * \brief Params for cuptiSassMetricsGetMetrics
128
+ */
129
+ typedef struct CUpti_SassMetrics_GetMetrics_Params
130
+ {
131
+ /// [in] should be equal to CUpti_SassMetrics_GetMetrics_Params_STRUCT_SIZE
132
+ size_t structSize;
133
+ /// [in] assign to NULL
134
+ void* pPriv;
135
+ /// [in] chip name for which metrics will be queried
136
+ const char* pChipName;
137
+ /// [in] number of metrics supported for the queried chip (can be queried using cuptiSassMetricsGetNumOfMetrics())
138
+ size_t numOfMetrics;
139
+ /// [out] list of metrics supported for queried chip
140
+ CUpti_SassMetrics_MetricDetails* pMetricsList;
141
+ } CUpti_SassMetrics_GetMetrics_Params;
142
+ #define CUpti_SassMetrics_GetMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetMetrics_Params, pMetricsList)
143
+
144
+ /**
145
+ * \brief Get the list of all supported SASS metrics for the chip.
146
+ *
147
+ * \param pParams A pointer to \ref CUpti_SassMetrics_GetMetrics_Params
148
+ *
149
+ * \retval CUPTI_SUCCESS
150
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
151
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric collection
152
+ */
153
+ CUptiResult CUPTIAPI cuptiSassMetricsGetMetrics(CUpti_SassMetrics_GetMetrics_Params* pParams);
154
+
155
+ /**
156
+ * \brief Params for cuptiSassMetricsGetProperties
157
+ */
158
+ typedef struct CUpti_SassMetrics_GetProperties_Params
159
+ {
160
+ /// [in] should be equal to CUpti_SassMetrics_GetProperties_Params_STRUCT_SIZE
161
+ size_t structSize;
162
+ /// [in] assign to NULL
163
+ void* pPriv;
164
+ /// [in] chip name for which metric will be queried
165
+ const char* pChipName;
166
+ /// [in] metric name
167
+ const char* pMetricName;
168
+ /// [out] returns the metric ID and the metric description
169
+ CUpti_SassMetrics_MetricDetails metric;
170
+ } CUpti_SassMetrics_GetProperties_Params;
171
+ #define CUpti_SassMetrics_GetProperties_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetProperties_Params, metric)
172
+
173
+ /**
174
+ * \brief Get metric properties for the queried metric.
175
+ * For a given metric the results will be put in CUpti_SassMetrics_MetricDetails which
176
+ * stores metric ID, description of the metric.
177
+ *
178
+ * \param pParams A pointer to \ref CUpti_SassMetrics_GetProperties_Params
179
+ *
180
+ * \retval CUPTI_SUCCESS
181
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
182
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
183
+ */
184
+ CUptiResult CUPTIAPI cuptiSassMetricsGetProperties(CUpti_SassMetrics_GetProperties_Params *pParams);
185
+
186
+ typedef struct CUpti_SassMetrics_Config
187
+ {
188
+ /// [in] unique id for the SASS metric, can be queried using cuptiSassMetricsGetProperties()
189
+ uint64_t metricId;
190
+ /// [in] CUpti_SassMetrics_OutputGranularity
191
+ uint8_t outputGranularity;
192
+ } CUpti_SassMetrics_Config;
193
+
194
+ /**
195
+ * \brief Params for cuptiSassMetricsSetConfig
196
+ */
197
+ typedef struct CUpti_SassMetricsSetConfig_Params
198
+ {
199
+ /// [in] equal to CUpti_SassMetricsSetConfig_Params_STRUCT_SIZE
200
+ size_t structSize;
201
+ /// [in] assign to NULL
202
+ void* pPriv;
203
+ /// [in] num of metric configs, will be equal to number of metrics queried
204
+ size_t numOfMetricConfig;
205
+ /// [in] list of metric config generated for given sass metrics
206
+ CUpti_SassMetrics_Config* pConfigs;
207
+ /// [in] device index for which config will be set, user can call this once for
208
+ /// the device on which the the SASS metric data will be collected
209
+ uint32_t deviceIndex;
210
+ } CUpti_SassMetricsSetConfig_Params;
211
+ #define CUpti_SassMetricsSetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsSetConfig_Params, deviceIndex)
212
+
213
+ /**
214
+ * \brief Set config for the SASS metric data collection for a device.
215
+ * User need to call this API before calling any of the SASS metric data collection APIs.
216
+ * Each set config API call need to be followed by cuptiSassPatchingUnSetConfig API
217
+ * before calling the cuptiSassMetricsSetConfig() API again for the same device.
218
+ *
219
+ * \param pParams A pointer to \ref CUpti_SassMetricsSetConfig_Params
220
+ *
221
+ * \retval CUPTI_SUCCESS
222
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
223
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
224
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this is called multiple times for the device without calling unset config API
225
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
226
+ */
227
+ CUptiResult CUPTIAPI cuptiSassMetricsSetConfig(CUpti_SassMetricsSetConfig_Params *pParams);
228
+
229
+ /**
230
+ * \brief Params for cuptiSassMetricsUnsetConfig
231
+ */
232
+ typedef struct CUpti_SassMetricsUnsetConfig_Params
233
+ {
234
+ /// [in] equal to CUpti_SassMetricsUnsetConfig_Params_STRUCT_SIZE
235
+ size_t structSize;
236
+ /// [in] assign to NULL
237
+ void* pPriv;
238
+ /// [in] device index for which SASS metric data collection config will get reset, user need to call this API for
239
+ /// all the devices on which the the SASS metric data collection have been configured.
240
+ uint32_t deviceIndex;
241
+ } CUpti_SassMetricsUnsetConfig_Params;
242
+ #define CUpti_SassMetricsUnsetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsUnsetConfig_Params, deviceIndex)
243
+
244
+ /**
245
+ * \brief Unset config API will reset the SASS metric data collection configuration for the device.
246
+ * Once this API called CUPTI will deallocate all the memory allocated and remove all
247
+ * the configuration for SASS metric data collection. User can only call this API for a device where
248
+ * cuptiSassMetricsSetConfig() API has been called earlier for the device.
249
+ *
250
+ * \param pParams A pointer to \ref CUpti_SassMetricsSetConfig_Params
251
+ *
252
+ * \retval CUPTI_SUCCESS
253
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
254
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
255
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this is called multiple times for the device without calling set config API
256
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
257
+ */
258
+ CUptiResult CUPTIAPI cuptiSassMetricsUnsetConfig(CUpti_SassMetricsUnsetConfig_Params *pParams);
259
+
260
+ /**
261
+ * \brief Params for cuptiSassMetricsEnable
262
+ */
263
+ typedef struct CUpti_SassMetricsEnable_Params
264
+ {
265
+ /// [in] equal to CUpti_SassMetricsEnable_Params_STRUCT_SIZE
266
+ size_t structSize;
267
+ /// [in] assign to NULL
268
+ void* pPriv;
269
+ /// [in] CUDA context on which SASS metric data collection will be enabled.
270
+ /// If set NULL, default context will be consider for SASS metric data collection.
271
+ CUcontext ctx;
272
+ /// [in] if false, all the functions will patched regardless of their execution with cuptiSassMetricsEnable() API call.
273
+ /// when this parameter is set to true, metric data collection for the function will be done at the very first execution in the enable/disble
274
+ /// range.
275
+ uint8_t enableLazyPatching;
276
+ } CUpti_SassMetricsEnable_Params;
277
+ #define CUpti_SassMetricsEnable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsEnable_Params, enableLazyPatching)
278
+
279
+ /**
280
+ * \brief Sass metric data collection enable API will mark the start of a range, between which kernel
281
+ * will be profiled for SASS metrics.
282
+ *
283
+ * \param pParams A pointer to \ref CUpti_SassMetricsEnable_Params
284
+ *
285
+ * \retval CUPTI_SUCCESS
286
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
287
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
288
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
289
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called multiple times for a cuda context without calling
290
+ * cuptiSassMetricsDisable() API or called before cuptiSassMetricsSetConfig() API call.
291
+ */
292
+ CUptiResult CUPTIAPI cuptiSassMetricsEnable(CUpti_SassMetricsEnable_Params* pParams);
293
+
294
+ /**
295
+ * \brief Params for cuptiSassMetricsDisable
296
+ */
297
+ typedef struct CUpti_SassMetricsDisable_Params
298
+ {
299
+ /// [in] equal to CUpti_SassMetricsDisable_Params_STRUCT_SIZE
300
+ size_t structSize;
301
+ /// [in] assign to NULL
302
+ void* pPriv;
303
+ /// [in] CUDA context on which SASS metric data collection will be disabled.
304
+ /// If set NULL, default context will be consider for SASS metric data collection.
305
+ CUcontext ctx;
306
+ /// [out] Num of dropped SASS records will be equal to numOfPatchedInstructions * numOfInstances.
307
+ /// Number of dropped records will be zero when data is flushed prior to calling the disable API.
308
+ size_t numOfDroppedRecords;
309
+ } CUpti_SassMetricsDisable_Params;
310
+ #define CUpti_SassMetricsDisable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsDisable_Params, numOfDroppedRecords)
311
+
312
+ /**
313
+ * \brief SASS metric data collection disable API will mark the end of a range, any kernel launched after this
314
+ * API call will not be profiled for the SASS metrics.
315
+ *
316
+ * \param pParams A pointer to \ref CUpti_SassMetricsDisable_Params
317
+ *
318
+ * \retval CUPTI_SUCCESS
319
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
320
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
321
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if any cuda context has not been created prior to this API call
322
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called multiple times for a cuda context without calling
323
+ * cuptiSassMetricsEnable() API or called before cuptiSassMetricsSetConfig() API call.
324
+ */
325
+ CUptiResult CUPTIAPI cuptiSassMetricsDisable(CUpti_SassMetricsDisable_Params* pParams);
326
+
327
+ /**
328
+ * \brief Params for cuptiSassMetricsGetDataProperties
329
+ */
330
+ typedef struct CUpti_SassMetricsGetDataProperties_Params
331
+ {
332
+ /// [in] equal to CUpti_SassMetricsGetDataProperties_Params_STRUCT_SIZE
333
+ size_t structSize;
334
+ /// [in] assign to NULL
335
+ void* pPriv;
336
+ /// [in] CUDA context on which SASS metric data collection was enabled.
337
+ /// If set NULL, default context will be consider for SASS metric data collection.
338
+ CUcontext ctx;
339
+ /// [out] total number of SASS records has been collected
340
+ size_t numOfPatchedInstructionRecords;
341
+ /// [out] number of instances for each metric value per instruction.
342
+ /// This will depend on CUpti_SassPatching_OutputGranularity level set for the metric config.
343
+ size_t numOfInstances;
344
+ } CUpti_SassMetricsGetDataProperties_Params;
345
+
346
+ #define CUpti_SassMetricsGetDataProperties_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsGetDataProperties_Params, numOfInstances)
347
+ /**
348
+ * \brief SASS metric data properties API will give the data regarding number of instances of a metric
349
+ * value and number of SASS instruction data has been collected. The number of instances of a metric
350
+ * will vary as per user set the output granularity level with CUpti_SassMetrics_OutputGranularity value.
351
+ * User need to allocate memory for retriving the SASS data using cuptiSassMetricsFlushData() API.
352
+ *
353
+ * \param pParams A pointer to \ref CUpti_SassMetricsGetDataProperties_Params
354
+ *
355
+ * \retval CUPTI_SUCCESS
356
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
357
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection
358
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called outside the enable/disable range.
359
+ */
360
+ CUptiResult CUPTIAPI cuptiSassMetricsGetDataProperties(CUpti_SassMetricsGetDataProperties_Params* pParams);
361
+
362
+ typedef struct CUpti_SassMetrics_InstanceValue
363
+ {
364
+ // unique id of the metric
365
+ uint64_t metricId;
366
+ // metric value
367
+ uint64_t value;
368
+ } CUpti_SassMetrics_InstanceValue;
369
+ #define CUpti_SassMetrics_InstanceValue_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_InstanceValue, value)
370
+
371
+ typedef struct CUpti_SassMetrics_Data
372
+ {
373
+ /// [in] equal to CUpti_SassMetricsFlushData_Params_STRUCT_SIZE
374
+ size_t structSize;
375
+ /// [in] assign to NULL
376
+ void* pPriv;
377
+ /// [out] Unique cubin id
378
+ uint32_t cubinCrc;
379
+ /// [out] function's unique symbol index in the module.
380
+ uint32_t functionIndex;
381
+ /// [out] The function name
382
+ const char* functionName;
383
+ /// [out] pc offset for the function in a module
384
+ uint32_t pcOffset;
385
+ /// [out] array of size equal to number of instances per metric, which contains the metric ID and metric value.
386
+ CUpti_SassMetrics_InstanceValue* pInstanceValues;
387
+ } CUpti_SassMetrics_Data;
388
+
389
+ /**
390
+ * \brief Params for cuptiSassMetricsFlushData
391
+ */
392
+ typedef struct CUpti_SassMetricsFlushData_Params
393
+ {
394
+ /// [in] equal to CUpti_SassMetricsFlushData_Params_STRUCT_SIZE
395
+ size_t structSize;
396
+ /// [in] assign to NULL
397
+ void* pPriv;
398
+ /// [in] CUDA context on which SASS metric data collection was enabled.
399
+ /// If set NULL, default context will be consider for SASS metric data collection.
400
+ CUcontext ctx;
401
+ /// [in] number of patched instruction record will be retrived, user can call cuptiSassMetricsGetDataProperties()
402
+ /// for getting total number of records available.
403
+ size_t numOfPatchedInstructionRecords;
404
+ /// [in] number of patched instruction record instances for a metric, user can call cuptiSassMetricsGetDataProperties()
405
+ /// for getting total number of instances for each record per metric available.
406
+ size_t numOfInstances;
407
+ /// [out]
408
+ CUpti_SassMetrics_Data* pMetricsData;
409
+ } CUpti_SassMetricsFlushData_Params;
410
+ #define CUpti_SassMetricsFlushData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsFlushData_Params, numOfInstances)
411
+
412
+ /**
413
+ * \brief Flush SASS metrics data from CUPTI internal buffer to the user buffer.
414
+ * User needs to allocate the buffer for retrieving the data. The number of records collected
415
+ * can be queried using the API cuptiSassMetricsGetDataProperties().
416
+ *
417
+ * \param pParams A pointer to \ref CUpti_SassMetricsFlushData_Params
418
+ *
419
+ * \retval CUPTI_SUCCESS
420
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
421
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection.
422
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called outside the enable/disable range.
423
+ */
424
+ CUptiResult CUPTIAPI cuptiSassMetricsFlushData(CUpti_SassMetricsFlushData_Params* pParams);
425
+
426
+ /** @} */ /* END CUPTI_SASS_METRICS_API */
427
+
428
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
429
+ #pragma GCC visibility pop
430
+ #endif
431
+
432
+ #ifdef __cplusplus
433
+ } /* extern "C" */
434
+ #endif
435
+
436
+ #endif // _CUPTI_SASS_METRICS_H_
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_target.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #if !defined(_CUPTI_TARGET_H_)
2
+ #define _CUPTI_TARGET_H_
3
+
4
+ /*
5
+ CUPTI profiler target API's
6
+ This file contains the CUPTI profiling API's.
7
+ */
8
+ #include <cupti_result.h>
9
+ #include <stddef.h>
10
+ #include <stdint.h>
11
+
12
+ #ifdef __cplusplus
13
+ extern "C" {
14
+ #endif
15
+
16
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
17
+ #pragma GCC visibility push(default)
18
+ #endif
19
+
20
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
21
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
22
+ #endif
23
+
24
+ typedef struct CUpti_Device_GetChipName_Params
25
+ {
26
+ size_t structSize; //!< [in]
27
+ void* pPriv; //!< [in] assign to NULL
28
+
29
+ size_t deviceIndex; //!< [in]
30
+ const char* pChipName; //!< [out]
31
+ } CUpti_Device_GetChipName_Params;
32
+
33
+ #define CUpti_Device_GetChipName_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Device_GetChipName_Params, pChipName)
34
+ CUptiResult CUPTIAPI cuptiDeviceGetChipName(CUpti_Device_GetChipName_Params *pParams);
35
+
36
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
37
+ #pragma GCC visibility pop
38
+ #endif
39
+
40
+ #ifdef __cplusplus
41
+ } /* extern "C" */
42
+ #endif
43
+ #endif
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // Dependent includes
4
+ #ifdef __APPLE__
5
+ #include <OpenGL/gl.h>
6
+ #else
7
+ #include <GL/gl.h>
8
+ #endif
9
+
10
+ // CUDA public interface, for type definitions and cu* function prototypes
11
+ #include "cudaGL.h"
12
+
13
+
14
+ // *************************************************************************
15
+ // Definitions of structs to hold parameters for each function
16
+ // *************************************************************************
17
+
18
+ typedef struct cuGraphicsGLRegisterBuffer_params_st {
19
+ CUgraphicsResource *pCudaResource;
20
+ GLuint buffer;
21
+ unsigned int Flags;
22
+ } cuGraphicsGLRegisterBuffer_params;
23
+
24
+ typedef struct cuGraphicsGLRegisterImage_params_st {
25
+ CUgraphicsResource *pCudaResource;
26
+ GLuint image;
27
+ GLenum target;
28
+ unsigned int Flags;
29
+ } cuGraphicsGLRegisterImage_params;
30
+
31
+ typedef struct cuGLGetDevices_v2_params_st {
32
+ unsigned int *pCudaDeviceCount;
33
+ CUdevice *pCudaDevices;
34
+ unsigned int cudaDeviceCount;
35
+ CUGLDeviceList deviceList;
36
+ } cuGLGetDevices_v2_params;
37
+
38
+ typedef struct cuGLCtxCreate_v2_params_st {
39
+ CUcontext *pCtx;
40
+ unsigned int Flags;
41
+ CUdevice device;
42
+ } cuGLCtxCreate_v2_params;
43
+
44
+ typedef struct cuGLRegisterBufferObject_params_st {
45
+ GLuint buffer;
46
+ } cuGLRegisterBufferObject_params;
47
+
48
+ typedef struct cuGLMapBufferObject_v2_ptds_params_st {
49
+ CUdeviceptr *dptr;
50
+ size_t *size;
51
+ GLuint buffer;
52
+ } cuGLMapBufferObject_v2_ptds_params;
53
+
54
+ typedef struct cuGLUnmapBufferObject_params_st {
55
+ GLuint buffer;
56
+ } cuGLUnmapBufferObject_params;
57
+
58
+ typedef struct cuGLUnregisterBufferObject_params_st {
59
+ GLuint buffer;
60
+ } cuGLUnregisterBufferObject_params;
61
+
62
+ typedef struct cuGLSetBufferObjectMapFlags_params_st {
63
+ GLuint buffer;
64
+ unsigned int Flags;
65
+ } cuGLSetBufferObjectMapFlags_params;
66
+
67
+ typedef struct cuGLMapBufferObjectAsync_v2_ptsz_params_st {
68
+ CUdeviceptr *dptr;
69
+ size_t *size;
70
+ GLuint buffer;
71
+ CUstream hStream;
72
+ } cuGLMapBufferObjectAsync_v2_ptsz_params;
73
+
74
+ typedef struct cuGLUnmapBufferObjectAsync_params_st {
75
+ GLuint buffer;
76
+ CUstream hStream;
77
+ } cuGLUnmapBufferObjectAsync_params;
78
+
79
+ typedef struct cuGLGetDevices_params_st {
80
+ unsigned int *pCudaDeviceCount;
81
+ CUdevice *pCudaDevices;
82
+ unsigned int cudaDeviceCount;
83
+ CUGLDeviceList deviceList;
84
+ } cuGLGetDevices_params;
85
+
86
+ typedef struct cuGLMapBufferObject_v2_params_st {
87
+ CUdeviceptr *dptr;
88
+ size_t *size;
89
+ GLuint buffer;
90
+ } cuGLMapBufferObject_v2_params;
91
+
92
+ typedef struct cuGLMapBufferObjectAsync_v2_params_st {
93
+ CUdeviceptr *dptr;
94
+ size_t *size;
95
+ GLuint buffer;
96
+ CUstream hStream;
97
+ } cuGLMapBufferObjectAsync_v2_params;
98
+
99
+ typedef struct cuGLCtxCreate_params_st {
100
+ CUcontext *pCtx;
101
+ unsigned int Flags;
102
+ CUdevice device;
103
+ } cuGLCtxCreate_params;
104
+
105
+ typedef struct cuGLMapBufferObject_params_st {
106
+ CUdeviceptr_v1 *dptr;
107
+ unsigned int *size;
108
+ GLuint buffer;
109
+ } cuGLMapBufferObject_params;
110
+
111
+ typedef struct cuGLMapBufferObjectAsync_params_st {
112
+ CUdeviceptr_v1 *dptr;
113
+ unsigned int *size;
114
+ GLuint buffer;
115
+ CUstream hStream;
116
+ } cuGLMapBufferObjectAsync_params;
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // Dependent includes
4
+ #include <vdpau/vdpau.h>
5
+
6
+ // CUDA public interface, for type definitions and cu* function prototypes
7
+ #include "cudaVDPAU.h"
8
+
9
+
10
+ // *************************************************************************
11
+ // Definitions of structs to hold parameters for each function
12
+ // *************************************************************************
13
+
14
+ typedef struct cuVDPAUGetDevice_params_st {
15
+ CUdevice *pDevice;
16
+ VdpDevice vdpDevice;
17
+ VdpGetProcAddress *vdpGetProcAddress;
18
+ } cuVDPAUGetDevice_params;
19
+
20
+ typedef struct cuVDPAUCtxCreate_v2_params_st {
21
+ CUcontext *pCtx;
22
+ unsigned int flags;
23
+ CUdevice device;
24
+ VdpDevice vdpDevice;
25
+ VdpGetProcAddress *vdpGetProcAddress;
26
+ } cuVDPAUCtxCreate_v2_params;
27
+
28
+ typedef struct cuGraphicsVDPAURegisterVideoSurface_params_st {
29
+ CUgraphicsResource *pCudaResource;
30
+ VdpVideoSurface vdpSurface;
31
+ unsigned int flags;
32
+ } cuGraphicsVDPAURegisterVideoSurface_params;
33
+
34
+ typedef struct cuGraphicsVDPAURegisterOutputSurface_params_st {
35
+ CUgraphicsResource *pCudaResource;
36
+ VdpOutputSurface vdpSurface;
37
+ unsigned int flags;
38
+ } cuGraphicsVDPAURegisterOutputSurface_params;
39
+
40
+ typedef struct cuVDPAUCtxCreate_params_st {
41
+ CUcontext *pCtx;
42
+ unsigned int flags;
43
+ CUdevice device;
44
+ VdpDevice vdpDevice;
45
+ VdpGetProcAddress *vdpGetProcAddress;
46
+ } cuVDPAUCtxCreate_params;
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_gl_interop.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaGLGetDevices_v4010_params_st {
12
+ unsigned int *pCudaDeviceCount;
13
+ int *pCudaDevices;
14
+ unsigned int cudaDeviceCount;
15
+ enum cudaGLDeviceList deviceList;
16
+ } cudaGLGetDevices_v4010_params;
17
+
18
+ typedef struct cudaGraphicsGLRegisterImage_v3020_params_st {
19
+ struct cudaGraphicsResource **resource;
20
+ GLuint image;
21
+ GLenum target;
22
+ unsigned int flags;
23
+ } cudaGraphicsGLRegisterImage_v3020_params;
24
+
25
+ typedef struct cudaGraphicsGLRegisterBuffer_v3020_params_st {
26
+ struct cudaGraphicsResource **resource;
27
+ GLuint buffer;
28
+ unsigned int flags;
29
+ } cudaGraphicsGLRegisterBuffer_v3020_params;
30
+
31
+ typedef struct cudaGLSetGLDevice_v3020_params_st {
32
+ int device;
33
+ } cudaGLSetGLDevice_v3020_params;
34
+
35
+ typedef struct cudaGLRegisterBufferObject_v3020_params_st {
36
+ GLuint bufObj;
37
+ } cudaGLRegisterBufferObject_v3020_params;
38
+
39
+ typedef struct cudaGLMapBufferObject_v3020_params_st {
40
+ void **devPtr;
41
+ GLuint bufObj;
42
+ } cudaGLMapBufferObject_v3020_params;
43
+
44
+ typedef struct cudaGLUnmapBufferObject_v3020_params_st {
45
+ GLuint bufObj;
46
+ } cudaGLUnmapBufferObject_v3020_params;
47
+
48
+ typedef struct cudaGLUnregisterBufferObject_v3020_params_st {
49
+ GLuint bufObj;
50
+ } cudaGLUnregisterBufferObject_v3020_params;
51
+
52
+ typedef struct cudaGLSetBufferObjectMapFlags_v3020_params_st {
53
+ GLuint bufObj;
54
+ unsigned int flags;
55
+ } cudaGLSetBufferObjectMapFlags_v3020_params;
56
+
57
+ typedef struct cudaGLMapBufferObjectAsync_v3020_params_st {
58
+ void **devPtr;
59
+ GLuint bufObj;
60
+ cudaStream_t stream;
61
+ } cudaGLMapBufferObjectAsync_v3020_params;
62
+
63
+ typedef struct cudaGLUnmapBufferObjectAsync_v3020_params_st {
64
+ GLuint bufObj;
65
+ cudaStream_t stream;
66
+ } cudaGLUnmapBufferObjectAsync_v3020_params;
67
+
68
+ // Parameter trace structures for removed functions
69
+
70
+
71
+ // End of parameter trace structures
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h ADDED
@@ -0,0 +1,2288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_runtime_api.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaDeviceSetLimit_v3020_params_st {
12
+ enum cudaLimit limit;
13
+ size_t value;
14
+ } cudaDeviceSetLimit_v3020_params;
15
+
16
+ typedef struct cudaDeviceGetLimit_v3020_params_st {
17
+ size_t *pValue;
18
+ enum cudaLimit limit;
19
+ } cudaDeviceGetLimit_v3020_params;
20
+
21
+ typedef struct cudaDeviceGetTexture1DLinearMaxWidth_v11010_params_st {
22
+ size_t *maxWidthInElements;
23
+ const struct cudaChannelFormatDesc *fmtDesc;
24
+ int device;
25
+ } cudaDeviceGetTexture1DLinearMaxWidth_v11010_params;
26
+
27
+ typedef struct cudaDeviceGetCacheConfig_v3020_params_st {
28
+ enum cudaFuncCache *pCacheConfig;
29
+ } cudaDeviceGetCacheConfig_v3020_params;
30
+
31
+ typedef struct cudaDeviceGetStreamPriorityRange_v5050_params_st {
32
+ int *leastPriority;
33
+ int *greatestPriority;
34
+ } cudaDeviceGetStreamPriorityRange_v5050_params;
35
+
36
+ typedef struct cudaDeviceSetCacheConfig_v3020_params_st {
37
+ enum cudaFuncCache cacheConfig;
38
+ } cudaDeviceSetCacheConfig_v3020_params;
39
+
40
+ typedef struct cudaDeviceGetByPCIBusId_v4010_params_st {
41
+ int *device;
42
+ const char *pciBusId;
43
+ } cudaDeviceGetByPCIBusId_v4010_params;
44
+
45
+ typedef struct cudaDeviceGetPCIBusId_v4010_params_st {
46
+ char *pciBusId;
47
+ int len;
48
+ int device;
49
+ } cudaDeviceGetPCIBusId_v4010_params;
50
+
51
+ typedef struct cudaIpcGetEventHandle_v4010_params_st {
52
+ cudaIpcEventHandle_t *handle;
53
+ cudaEvent_t event;
54
+ } cudaIpcGetEventHandle_v4010_params;
55
+
56
+ typedef struct cudaIpcOpenEventHandle_v4010_params_st {
57
+ cudaEvent_t *event;
58
+ cudaIpcEventHandle_t handle;
59
+ } cudaIpcOpenEventHandle_v4010_params;
60
+
61
+ typedef struct cudaIpcGetMemHandle_v4010_params_st {
62
+ cudaIpcMemHandle_t *handle;
63
+ void *devPtr;
64
+ } cudaIpcGetMemHandle_v4010_params;
65
+
66
+ typedef struct cudaIpcOpenMemHandle_v4010_params_st {
67
+ void **devPtr;
68
+ cudaIpcMemHandle_t handle;
69
+ unsigned int flags;
70
+ } cudaIpcOpenMemHandle_v4010_params;
71
+
72
+ typedef struct cudaIpcCloseMemHandle_v4010_params_st {
73
+ void *devPtr;
74
+ } cudaIpcCloseMemHandle_v4010_params;
75
+
76
+ typedef struct cudaDeviceFlushGPUDirectRDMAWrites_v11030_params_st {
77
+ enum cudaFlushGPUDirectRDMAWritesTarget target;
78
+ enum cudaFlushGPUDirectRDMAWritesScope scope;
79
+ } cudaDeviceFlushGPUDirectRDMAWrites_v11030_params;
80
+
81
+ typedef struct cudaDeviceGetSharedMemConfig_v4020_params_st {
82
+ enum cudaSharedMemConfig *pConfig;
83
+ } cudaDeviceGetSharedMemConfig_v4020_params;
84
+
85
+ typedef struct cudaDeviceSetSharedMemConfig_v4020_params_st {
86
+ enum cudaSharedMemConfig config;
87
+ } cudaDeviceSetSharedMemConfig_v4020_params;
88
+
89
+ typedef struct cudaGetErrorName_v6050_params_st {
90
+ cudaError_t error;
91
+ } cudaGetErrorName_v6050_params;
92
+
93
+ typedef struct cudaGetErrorString_v3020_params_st {
94
+ cudaError_t error;
95
+ } cudaGetErrorString_v3020_params;
96
+
97
+ typedef struct cudaGetDeviceCount_v3020_params_st {
98
+ int *count;
99
+ } cudaGetDeviceCount_v3020_params;
100
+
101
+ typedef struct cudaGetDeviceProperties_v2_v12000_params_st {
102
+ struct cudaDeviceProp *prop;
103
+ int device;
104
+ } cudaGetDeviceProperties_v2_v12000_params;
105
+
106
+ typedef struct cudaDeviceGetAttribute_v5000_params_st {
107
+ int *value;
108
+ enum cudaDeviceAttr attr;
109
+ int device;
110
+ } cudaDeviceGetAttribute_v5000_params;
111
+
112
+ typedef struct cudaDeviceGetDefaultMemPool_v11020_params_st {
113
+ cudaMemPool_t *memPool;
114
+ int device;
115
+ } cudaDeviceGetDefaultMemPool_v11020_params;
116
+
117
+ typedef struct cudaDeviceSetMemPool_v11020_params_st {
118
+ int device;
119
+ cudaMemPool_t memPool;
120
+ } cudaDeviceSetMemPool_v11020_params;
121
+
122
+ typedef struct cudaDeviceGetMemPool_v11020_params_st {
123
+ cudaMemPool_t *memPool;
124
+ int device;
125
+ } cudaDeviceGetMemPool_v11020_params;
126
+
127
+ typedef struct cudaDeviceGetNvSciSyncAttributes_v10020_params_st {
128
+ void *nvSciSyncAttrList;
129
+ int device;
130
+ int flags;
131
+ } cudaDeviceGetNvSciSyncAttributes_v10020_params;
132
+
133
+ typedef struct cudaDeviceGetP2PAttribute_v8000_params_st {
134
+ int *value;
135
+ enum cudaDeviceP2PAttr attr;
136
+ int srcDevice;
137
+ int dstDevice;
138
+ } cudaDeviceGetP2PAttribute_v8000_params;
139
+
140
+ typedef struct cudaChooseDevice_v3020_params_st {
141
+ int *device;
142
+ const struct cudaDeviceProp *prop;
143
+ } cudaChooseDevice_v3020_params;
144
+
145
+ typedef struct cudaInitDevice_v12000_params_st {
146
+ int device;
147
+ unsigned int deviceFlags;
148
+ unsigned int flags;
149
+ } cudaInitDevice_v12000_params;
150
+
151
+ typedef struct cudaSetDevice_v3020_params_st {
152
+ int device;
153
+ } cudaSetDevice_v3020_params;
154
+
155
+ typedef struct cudaGetDevice_v3020_params_st {
156
+ int *device;
157
+ } cudaGetDevice_v3020_params;
158
+
159
+ typedef struct cudaSetValidDevices_v3020_params_st {
160
+ int *device_arr;
161
+ int len;
162
+ } cudaSetValidDevices_v3020_params;
163
+
164
+ typedef struct cudaSetDeviceFlags_v3020_params_st {
165
+ unsigned int flags;
166
+ } cudaSetDeviceFlags_v3020_params;
167
+
168
+ typedef struct cudaGetDeviceFlags_v7000_params_st {
169
+ unsigned int *flags;
170
+ } cudaGetDeviceFlags_v7000_params;
171
+
172
+ typedef struct cudaStreamCreate_v3020_params_st {
173
+ cudaStream_t *pStream;
174
+ } cudaStreamCreate_v3020_params;
175
+
176
+ typedef struct cudaStreamCreateWithFlags_v5000_params_st {
177
+ cudaStream_t *pStream;
178
+ unsigned int flags;
179
+ } cudaStreamCreateWithFlags_v5000_params;
180
+
181
+ typedef struct cudaStreamCreateWithPriority_v5050_params_st {
182
+ cudaStream_t *pStream;
183
+ unsigned int flags;
184
+ int priority;
185
+ } cudaStreamCreateWithPriority_v5050_params;
186
+
187
+ typedef struct cudaStreamGetPriority_ptsz_v7000_params_st {
188
+ cudaStream_t hStream;
189
+ int *priority;
190
+ } cudaStreamGetPriority_ptsz_v7000_params;
191
+
192
+ typedef struct cudaStreamGetFlags_ptsz_v7000_params_st {
193
+ cudaStream_t hStream;
194
+ unsigned int *flags;
195
+ } cudaStreamGetFlags_ptsz_v7000_params;
196
+
197
+ typedef struct cudaStreamGetId_ptsz_v12000_params_st {
198
+ cudaStream_t hStream;
199
+ unsigned long long *streamId;
200
+ } cudaStreamGetId_ptsz_v12000_params;
201
+
202
+ typedef struct cudaStreamCopyAttributes_ptsz_v11000_params_st {
203
+ cudaStream_t dst;
204
+ cudaStream_t src;
205
+ } cudaStreamCopyAttributes_ptsz_v11000_params;
206
+
207
+ typedef struct cudaStreamGetAttribute_ptsz_v11000_params_st {
208
+ cudaStream_t hStream;
209
+ cudaStreamAttrID attr;
210
+ cudaStreamAttrValue *value_out;
211
+ } cudaStreamGetAttribute_ptsz_v11000_params;
212
+
213
+ typedef struct cudaStreamSetAttribute_ptsz_v11000_params_st {
214
+ cudaStream_t hStream;
215
+ cudaStreamAttrID attr;
216
+ const cudaStreamAttrValue *value;
217
+ } cudaStreamSetAttribute_ptsz_v11000_params;
218
+
219
+ typedef struct cudaStreamDestroy_v5050_params_st {
220
+ cudaStream_t stream;
221
+ } cudaStreamDestroy_v5050_params;
222
+
223
+ typedef struct cudaStreamWaitEvent_ptsz_v7000_params_st {
224
+ cudaStream_t stream;
225
+ cudaEvent_t event;
226
+ unsigned int flags;
227
+ } cudaStreamWaitEvent_ptsz_v7000_params;
228
+
229
+ typedef struct cudaStreamAddCallback_ptsz_v7000_params_st {
230
+ cudaStream_t stream;
231
+ cudaStreamCallback_t callback;
232
+ void *userData;
233
+ unsigned int flags;
234
+ } cudaStreamAddCallback_ptsz_v7000_params;
235
+
236
+ typedef struct cudaStreamSynchronize_ptsz_v7000_params_st {
237
+ cudaStream_t stream;
238
+ } cudaStreamSynchronize_ptsz_v7000_params;
239
+
240
+ typedef struct cudaStreamQuery_ptsz_v7000_params_st {
241
+ cudaStream_t stream;
242
+ } cudaStreamQuery_ptsz_v7000_params;
243
+
244
+ typedef struct cudaStreamAttachMemAsync_ptsz_v7000_params_st {
245
+ cudaStream_t stream;
246
+ void *devPtr;
247
+ size_t length;
248
+ unsigned int flags;
249
+ } cudaStreamAttachMemAsync_ptsz_v7000_params;
250
+
251
+ typedef struct cudaStreamBeginCapture_ptsz_v10000_params_st {
252
+ cudaStream_t stream;
253
+ enum cudaStreamCaptureMode mode;
254
+ } cudaStreamBeginCapture_ptsz_v10000_params;
255
+
256
+ typedef struct cudaStreamBeginCaptureToGraph_ptsz_v12030_params_st {
257
+ cudaStream_t stream;
258
+ cudaGraph_t graph;
259
+ const cudaGraphNode_t *dependencies;
260
+ const cudaGraphEdgeData *dependencyData;
261
+ size_t numDependencies;
262
+ enum cudaStreamCaptureMode mode;
263
+ } cudaStreamBeginCaptureToGraph_ptsz_v12030_params;
264
+
265
+ typedef struct cudaThreadExchangeStreamCaptureMode_v10010_params_st {
266
+ enum cudaStreamCaptureMode *mode;
267
+ } cudaThreadExchangeStreamCaptureMode_v10010_params;
268
+
269
+ typedef struct cudaStreamEndCapture_ptsz_v10000_params_st {
270
+ cudaStream_t stream;
271
+ cudaGraph_t *pGraph;
272
+ } cudaStreamEndCapture_ptsz_v10000_params;
273
+
274
+ typedef struct cudaStreamIsCapturing_ptsz_v10000_params_st {
275
+ cudaStream_t stream;
276
+ enum cudaStreamCaptureStatus *pCaptureStatus;
277
+ } cudaStreamIsCapturing_ptsz_v10000_params;
278
+
279
+ typedef struct cudaStreamGetCaptureInfo_v2_ptsz_v11030_params_st {
280
+ cudaStream_t stream;
281
+ enum cudaStreamCaptureStatus *captureStatus_out;
282
+ unsigned long long *id_out;
283
+ cudaGraph_t *graph_out;
284
+ const cudaGraphNode_t **dependencies_out;
285
+ size_t *numDependencies_out;
286
+ } cudaStreamGetCaptureInfo_v2_ptsz_v11030_params;
287
+
288
+ typedef struct cudaStreamGetCaptureInfo_v3_ptsz_v12030_params_st {
289
+ cudaStream_t stream;
290
+ enum cudaStreamCaptureStatus *captureStatus_out;
291
+ unsigned long long *id_out;
292
+ cudaGraph_t *graph_out;
293
+ const cudaGraphNode_t **dependencies_out;
294
+ const cudaGraphEdgeData **edgeData_out;
295
+ size_t *numDependencies_out;
296
+ } cudaStreamGetCaptureInfo_v3_ptsz_v12030_params;
297
+
298
+ typedef struct cudaStreamUpdateCaptureDependencies_ptsz_v11030_params_st {
299
+ cudaStream_t stream;
300
+ cudaGraphNode_t *dependencies;
301
+ size_t numDependencies;
302
+ unsigned int flags;
303
+ } cudaStreamUpdateCaptureDependencies_ptsz_v11030_params;
304
+
305
+ typedef struct cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params_st {
306
+ cudaStream_t stream;
307
+ cudaGraphNode_t *dependencies;
308
+ const cudaGraphEdgeData *dependencyData;
309
+ size_t numDependencies;
310
+ unsigned int flags;
311
+ } cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params;
312
+
313
+ typedef struct cudaEventCreate_v3020_params_st {
314
+ cudaEvent_t *event;
315
+ } cudaEventCreate_v3020_params;
316
+
317
+ typedef struct cudaEventCreateWithFlags_v3020_params_st {
318
+ cudaEvent_t *event;
319
+ unsigned int flags;
320
+ } cudaEventCreateWithFlags_v3020_params;
321
+
322
+ typedef struct cudaEventRecord_ptsz_v7000_params_st {
323
+ cudaEvent_t event;
324
+ cudaStream_t stream;
325
+ } cudaEventRecord_ptsz_v7000_params;
326
+
327
+ typedef struct cudaEventRecordWithFlags_ptsz_v11010_params_st {
328
+ cudaEvent_t event;
329
+ cudaStream_t stream;
330
+ unsigned int flags;
331
+ } cudaEventRecordWithFlags_ptsz_v11010_params;
332
+
333
+ typedef struct cudaEventQuery_v3020_params_st {
334
+ cudaEvent_t event;
335
+ } cudaEventQuery_v3020_params;
336
+
337
+ typedef struct cudaEventSynchronize_v3020_params_st {
338
+ cudaEvent_t event;
339
+ } cudaEventSynchronize_v3020_params;
340
+
341
+ typedef struct cudaEventDestroy_v3020_params_st {
342
+ cudaEvent_t event;
343
+ } cudaEventDestroy_v3020_params;
344
+
345
+ typedef struct cudaEventElapsedTime_v3020_params_st {
346
+ float *ms;
347
+ cudaEvent_t start;
348
+ cudaEvent_t end;
349
+ } cudaEventElapsedTime_v3020_params;
350
+
351
+ typedef struct cudaImportExternalMemory_v10000_params_st {
352
+ cudaExternalMemory_t *extMem_out;
353
+ const struct cudaExternalMemoryHandleDesc *memHandleDesc;
354
+ } cudaImportExternalMemory_v10000_params;
355
+
356
+ typedef struct cudaExternalMemoryGetMappedBuffer_v10000_params_st {
357
+ void **devPtr;
358
+ cudaExternalMemory_t extMem;
359
+ const struct cudaExternalMemoryBufferDesc *bufferDesc;
360
+ } cudaExternalMemoryGetMappedBuffer_v10000_params;
361
+
362
+ typedef struct cudaExternalMemoryGetMappedMipmappedArray_v10000_params_st {
363
+ cudaMipmappedArray_t *mipmap;
364
+ cudaExternalMemory_t extMem;
365
+ const struct cudaExternalMemoryMipmappedArrayDesc *mipmapDesc;
366
+ } cudaExternalMemoryGetMappedMipmappedArray_v10000_params;
367
+
368
+ typedef struct cudaDestroyExternalMemory_v10000_params_st {
369
+ cudaExternalMemory_t extMem;
370
+ } cudaDestroyExternalMemory_v10000_params;
371
+
372
+ typedef struct cudaImportExternalSemaphore_v10000_params_st {
373
+ cudaExternalSemaphore_t *extSem_out;
374
+ const struct cudaExternalSemaphoreHandleDesc *semHandleDesc;
375
+ } cudaImportExternalSemaphore_v10000_params;
376
+
377
+ typedef struct cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params_st {
378
+ const cudaExternalSemaphore_t *extSemArray;
379
+ const struct cudaExternalSemaphoreSignalParams *paramsArray;
380
+ unsigned int numExtSems;
381
+ cudaStream_t stream;
382
+ } cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params;
383
+
384
+ typedef struct cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params_st {
385
+ const cudaExternalSemaphore_t *extSemArray;
386
+ const struct cudaExternalSemaphoreWaitParams *paramsArray;
387
+ unsigned int numExtSems;
388
+ cudaStream_t stream;
389
+ } cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params;
390
+
391
+ typedef struct cudaDestroyExternalSemaphore_v10000_params_st {
392
+ cudaExternalSemaphore_t extSem;
393
+ } cudaDestroyExternalSemaphore_v10000_params;
394
+
395
+ typedef struct cudaLaunchKernel_ptsz_v7000_params_st {
396
+ const void *func;
397
+ dim3 gridDim;
398
+ dim3 blockDim;
399
+ void **args;
400
+ size_t sharedMem;
401
+ cudaStream_t stream;
402
+ } cudaLaunchKernel_ptsz_v7000_params;
403
+
404
+ typedef struct cudaLaunchKernelExC_ptsz_v11060_params_st {
405
+ const cudaLaunchConfig_t *config;
406
+ const void *func;
407
+ void **args;
408
+ } cudaLaunchKernelExC_ptsz_v11060_params;
409
+
410
+ typedef struct cudaLaunchCooperativeKernel_ptsz_v9000_params_st {
411
+ const void *func;
412
+ dim3 gridDim;
413
+ dim3 blockDim;
414
+ void **args;
415
+ size_t sharedMem;
416
+ cudaStream_t stream;
417
+ } cudaLaunchCooperativeKernel_ptsz_v9000_params;
418
+
419
+ typedef struct cudaLaunchCooperativeKernelMultiDevice_v9000_params_st {
420
+ struct cudaLaunchParams *launchParamsList;
421
+ unsigned int numDevices;
422
+ unsigned int flags;
423
+ } cudaLaunchCooperativeKernelMultiDevice_v9000_params;
424
+
425
+ typedef struct cudaFuncSetCacheConfig_v3020_params_st {
426
+ const void *func;
427
+ enum cudaFuncCache cacheConfig;
428
+ } cudaFuncSetCacheConfig_v3020_params;
429
+
430
+ typedef struct cudaFuncGetAttributes_v3020_params_st {
431
+ struct cudaFuncAttributes *attr;
432
+ const void *func;
433
+ } cudaFuncGetAttributes_v3020_params;
434
+
435
+ typedef struct cudaFuncSetAttribute_v9000_params_st {
436
+ const void *func;
437
+ enum cudaFuncAttribute attr;
438
+ int value;
439
+ } cudaFuncSetAttribute_v9000_params;
440
+
441
+ typedef struct cudaFuncGetName_v12030_params_st {
442
+ const char **name;
443
+ const void *func;
444
+ } cudaFuncGetName_v12030_params;
445
+
446
+ typedef struct cudaFuncGetParamInfo_v12040_params_st {
447
+ const void *func;
448
+ size_t paramIndex;
449
+ size_t *paramOffset;
450
+ size_t *paramSize;
451
+ } cudaFuncGetParamInfo_v12040_params;
452
+
453
+ typedef struct cudaLaunchHostFunc_ptsz_v10000_params_st {
454
+ cudaStream_t stream;
455
+ cudaHostFn_t fn;
456
+ void *userData;
457
+ } cudaLaunchHostFunc_ptsz_v10000_params;
458
+
459
+ typedef struct cudaFuncSetSharedMemConfig_v4020_params_st {
460
+ const void *func;
461
+ enum cudaSharedMemConfig config;
462
+ } cudaFuncSetSharedMemConfig_v4020_params;
463
+
464
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params_st {
465
+ int *numBlocks;
466
+ const void *func;
467
+ int blockSize;
468
+ size_t dynamicSMemSize;
469
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params;
470
+
471
+ typedef struct cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params_st {
472
+ size_t *dynamicSmemSize;
473
+ const void *func;
474
+ int numBlocks;
475
+ int blockSize;
476
+ } cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params;
477
+
478
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params_st {
479
+ int *numBlocks;
480
+ const void *func;
481
+ int blockSize;
482
+ size_t dynamicSMemSize;
483
+ unsigned int flags;
484
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params;
485
+
486
+ typedef struct cudaOccupancyMaxPotentialClusterSize_v11070_params_st {
487
+ int *clusterSize;
488
+ const void *func;
489
+ const cudaLaunchConfig_t *launchConfig;
490
+ } cudaOccupancyMaxPotentialClusterSize_v11070_params;
491
+
492
+ typedef struct cudaOccupancyMaxActiveClusters_v11070_params_st {
493
+ int *numClusters;
494
+ const void *func;
495
+ const cudaLaunchConfig_t *launchConfig;
496
+ } cudaOccupancyMaxActiveClusters_v11070_params;
497
+
498
+ typedef struct cudaMallocManaged_v6000_params_st {
499
+ void **devPtr;
500
+ size_t size;
501
+ unsigned int flags;
502
+ } cudaMallocManaged_v6000_params;
503
+
504
+ typedef struct cudaMalloc_v3020_params_st {
505
+ void **devPtr;
506
+ size_t size;
507
+ } cudaMalloc_v3020_params;
508
+
509
+ typedef struct cudaMallocHost_v3020_params_st {
510
+ void **ptr;
511
+ size_t size;
512
+ } cudaMallocHost_v3020_params;
513
+
514
+ typedef struct cudaMallocPitch_v3020_params_st {
515
+ void **devPtr;
516
+ size_t *pitch;
517
+ size_t width;
518
+ size_t height;
519
+ } cudaMallocPitch_v3020_params;
520
+
521
+ typedef struct cudaMallocArray_v3020_params_st {
522
+ cudaArray_t *array;
523
+ const struct cudaChannelFormatDesc *desc;
524
+ size_t width;
525
+ size_t height;
526
+ unsigned int flags;
527
+ } cudaMallocArray_v3020_params;
528
+
529
+ typedef struct cudaFree_v3020_params_st {
530
+ void *devPtr;
531
+ } cudaFree_v3020_params;
532
+
533
+ typedef struct cudaFreeHost_v3020_params_st {
534
+ void *ptr;
535
+ } cudaFreeHost_v3020_params;
536
+
537
+ typedef struct cudaFreeArray_v3020_params_st {
538
+ cudaArray_t array;
539
+ } cudaFreeArray_v3020_params;
540
+
541
+ typedef struct cudaFreeMipmappedArray_v5000_params_st {
542
+ cudaMipmappedArray_t mipmappedArray;
543
+ } cudaFreeMipmappedArray_v5000_params;
544
+
545
+ typedef struct cudaHostAlloc_v3020_params_st {
546
+ void **pHost;
547
+ size_t size;
548
+ unsigned int flags;
549
+ } cudaHostAlloc_v3020_params;
550
+
551
+ typedef struct cudaHostRegister_v4000_params_st {
552
+ void *ptr;
553
+ size_t size;
554
+ unsigned int flags;
555
+ } cudaHostRegister_v4000_params;
556
+
557
+ typedef struct cudaHostUnregister_v4000_params_st {
558
+ void *ptr;
559
+ } cudaHostUnregister_v4000_params;
560
+
561
+ typedef struct cudaHostGetDevicePointer_v3020_params_st {
562
+ void **pDevice;
563
+ void *pHost;
564
+ unsigned int flags;
565
+ } cudaHostGetDevicePointer_v3020_params;
566
+
567
+ typedef struct cudaHostGetFlags_v3020_params_st {
568
+ unsigned int *pFlags;
569
+ void *pHost;
570
+ } cudaHostGetFlags_v3020_params;
571
+
572
+ typedef struct cudaMalloc3D_v3020_params_st {
573
+ struct cudaPitchedPtr *pitchedDevPtr;
574
+ struct cudaExtent extent;
575
+ } cudaMalloc3D_v3020_params;
576
+
577
+ typedef struct cudaMalloc3DArray_v3020_params_st {
578
+ cudaArray_t *array;
579
+ const struct cudaChannelFormatDesc *desc;
580
+ struct cudaExtent extent;
581
+ unsigned int flags;
582
+ } cudaMalloc3DArray_v3020_params;
583
+
584
+ typedef struct cudaMallocMipmappedArray_v5000_params_st {
585
+ cudaMipmappedArray_t *mipmappedArray;
586
+ const struct cudaChannelFormatDesc *desc;
587
+ struct cudaExtent extent;
588
+ unsigned int numLevels;
589
+ unsigned int flags;
590
+ } cudaMallocMipmappedArray_v5000_params;
591
+
592
+ typedef struct cudaGetMipmappedArrayLevel_v5000_params_st {
593
+ cudaArray_t *levelArray;
594
+ cudaMipmappedArray_const_t mipmappedArray;
595
+ unsigned int level;
596
+ } cudaGetMipmappedArrayLevel_v5000_params;
597
+
598
+ typedef struct cudaMemcpy3D_ptds_v7000_params_st {
599
+ const struct cudaMemcpy3DParms *p;
600
+ } cudaMemcpy3D_ptds_v7000_params;
601
+
602
+ typedef struct cudaMemcpy3DPeer_ptds_v7000_params_st {
603
+ const struct cudaMemcpy3DPeerParms *p;
604
+ } cudaMemcpy3DPeer_ptds_v7000_params;
605
+
606
+ typedef struct cudaMemcpy3DAsync_ptsz_v7000_params_st {
607
+ const struct cudaMemcpy3DParms *p;
608
+ cudaStream_t stream;
609
+ } cudaMemcpy3DAsync_ptsz_v7000_params;
610
+
611
+ typedef struct cudaMemcpy3DPeerAsync_ptsz_v7000_params_st {
612
+ const struct cudaMemcpy3DPeerParms *p;
613
+ cudaStream_t stream;
614
+ } cudaMemcpy3DPeerAsync_ptsz_v7000_params;
615
+
616
+ typedef struct cudaMemGetInfo_v3020_params_st {
617
+ size_t *free;
618
+ size_t *total;
619
+ } cudaMemGetInfo_v3020_params;
620
+
621
+ typedef struct cudaArrayGetInfo_v4010_params_st {
622
+ struct cudaChannelFormatDesc *desc;
623
+ struct cudaExtent *extent;
624
+ unsigned int *flags;
625
+ cudaArray_t array;
626
+ } cudaArrayGetInfo_v4010_params;
627
+
628
+ typedef struct cudaArrayGetPlane_v11020_params_st {
629
+ cudaArray_t *pPlaneArray;
630
+ cudaArray_t hArray;
631
+ unsigned int planeIdx;
632
+ } cudaArrayGetPlane_v11020_params;
633
+
634
+ typedef struct cudaArrayGetMemoryRequirements_v11060_params_st {
635
+ struct cudaArrayMemoryRequirements *memoryRequirements;
636
+ cudaArray_t array;
637
+ int device;
638
+ } cudaArrayGetMemoryRequirements_v11060_params;
639
+
640
+ typedef struct cudaMipmappedArrayGetMemoryRequirements_v11060_params_st {
641
+ struct cudaArrayMemoryRequirements *memoryRequirements;
642
+ cudaMipmappedArray_t mipmap;
643
+ int device;
644
+ } cudaMipmappedArrayGetMemoryRequirements_v11060_params;
645
+
646
+ typedef struct cudaArrayGetSparseProperties_v11010_params_st {
647
+ struct cudaArraySparseProperties *sparseProperties;
648
+ cudaArray_t array;
649
+ } cudaArrayGetSparseProperties_v11010_params;
650
+
651
+ typedef struct cudaMipmappedArrayGetSparseProperties_v11010_params_st {
652
+ struct cudaArraySparseProperties *sparseProperties;
653
+ cudaMipmappedArray_t mipmap;
654
+ } cudaMipmappedArrayGetSparseProperties_v11010_params;
655
+
656
+ typedef struct cudaMemcpy_ptds_v7000_params_st {
657
+ void *dst;
658
+ const void *src;
659
+ size_t count;
660
+ enum cudaMemcpyKind kind;
661
+ } cudaMemcpy_ptds_v7000_params;
662
+
663
+ typedef struct cudaMemcpyPeer_v4000_params_st {
664
+ void *dst;
665
+ int dstDevice;
666
+ const void *src;
667
+ int srcDevice;
668
+ size_t count;
669
+ } cudaMemcpyPeer_v4000_params;
670
+
671
+ typedef struct cudaMemcpy2D_ptds_v7000_params_st {
672
+ void *dst;
673
+ size_t dpitch;
674
+ const void *src;
675
+ size_t spitch;
676
+ size_t width;
677
+ size_t height;
678
+ enum cudaMemcpyKind kind;
679
+ } cudaMemcpy2D_ptds_v7000_params;
680
+
681
+ typedef struct cudaMemcpy2DToArray_ptds_v7000_params_st {
682
+ cudaArray_t dst;
683
+ size_t wOffset;
684
+ size_t hOffset;
685
+ const void *src;
686
+ size_t spitch;
687
+ size_t width;
688
+ size_t height;
689
+ enum cudaMemcpyKind kind;
690
+ } cudaMemcpy2DToArray_ptds_v7000_params;
691
+
692
+ typedef struct cudaMemcpy2DFromArray_ptds_v7000_params_st {
693
+ void *dst;
694
+ size_t dpitch;
695
+ cudaArray_const_t src;
696
+ size_t wOffset;
697
+ size_t hOffset;
698
+ size_t width;
699
+ size_t height;
700
+ enum cudaMemcpyKind kind;
701
+ } cudaMemcpy2DFromArray_ptds_v7000_params;
702
+
703
+ typedef struct cudaMemcpy2DArrayToArray_ptds_v7000_params_st {
704
+ cudaArray_t dst;
705
+ size_t wOffsetDst;
706
+ size_t hOffsetDst;
707
+ cudaArray_const_t src;
708
+ size_t wOffsetSrc;
709
+ size_t hOffsetSrc;
710
+ size_t width;
711
+ size_t height;
712
+ enum cudaMemcpyKind kind;
713
+ } cudaMemcpy2DArrayToArray_ptds_v7000_params;
714
+
715
+ typedef struct cudaMemcpyToSymbol_ptds_v7000_params_st {
716
+ const void *symbol;
717
+ const void *src;
718
+ size_t count;
719
+ size_t offset;
720
+ enum cudaMemcpyKind kind;
721
+ } cudaMemcpyToSymbol_ptds_v7000_params;
722
+
723
+ typedef struct cudaMemcpyFromSymbol_ptds_v7000_params_st {
724
+ void *dst;
725
+ const void *symbol;
726
+ size_t count;
727
+ size_t offset;
728
+ enum cudaMemcpyKind kind;
729
+ } cudaMemcpyFromSymbol_ptds_v7000_params;
730
+
731
+ typedef struct cudaMemcpyAsync_ptsz_v7000_params_st {
732
+ void *dst;
733
+ const void *src;
734
+ size_t count;
735
+ enum cudaMemcpyKind kind;
736
+ cudaStream_t stream;
737
+ } cudaMemcpyAsync_ptsz_v7000_params;
738
+
739
+ typedef struct cudaMemcpyPeerAsync_v4000_params_st {
740
+ void *dst;
741
+ int dstDevice;
742
+ const void *src;
743
+ int srcDevice;
744
+ size_t count;
745
+ cudaStream_t stream;
746
+ } cudaMemcpyPeerAsync_v4000_params;
747
+
748
+ typedef struct cudaMemcpy2DAsync_ptsz_v7000_params_st {
749
+ void *dst;
750
+ size_t dpitch;
751
+ const void *src;
752
+ size_t spitch;
753
+ size_t width;
754
+ size_t height;
755
+ enum cudaMemcpyKind kind;
756
+ cudaStream_t stream;
757
+ } cudaMemcpy2DAsync_ptsz_v7000_params;
758
+
759
+ typedef struct cudaMemcpy2DToArrayAsync_ptsz_v7000_params_st {
760
+ cudaArray_t dst;
761
+ size_t wOffset;
762
+ size_t hOffset;
763
+ const void *src;
764
+ size_t spitch;
765
+ size_t width;
766
+ size_t height;
767
+ enum cudaMemcpyKind kind;
768
+ cudaStream_t stream;
769
+ } cudaMemcpy2DToArrayAsync_ptsz_v7000_params;
770
+
771
+ typedef struct cudaMemcpy2DFromArrayAsync_ptsz_v7000_params_st {
772
+ void *dst;
773
+ size_t dpitch;
774
+ cudaArray_const_t src;
775
+ size_t wOffset;
776
+ size_t hOffset;
777
+ size_t width;
778
+ size_t height;
779
+ enum cudaMemcpyKind kind;
780
+ cudaStream_t stream;
781
+ } cudaMemcpy2DFromArrayAsync_ptsz_v7000_params;
782
+
783
+ typedef struct cudaMemcpyToSymbolAsync_ptsz_v7000_params_st {
784
+ const void *symbol;
785
+ const void *src;
786
+ size_t count;
787
+ size_t offset;
788
+ enum cudaMemcpyKind kind;
789
+ cudaStream_t stream;
790
+ } cudaMemcpyToSymbolAsync_ptsz_v7000_params;
791
+
792
+ typedef struct cudaMemcpyFromSymbolAsync_ptsz_v7000_params_st {
793
+ void *dst;
794
+ const void *symbol;
795
+ size_t count;
796
+ size_t offset;
797
+ enum cudaMemcpyKind kind;
798
+ cudaStream_t stream;
799
+ } cudaMemcpyFromSymbolAsync_ptsz_v7000_params;
800
+
801
+ typedef struct cudaMemset_ptds_v7000_params_st {
802
+ void *devPtr;
803
+ int value;
804
+ size_t count;
805
+ } cudaMemset_ptds_v7000_params;
806
+
807
+ typedef struct cudaMemset2D_ptds_v7000_params_st {
808
+ void *devPtr;
809
+ size_t pitch;
810
+ int value;
811
+ size_t width;
812
+ size_t height;
813
+ } cudaMemset2D_ptds_v7000_params;
814
+
815
+ typedef struct cudaMemset3D_ptds_v7000_params_st {
816
+ struct cudaPitchedPtr pitchedDevPtr;
817
+ int value;
818
+ struct cudaExtent extent;
819
+ } cudaMemset3D_ptds_v7000_params;
820
+
821
+ typedef struct cudaMemsetAsync_ptsz_v7000_params_st {
822
+ void *devPtr;
823
+ int value;
824
+ size_t count;
825
+ cudaStream_t stream;
826
+ } cudaMemsetAsync_ptsz_v7000_params;
827
+
828
+ typedef struct cudaMemset2DAsync_ptsz_v7000_params_st {
829
+ void *devPtr;
830
+ size_t pitch;
831
+ int value;
832
+ size_t width;
833
+ size_t height;
834
+ cudaStream_t stream;
835
+ } cudaMemset2DAsync_ptsz_v7000_params;
836
+
837
+ typedef struct cudaMemset3DAsync_ptsz_v7000_params_st {
838
+ struct cudaPitchedPtr pitchedDevPtr;
839
+ int value;
840
+ struct cudaExtent extent;
841
+ cudaStream_t stream;
842
+ } cudaMemset3DAsync_ptsz_v7000_params;
843
+
844
+ typedef struct cudaGetSymbolAddress_v3020_params_st {
845
+ void **devPtr;
846
+ const void *symbol;
847
+ } cudaGetSymbolAddress_v3020_params;
848
+
849
+ typedef struct cudaGetSymbolSize_v3020_params_st {
850
+ size_t *size;
851
+ const void *symbol;
852
+ } cudaGetSymbolSize_v3020_params;
853
+
854
+ typedef struct cudaMemPrefetchAsync_ptsz_v8000_params_st {
855
+ const void *devPtr;
856
+ size_t count;
857
+ int dstDevice;
858
+ cudaStream_t stream;
859
+ } cudaMemPrefetchAsync_ptsz_v8000_params;
860
+
861
+ typedef struct cudaMemPrefetchAsync_v2_ptsz_v12020_params_st {
862
+ const void *devPtr;
863
+ size_t count;
864
+ struct cudaMemLocation location;
865
+ unsigned int flags;
866
+ cudaStream_t stream;
867
+ } cudaMemPrefetchAsync_v2_ptsz_v12020_params;
868
+
869
+ typedef struct cudaMemAdvise_v8000_params_st {
870
+ const void *devPtr;
871
+ size_t count;
872
+ enum cudaMemoryAdvise advice;
873
+ int device;
874
+ } cudaMemAdvise_v8000_params;
875
+
876
+ typedef struct cudaMemAdvise_v2_v12020_params_st {
877
+ const void *devPtr;
878
+ size_t count;
879
+ enum cudaMemoryAdvise advice;
880
+ struct cudaMemLocation location;
881
+ } cudaMemAdvise_v2_v12020_params;
882
+
883
+ typedef struct cudaMemRangeGetAttribute_v8000_params_st {
884
+ void *data;
885
+ size_t dataSize;
886
+ enum cudaMemRangeAttribute attribute;
887
+ const void *devPtr;
888
+ size_t count;
889
+ } cudaMemRangeGetAttribute_v8000_params;
890
+
891
+ typedef struct cudaMemRangeGetAttributes_v8000_params_st {
892
+ void **data;
893
+ size_t *dataSizes;
894
+ enum cudaMemRangeAttribute *attributes;
895
+ size_t numAttributes;
896
+ const void *devPtr;
897
+ size_t count;
898
+ } cudaMemRangeGetAttributes_v8000_params;
899
+
900
+ typedef struct cudaMemcpyToArray_ptds_v7000_params_st {
901
+ cudaArray_t dst;
902
+ size_t wOffset;
903
+ size_t hOffset;
904
+ const void *src;
905
+ size_t count;
906
+ enum cudaMemcpyKind kind;
907
+ } cudaMemcpyToArray_ptds_v7000_params;
908
+
909
+ typedef struct cudaMemcpyFromArray_ptds_v7000_params_st {
910
+ void *dst;
911
+ cudaArray_const_t src;
912
+ size_t wOffset;
913
+ size_t hOffset;
914
+ size_t count;
915
+ enum cudaMemcpyKind kind;
916
+ } cudaMemcpyFromArray_ptds_v7000_params;
917
+
918
+ typedef struct cudaMemcpyArrayToArray_ptds_v7000_params_st {
919
+ cudaArray_t dst;
920
+ size_t wOffsetDst;
921
+ size_t hOffsetDst;
922
+ cudaArray_const_t src;
923
+ size_t wOffsetSrc;
924
+ size_t hOffsetSrc;
925
+ size_t count;
926
+ enum cudaMemcpyKind kind;
927
+ } cudaMemcpyArrayToArray_ptds_v7000_params;
928
+
929
+ typedef struct cudaMemcpyToArrayAsync_ptsz_v7000_params_st {
930
+ cudaArray_t dst;
931
+ size_t wOffset;
932
+ size_t hOffset;
933
+ const void *src;
934
+ size_t count;
935
+ enum cudaMemcpyKind kind;
936
+ cudaStream_t stream;
937
+ } cudaMemcpyToArrayAsync_ptsz_v7000_params;
938
+
939
+ typedef struct cudaMemcpyFromArrayAsync_ptsz_v7000_params_st {
940
+ void *dst;
941
+ cudaArray_const_t src;
942
+ size_t wOffset;
943
+ size_t hOffset;
944
+ size_t count;
945
+ enum cudaMemcpyKind kind;
946
+ cudaStream_t stream;
947
+ } cudaMemcpyFromArrayAsync_ptsz_v7000_params;
948
+
949
+ typedef struct cudaMallocAsync_ptsz_v11020_params_st {
950
+ void **devPtr;
951
+ size_t size;
952
+ cudaStream_t hStream;
953
+ } cudaMallocAsync_ptsz_v11020_params;
954
+
955
+ typedef struct cudaFreeAsync_ptsz_v11020_params_st {
956
+ void *devPtr;
957
+ cudaStream_t hStream;
958
+ } cudaFreeAsync_ptsz_v11020_params;
959
+
960
+ typedef struct cudaMemPoolTrimTo_v11020_params_st {
961
+ cudaMemPool_t memPool;
962
+ size_t minBytesToKeep;
963
+ } cudaMemPoolTrimTo_v11020_params;
964
+
965
+ typedef struct cudaMemPoolSetAttribute_v11020_params_st {
966
+ cudaMemPool_t memPool;
967
+ enum cudaMemPoolAttr attr;
968
+ void *value;
969
+ } cudaMemPoolSetAttribute_v11020_params;
970
+
971
+ typedef struct cudaMemPoolGetAttribute_v11020_params_st {
972
+ cudaMemPool_t memPool;
973
+ enum cudaMemPoolAttr attr;
974
+ void *value;
975
+ } cudaMemPoolGetAttribute_v11020_params;
976
+
977
+ typedef struct cudaMemPoolSetAccess_v11020_params_st {
978
+ cudaMemPool_t memPool;
979
+ const struct cudaMemAccessDesc *descList;
980
+ size_t count;
981
+ } cudaMemPoolSetAccess_v11020_params;
982
+
983
+ typedef struct cudaMemPoolGetAccess_v11020_params_st {
984
+ enum cudaMemAccessFlags *flags;
985
+ cudaMemPool_t memPool;
986
+ struct cudaMemLocation *location;
987
+ } cudaMemPoolGetAccess_v11020_params;
988
+
989
+ typedef struct cudaMemPoolCreate_v11020_params_st {
990
+ cudaMemPool_t *memPool;
991
+ const struct cudaMemPoolProps *poolProps;
992
+ } cudaMemPoolCreate_v11020_params;
993
+
994
+ typedef struct cudaMemPoolDestroy_v11020_params_st {
995
+ cudaMemPool_t memPool;
996
+ } cudaMemPoolDestroy_v11020_params;
997
+
998
+ typedef struct cudaMallocFromPoolAsync_ptsz_v11020_params_st {
999
+ void **ptr;
1000
+ size_t size;
1001
+ cudaMemPool_t memPool;
1002
+ cudaStream_t stream;
1003
+ } cudaMallocFromPoolAsync_ptsz_v11020_params;
1004
+
1005
+ typedef struct cudaMemPoolExportToShareableHandle_v11020_params_st {
1006
+ void *shareableHandle;
1007
+ cudaMemPool_t memPool;
1008
+ enum cudaMemAllocationHandleType handleType;
1009
+ unsigned int flags;
1010
+ } cudaMemPoolExportToShareableHandle_v11020_params;
1011
+
1012
+ typedef struct cudaMemPoolImportFromShareableHandle_v11020_params_st {
1013
+ cudaMemPool_t *memPool;
1014
+ void *shareableHandle;
1015
+ enum cudaMemAllocationHandleType handleType;
1016
+ unsigned int flags;
1017
+ } cudaMemPoolImportFromShareableHandle_v11020_params;
1018
+
1019
+ typedef struct cudaMemPoolExportPointer_v11020_params_st {
1020
+ struct cudaMemPoolPtrExportData *exportData;
1021
+ void *ptr;
1022
+ } cudaMemPoolExportPointer_v11020_params;
1023
+
1024
+ typedef struct cudaMemPoolImportPointer_v11020_params_st {
1025
+ void **ptr;
1026
+ cudaMemPool_t memPool;
1027
+ struct cudaMemPoolPtrExportData *exportData;
1028
+ } cudaMemPoolImportPointer_v11020_params;
1029
+
1030
+ typedef struct cudaPointerGetAttributes_v4000_params_st {
1031
+ struct cudaPointerAttributes *attributes;
1032
+ const void *ptr;
1033
+ } cudaPointerGetAttributes_v4000_params;
1034
+
1035
+ typedef struct cudaDeviceCanAccessPeer_v4000_params_st {
1036
+ int *canAccessPeer;
1037
+ int device;
1038
+ int peerDevice;
1039
+ } cudaDeviceCanAccessPeer_v4000_params;
1040
+
1041
+ typedef struct cudaDeviceEnablePeerAccess_v4000_params_st {
1042
+ int peerDevice;
1043
+ unsigned int flags;
1044
+ } cudaDeviceEnablePeerAccess_v4000_params;
1045
+
1046
+ typedef struct cudaDeviceDisablePeerAccess_v4000_params_st {
1047
+ int peerDevice;
1048
+ } cudaDeviceDisablePeerAccess_v4000_params;
1049
+
1050
+ typedef struct cudaGraphicsUnregisterResource_v3020_params_st {
1051
+ cudaGraphicsResource_t resource;
1052
+ } cudaGraphicsUnregisterResource_v3020_params;
1053
+
1054
+ typedef struct cudaGraphicsResourceSetMapFlags_v3020_params_st {
1055
+ cudaGraphicsResource_t resource;
1056
+ unsigned int flags;
1057
+ } cudaGraphicsResourceSetMapFlags_v3020_params;
1058
+
1059
+ typedef struct cudaGraphicsMapResources_v3020_params_st {
1060
+ int count;
1061
+ cudaGraphicsResource_t *resources;
1062
+ cudaStream_t stream;
1063
+ } cudaGraphicsMapResources_v3020_params;
1064
+
1065
+ typedef struct cudaGraphicsUnmapResources_v3020_params_st {
1066
+ int count;
1067
+ cudaGraphicsResource_t *resources;
1068
+ cudaStream_t stream;
1069
+ } cudaGraphicsUnmapResources_v3020_params;
1070
+
1071
+ typedef struct cudaGraphicsResourceGetMappedPointer_v3020_params_st {
1072
+ void **devPtr;
1073
+ size_t *size;
1074
+ cudaGraphicsResource_t resource;
1075
+ } cudaGraphicsResourceGetMappedPointer_v3020_params;
1076
+
1077
+ typedef struct cudaGraphicsSubResourceGetMappedArray_v3020_params_st {
1078
+ cudaArray_t *array;
1079
+ cudaGraphicsResource_t resource;
1080
+ unsigned int arrayIndex;
1081
+ unsigned int mipLevel;
1082
+ } cudaGraphicsSubResourceGetMappedArray_v3020_params;
1083
+
1084
+ typedef struct cudaGraphicsResourceGetMappedMipmappedArray_v5000_params_st {
1085
+ cudaMipmappedArray_t *mipmappedArray;
1086
+ cudaGraphicsResource_t resource;
1087
+ } cudaGraphicsResourceGetMappedMipmappedArray_v5000_params;
1088
+
1089
+ typedef struct cudaGetChannelDesc_v3020_params_st {
1090
+ struct cudaChannelFormatDesc *desc;
1091
+ cudaArray_const_t array;
1092
+ } cudaGetChannelDesc_v3020_params;
1093
+
1094
+ typedef struct cudaCreateChannelDesc_v3020_params_st {
1095
+ int x;
1096
+ int y;
1097
+ int z;
1098
+ int w;
1099
+ enum cudaChannelFormatKind f;
1100
+ } cudaCreateChannelDesc_v3020_params;
1101
+
1102
+ typedef struct cudaCreateTextureObject_v5000_params_st {
1103
+ cudaTextureObject_t *pTexObject;
1104
+ const struct cudaResourceDesc *pResDesc;
1105
+ const struct cudaTextureDesc *pTexDesc;
1106
+ const struct cudaResourceViewDesc *pResViewDesc;
1107
+ } cudaCreateTextureObject_v5000_params;
1108
+
1109
+ typedef struct cudaDestroyTextureObject_v5000_params_st {
1110
+ cudaTextureObject_t texObject;
1111
+ } cudaDestroyTextureObject_v5000_params;
1112
+
1113
+ typedef struct cudaGetTextureObjectResourceDesc_v5000_params_st {
1114
+ struct cudaResourceDesc *pResDesc;
1115
+ cudaTextureObject_t texObject;
1116
+ } cudaGetTextureObjectResourceDesc_v5000_params;
1117
+
1118
+ typedef struct cudaGetTextureObjectTextureDesc_v5000_params_st {
1119
+ struct cudaTextureDesc *pTexDesc;
1120
+ cudaTextureObject_t texObject;
1121
+ } cudaGetTextureObjectTextureDesc_v5000_params;
1122
+
1123
+ typedef struct cudaGetTextureObjectResourceViewDesc_v5000_params_st {
1124
+ struct cudaResourceViewDesc *pResViewDesc;
1125
+ cudaTextureObject_t texObject;
1126
+ } cudaGetTextureObjectResourceViewDesc_v5000_params;
1127
+
1128
+ typedef struct cudaCreateSurfaceObject_v5000_params_st {
1129
+ cudaSurfaceObject_t *pSurfObject;
1130
+ const struct cudaResourceDesc *pResDesc;
1131
+ } cudaCreateSurfaceObject_v5000_params;
1132
+
1133
+ typedef struct cudaDestroySurfaceObject_v5000_params_st {
1134
+ cudaSurfaceObject_t surfObject;
1135
+ } cudaDestroySurfaceObject_v5000_params;
1136
+
1137
+ typedef struct cudaGetSurfaceObjectResourceDesc_v5000_params_st {
1138
+ struct cudaResourceDesc *pResDesc;
1139
+ cudaSurfaceObject_t surfObject;
1140
+ } cudaGetSurfaceObjectResourceDesc_v5000_params;
1141
+
1142
+ typedef struct cudaDriverGetVersion_v3020_params_st {
1143
+ int *driverVersion;
1144
+ } cudaDriverGetVersion_v3020_params;
1145
+
1146
+ typedef struct cudaRuntimeGetVersion_v3020_params_st {
1147
+ int *runtimeVersion;
1148
+ } cudaRuntimeGetVersion_v3020_params;
1149
+
1150
+ typedef struct cudaGraphCreate_v10000_params_st {
1151
+ cudaGraph_t *pGraph;
1152
+ unsigned int flags;
1153
+ } cudaGraphCreate_v10000_params;
1154
+
1155
+ typedef struct cudaGraphAddKernelNode_v10000_params_st {
1156
+ cudaGraphNode_t *pGraphNode;
1157
+ cudaGraph_t graph;
1158
+ const cudaGraphNode_t *pDependencies;
1159
+ size_t numDependencies;
1160
+ const struct cudaKernelNodeParams *pNodeParams;
1161
+ } cudaGraphAddKernelNode_v10000_params;
1162
+
1163
+ typedef struct cudaGraphKernelNodeGetParams_v10000_params_st {
1164
+ cudaGraphNode_t node;
1165
+ struct cudaKernelNodeParams *pNodeParams;
1166
+ } cudaGraphKernelNodeGetParams_v10000_params;
1167
+
1168
+ typedef struct cudaGraphKernelNodeSetParams_v10000_params_st {
1169
+ cudaGraphNode_t node;
1170
+ const struct cudaKernelNodeParams *pNodeParams;
1171
+ } cudaGraphKernelNodeSetParams_v10000_params;
1172
+
1173
+ typedef struct cudaGraphKernelNodeCopyAttributes_v11000_params_st {
1174
+ cudaGraphNode_t hSrc;
1175
+ cudaGraphNode_t hDst;
1176
+ } cudaGraphKernelNodeCopyAttributes_v11000_params;
1177
+
1178
+ typedef struct cudaGraphKernelNodeGetAttribute_v11000_params_st {
1179
+ cudaGraphNode_t hNode;
1180
+ cudaKernelNodeAttrID attr;
1181
+ cudaKernelNodeAttrValue *value_out;
1182
+ } cudaGraphKernelNodeGetAttribute_v11000_params;
1183
+
1184
+ typedef struct cudaGraphKernelNodeSetAttribute_v11000_params_st {
1185
+ cudaGraphNode_t hNode;
1186
+ cudaKernelNodeAttrID attr;
1187
+ const cudaKernelNodeAttrValue *value;
1188
+ } cudaGraphKernelNodeSetAttribute_v11000_params;
1189
+
1190
+ typedef struct cudaGraphAddMemcpyNode_v10000_params_st {
1191
+ cudaGraphNode_t *pGraphNode;
1192
+ cudaGraph_t graph;
1193
+ const cudaGraphNode_t *pDependencies;
1194
+ size_t numDependencies;
1195
+ const struct cudaMemcpy3DParms *pCopyParams;
1196
+ } cudaGraphAddMemcpyNode_v10000_params;
1197
+
1198
+ typedef struct cudaGraphAddMemcpyNodeToSymbol_v11010_params_st {
1199
+ cudaGraphNode_t *pGraphNode;
1200
+ cudaGraph_t graph;
1201
+ const cudaGraphNode_t *pDependencies;
1202
+ size_t numDependencies;
1203
+ const void *symbol;
1204
+ const void *src;
1205
+ size_t count;
1206
+ size_t offset;
1207
+ enum cudaMemcpyKind kind;
1208
+ } cudaGraphAddMemcpyNodeToSymbol_v11010_params;
1209
+
1210
+ typedef struct cudaGraphAddMemcpyNodeFromSymbol_v11010_params_st {
1211
+ cudaGraphNode_t *pGraphNode;
1212
+ cudaGraph_t graph;
1213
+ const cudaGraphNode_t *pDependencies;
1214
+ size_t numDependencies;
1215
+ void *dst;
1216
+ const void *symbol;
1217
+ size_t count;
1218
+ size_t offset;
1219
+ enum cudaMemcpyKind kind;
1220
+ } cudaGraphAddMemcpyNodeFromSymbol_v11010_params;
1221
+
1222
+ typedef struct cudaGraphAddMemcpyNode1D_v11010_params_st {
1223
+ cudaGraphNode_t *pGraphNode;
1224
+ cudaGraph_t graph;
1225
+ const cudaGraphNode_t *pDependencies;
1226
+ size_t numDependencies;
1227
+ void *dst;
1228
+ const void *src;
1229
+ size_t count;
1230
+ enum cudaMemcpyKind kind;
1231
+ } cudaGraphAddMemcpyNode1D_v11010_params;
1232
+
1233
+ typedef struct cudaGraphMemcpyNodeGetParams_v10000_params_st {
1234
+ cudaGraphNode_t node;
1235
+ struct cudaMemcpy3DParms *pNodeParams;
1236
+ } cudaGraphMemcpyNodeGetParams_v10000_params;
1237
+
1238
+ typedef struct cudaGraphMemcpyNodeSetParams_v10000_params_st {
1239
+ cudaGraphNode_t node;
1240
+ const struct cudaMemcpy3DParms *pNodeParams;
1241
+ } cudaGraphMemcpyNodeSetParams_v10000_params;
1242
+
1243
+ typedef struct cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params_st {
1244
+ cudaGraphNode_t node;
1245
+ const void *symbol;
1246
+ const void *src;
1247
+ size_t count;
1248
+ size_t offset;
1249
+ enum cudaMemcpyKind kind;
1250
+ } cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params;
1251
+
1252
+ typedef struct cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params_st {
1253
+ cudaGraphNode_t node;
1254
+ void *dst;
1255
+ const void *symbol;
1256
+ size_t count;
1257
+ size_t offset;
1258
+ enum cudaMemcpyKind kind;
1259
+ } cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params;
1260
+
1261
+ typedef struct cudaGraphMemcpyNodeSetParams1D_v11010_params_st {
1262
+ cudaGraphNode_t node;
1263
+ void *dst;
1264
+ const void *src;
1265
+ size_t count;
1266
+ enum cudaMemcpyKind kind;
1267
+ } cudaGraphMemcpyNodeSetParams1D_v11010_params;
1268
+
1269
+ typedef struct cudaGraphAddMemsetNode_v10000_params_st {
1270
+ cudaGraphNode_t *pGraphNode;
1271
+ cudaGraph_t graph;
1272
+ const cudaGraphNode_t *pDependencies;
1273
+ size_t numDependencies;
1274
+ const struct cudaMemsetParams *pMemsetParams;
1275
+ } cudaGraphAddMemsetNode_v10000_params;
1276
+
1277
+ typedef struct cudaGraphMemsetNodeGetParams_v10000_params_st {
1278
+ cudaGraphNode_t node;
1279
+ struct cudaMemsetParams *pNodeParams;
1280
+ } cudaGraphMemsetNodeGetParams_v10000_params;
1281
+
1282
+ typedef struct cudaGraphMemsetNodeSetParams_v10000_params_st {
1283
+ cudaGraphNode_t node;
1284
+ const struct cudaMemsetParams *pNodeParams;
1285
+ } cudaGraphMemsetNodeSetParams_v10000_params;
1286
+
1287
+ typedef struct cudaGraphAddHostNode_v10000_params_st {
1288
+ cudaGraphNode_t *pGraphNode;
1289
+ cudaGraph_t graph;
1290
+ const cudaGraphNode_t *pDependencies;
1291
+ size_t numDependencies;
1292
+ const struct cudaHostNodeParams *pNodeParams;
1293
+ } cudaGraphAddHostNode_v10000_params;
1294
+
1295
+ typedef struct cudaGraphHostNodeGetParams_v10000_params_st {
1296
+ cudaGraphNode_t node;
1297
+ struct cudaHostNodeParams *pNodeParams;
1298
+ } cudaGraphHostNodeGetParams_v10000_params;
1299
+
1300
+ typedef struct cudaGraphHostNodeSetParams_v10000_params_st {
1301
+ cudaGraphNode_t node;
1302
+ const struct cudaHostNodeParams *pNodeParams;
1303
+ } cudaGraphHostNodeSetParams_v10000_params;
1304
+
1305
+ typedef struct cudaGraphAddChildGraphNode_v10000_params_st {
1306
+ cudaGraphNode_t *pGraphNode;
1307
+ cudaGraph_t graph;
1308
+ const cudaGraphNode_t *pDependencies;
1309
+ size_t numDependencies;
1310
+ cudaGraph_t childGraph;
1311
+ } cudaGraphAddChildGraphNode_v10000_params;
1312
+
1313
+ typedef struct cudaGraphChildGraphNodeGetGraph_v10000_params_st {
1314
+ cudaGraphNode_t node;
1315
+ cudaGraph_t *pGraph;
1316
+ } cudaGraphChildGraphNodeGetGraph_v10000_params;
1317
+
1318
+ typedef struct cudaGraphAddEmptyNode_v10000_params_st {
1319
+ cudaGraphNode_t *pGraphNode;
1320
+ cudaGraph_t graph;
1321
+ const cudaGraphNode_t *pDependencies;
1322
+ size_t numDependencies;
1323
+ } cudaGraphAddEmptyNode_v10000_params;
1324
+
1325
+ typedef struct cudaGraphAddEventRecordNode_v11010_params_st {
1326
+ cudaGraphNode_t *pGraphNode;
1327
+ cudaGraph_t graph;
1328
+ const cudaGraphNode_t *pDependencies;
1329
+ size_t numDependencies;
1330
+ cudaEvent_t event;
1331
+ } cudaGraphAddEventRecordNode_v11010_params;
1332
+
1333
+ typedef struct cudaGraphEventRecordNodeGetEvent_v11010_params_st {
1334
+ cudaGraphNode_t node;
1335
+ cudaEvent_t *event_out;
1336
+ } cudaGraphEventRecordNodeGetEvent_v11010_params;
1337
+
1338
+ typedef struct cudaGraphEventRecordNodeSetEvent_v11010_params_st {
1339
+ cudaGraphNode_t node;
1340
+ cudaEvent_t event;
1341
+ } cudaGraphEventRecordNodeSetEvent_v11010_params;
1342
+
1343
+ typedef struct cudaGraphAddEventWaitNode_v11010_params_st {
1344
+ cudaGraphNode_t *pGraphNode;
1345
+ cudaGraph_t graph;
1346
+ const cudaGraphNode_t *pDependencies;
1347
+ size_t numDependencies;
1348
+ cudaEvent_t event;
1349
+ } cudaGraphAddEventWaitNode_v11010_params;
1350
+
1351
+ typedef struct cudaGraphEventWaitNodeGetEvent_v11010_params_st {
1352
+ cudaGraphNode_t node;
1353
+ cudaEvent_t *event_out;
1354
+ } cudaGraphEventWaitNodeGetEvent_v11010_params;
1355
+
1356
+ typedef struct cudaGraphEventWaitNodeSetEvent_v11010_params_st {
1357
+ cudaGraphNode_t node;
1358
+ cudaEvent_t event;
1359
+ } cudaGraphEventWaitNodeSetEvent_v11010_params;
1360
+
1361
+ typedef struct cudaGraphAddExternalSemaphoresSignalNode_v11020_params_st {
1362
+ cudaGraphNode_t *pGraphNode;
1363
+ cudaGraph_t graph;
1364
+ const cudaGraphNode_t *pDependencies;
1365
+ size_t numDependencies;
1366
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1367
+ } cudaGraphAddExternalSemaphoresSignalNode_v11020_params;
1368
+
1369
+ typedef struct cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params_st {
1370
+ cudaGraphNode_t hNode;
1371
+ struct cudaExternalSemaphoreSignalNodeParams *params_out;
1372
+ } cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params;
1373
+
1374
+ typedef struct cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params_st {
1375
+ cudaGraphNode_t hNode;
1376
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1377
+ } cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params;
1378
+
1379
+ typedef struct cudaGraphAddExternalSemaphoresWaitNode_v11020_params_st {
1380
+ cudaGraphNode_t *pGraphNode;
1381
+ cudaGraph_t graph;
1382
+ const cudaGraphNode_t *pDependencies;
1383
+ size_t numDependencies;
1384
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1385
+ } cudaGraphAddExternalSemaphoresWaitNode_v11020_params;
1386
+
1387
+ typedef struct cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params_st {
1388
+ cudaGraphNode_t hNode;
1389
+ struct cudaExternalSemaphoreWaitNodeParams *params_out;
1390
+ } cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params;
1391
+
1392
+ typedef struct cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params_st {
1393
+ cudaGraphNode_t hNode;
1394
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1395
+ } cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params;
1396
+
1397
+ typedef struct cudaGraphAddMemAllocNode_v11040_params_st {
1398
+ cudaGraphNode_t *pGraphNode;
1399
+ cudaGraph_t graph;
1400
+ const cudaGraphNode_t *pDependencies;
1401
+ size_t numDependencies;
1402
+ struct cudaMemAllocNodeParams *nodeParams;
1403
+ } cudaGraphAddMemAllocNode_v11040_params;
1404
+
1405
+ typedef struct cudaGraphMemAllocNodeGetParams_v11040_params_st {
1406
+ cudaGraphNode_t node;
1407
+ struct cudaMemAllocNodeParams *params_out;
1408
+ } cudaGraphMemAllocNodeGetParams_v11040_params;
1409
+
1410
+ typedef struct cudaGraphAddMemFreeNode_v11040_params_st {
1411
+ cudaGraphNode_t *pGraphNode;
1412
+ cudaGraph_t graph;
1413
+ const cudaGraphNode_t *pDependencies;
1414
+ size_t numDependencies;
1415
+ void *dptr;
1416
+ } cudaGraphAddMemFreeNode_v11040_params;
1417
+
1418
+ typedef struct cudaGraphMemFreeNodeGetParams_v11040_params_st {
1419
+ cudaGraphNode_t node;
1420
+ void *dptr_out;
1421
+ } cudaGraphMemFreeNodeGetParams_v11040_params;
1422
+
1423
+ typedef struct cudaDeviceGraphMemTrim_v11040_params_st {
1424
+ int device;
1425
+ } cudaDeviceGraphMemTrim_v11040_params;
1426
+
1427
+ typedef struct cudaDeviceGetGraphMemAttribute_v11040_params_st {
1428
+ int device;
1429
+ enum cudaGraphMemAttributeType attr;
1430
+ void *value;
1431
+ } cudaDeviceGetGraphMemAttribute_v11040_params;
1432
+
1433
+ typedef struct cudaDeviceSetGraphMemAttribute_v11040_params_st {
1434
+ int device;
1435
+ enum cudaGraphMemAttributeType attr;
1436
+ void *value;
1437
+ } cudaDeviceSetGraphMemAttribute_v11040_params;
1438
+
1439
+ typedef struct cudaGraphClone_v10000_params_st {
1440
+ cudaGraph_t *pGraphClone;
1441
+ cudaGraph_t originalGraph;
1442
+ } cudaGraphClone_v10000_params;
1443
+
1444
+ typedef struct cudaGraphNodeFindInClone_v10000_params_st {
1445
+ cudaGraphNode_t *pNode;
1446
+ cudaGraphNode_t originalNode;
1447
+ cudaGraph_t clonedGraph;
1448
+ } cudaGraphNodeFindInClone_v10000_params;
1449
+
1450
+ typedef struct cudaGraphNodeGetType_v10000_params_st {
1451
+ cudaGraphNode_t node;
1452
+ enum cudaGraphNodeType *pType;
1453
+ } cudaGraphNodeGetType_v10000_params;
1454
+
1455
+ typedef struct cudaGraphGetNodes_v10000_params_st {
1456
+ cudaGraph_t graph;
1457
+ cudaGraphNode_t *nodes;
1458
+ size_t *numNodes;
1459
+ } cudaGraphGetNodes_v10000_params;
1460
+
1461
+ typedef struct cudaGraphGetRootNodes_v10000_params_st {
1462
+ cudaGraph_t graph;
1463
+ cudaGraphNode_t *pRootNodes;
1464
+ size_t *pNumRootNodes;
1465
+ } cudaGraphGetRootNodes_v10000_params;
1466
+
1467
+ typedef struct cudaGraphGetEdges_v10000_params_st {
1468
+ cudaGraph_t graph;
1469
+ cudaGraphNode_t *from;
1470
+ cudaGraphNode_t *to;
1471
+ size_t *numEdges;
1472
+ } cudaGraphGetEdges_v10000_params;
1473
+
1474
+ typedef struct cudaGraphGetEdges_v2_v12030_params_st {
1475
+ cudaGraph_t graph;
1476
+ cudaGraphNode_t *from;
1477
+ cudaGraphNode_t *to;
1478
+ cudaGraphEdgeData *edgeData;
1479
+ size_t *numEdges;
1480
+ } cudaGraphGetEdges_v2_v12030_params;
1481
+
1482
+ typedef struct cudaGraphNodeGetDependencies_v10000_params_st {
1483
+ cudaGraphNode_t node;
1484
+ cudaGraphNode_t *pDependencies;
1485
+ size_t *pNumDependencies;
1486
+ } cudaGraphNodeGetDependencies_v10000_params;
1487
+
1488
+ typedef struct cudaGraphNodeGetDependencies_v2_v12030_params_st {
1489
+ cudaGraphNode_t node;
1490
+ cudaGraphNode_t *pDependencies;
1491
+ cudaGraphEdgeData *edgeData;
1492
+ size_t *pNumDependencies;
1493
+ } cudaGraphNodeGetDependencies_v2_v12030_params;
1494
+
1495
+ typedef struct cudaGraphNodeGetDependentNodes_v10000_params_st {
1496
+ cudaGraphNode_t node;
1497
+ cudaGraphNode_t *pDependentNodes;
1498
+ size_t *pNumDependentNodes;
1499
+ } cudaGraphNodeGetDependentNodes_v10000_params;
1500
+
1501
+ typedef struct cudaGraphNodeGetDependentNodes_v2_v12030_params_st {
1502
+ cudaGraphNode_t node;
1503
+ cudaGraphNode_t *pDependentNodes;
1504
+ cudaGraphEdgeData *edgeData;
1505
+ size_t *pNumDependentNodes;
1506
+ } cudaGraphNodeGetDependentNodes_v2_v12030_params;
1507
+
1508
+ typedef struct cudaGraphAddDependencies_v10000_params_st {
1509
+ cudaGraph_t graph;
1510
+ const cudaGraphNode_t *from;
1511
+ const cudaGraphNode_t *to;
1512
+ size_t numDependencies;
1513
+ } cudaGraphAddDependencies_v10000_params;
1514
+
1515
+ typedef struct cudaGraphAddDependencies_v2_v12030_params_st {
1516
+ cudaGraph_t graph;
1517
+ const cudaGraphNode_t *from;
1518
+ const cudaGraphNode_t *to;
1519
+ const cudaGraphEdgeData *edgeData;
1520
+ size_t numDependencies;
1521
+ } cudaGraphAddDependencies_v2_v12030_params;
1522
+
1523
+ typedef struct cudaGraphRemoveDependencies_v10000_params_st {
1524
+ cudaGraph_t graph;
1525
+ const cudaGraphNode_t *from;
1526
+ const cudaGraphNode_t *to;
1527
+ size_t numDependencies;
1528
+ } cudaGraphRemoveDependencies_v10000_params;
1529
+
1530
+ typedef struct cudaGraphRemoveDependencies_v2_v12030_params_st {
1531
+ cudaGraph_t graph;
1532
+ const cudaGraphNode_t *from;
1533
+ const cudaGraphNode_t *to;
1534
+ const cudaGraphEdgeData *edgeData;
1535
+ size_t numDependencies;
1536
+ } cudaGraphRemoveDependencies_v2_v12030_params;
1537
+
1538
+ typedef struct cudaGraphDestroyNode_v10000_params_st {
1539
+ cudaGraphNode_t node;
1540
+ } cudaGraphDestroyNode_v10000_params;
1541
+
1542
+ typedef struct cudaGraphInstantiate_v12000_params_st {
1543
+ cudaGraphExec_t *pGraphExec;
1544
+ cudaGraph_t graph;
1545
+ unsigned long long flags;
1546
+ } cudaGraphInstantiate_v12000_params;
1547
+
1548
+ typedef struct cudaGraphInstantiateWithFlags_v11040_params_st {
1549
+ cudaGraphExec_t *pGraphExec;
1550
+ cudaGraph_t graph;
1551
+ unsigned long long flags;
1552
+ } cudaGraphInstantiateWithFlags_v11040_params;
1553
+
1554
+ typedef struct cudaGraphInstantiateWithParams_ptsz_v12000_params_st {
1555
+ cudaGraphExec_t *pGraphExec;
1556
+ cudaGraph_t graph;
1557
+ cudaGraphInstantiateParams *instantiateParams;
1558
+ } cudaGraphInstantiateWithParams_ptsz_v12000_params;
1559
+
1560
+ typedef struct cudaGraphExecGetFlags_v12000_params_st {
1561
+ cudaGraphExec_t graphExec;
1562
+ unsigned long long *flags;
1563
+ } cudaGraphExecGetFlags_v12000_params;
1564
+
1565
+ typedef struct cudaGraphExecKernelNodeSetParams_v10010_params_st {
1566
+ cudaGraphExec_t hGraphExec;
1567
+ cudaGraphNode_t node;
1568
+ const struct cudaKernelNodeParams *pNodeParams;
1569
+ } cudaGraphExecKernelNodeSetParams_v10010_params;
1570
+
1571
+ typedef struct cudaGraphExecMemcpyNodeSetParams_v10020_params_st {
1572
+ cudaGraphExec_t hGraphExec;
1573
+ cudaGraphNode_t node;
1574
+ const struct cudaMemcpy3DParms *pNodeParams;
1575
+ } cudaGraphExecMemcpyNodeSetParams_v10020_params;
1576
+
1577
+ typedef struct cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params_st {
1578
+ cudaGraphExec_t hGraphExec;
1579
+ cudaGraphNode_t node;
1580
+ const void *symbol;
1581
+ const void *src;
1582
+ size_t count;
1583
+ size_t offset;
1584
+ enum cudaMemcpyKind kind;
1585
+ } cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params;
1586
+
1587
+ typedef struct cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params_st {
1588
+ cudaGraphExec_t hGraphExec;
1589
+ cudaGraphNode_t node;
1590
+ void *dst;
1591
+ const void *symbol;
1592
+ size_t count;
1593
+ size_t offset;
1594
+ enum cudaMemcpyKind kind;
1595
+ } cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params;
1596
+
1597
+ typedef struct cudaGraphExecMemcpyNodeSetParams1D_v11010_params_st {
1598
+ cudaGraphExec_t hGraphExec;
1599
+ cudaGraphNode_t node;
1600
+ void *dst;
1601
+ const void *src;
1602
+ size_t count;
1603
+ enum cudaMemcpyKind kind;
1604
+ } cudaGraphExecMemcpyNodeSetParams1D_v11010_params;
1605
+
1606
+ typedef struct cudaGraphExecMemsetNodeSetParams_v10020_params_st {
1607
+ cudaGraphExec_t hGraphExec;
1608
+ cudaGraphNode_t node;
1609
+ const struct cudaMemsetParams *pNodeParams;
1610
+ } cudaGraphExecMemsetNodeSetParams_v10020_params;
1611
+
1612
+ typedef struct cudaGraphExecHostNodeSetParams_v10020_params_st {
1613
+ cudaGraphExec_t hGraphExec;
1614
+ cudaGraphNode_t node;
1615
+ const struct cudaHostNodeParams *pNodeParams;
1616
+ } cudaGraphExecHostNodeSetParams_v10020_params;
1617
+
1618
+ typedef struct cudaGraphExecChildGraphNodeSetParams_v11010_params_st {
1619
+ cudaGraphExec_t hGraphExec;
1620
+ cudaGraphNode_t node;
1621
+ cudaGraph_t childGraph;
1622
+ } cudaGraphExecChildGraphNodeSetParams_v11010_params;
1623
+
1624
+ typedef struct cudaGraphExecEventRecordNodeSetEvent_v11010_params_st {
1625
+ cudaGraphExec_t hGraphExec;
1626
+ cudaGraphNode_t hNode;
1627
+ cudaEvent_t event;
1628
+ } cudaGraphExecEventRecordNodeSetEvent_v11010_params;
1629
+
1630
+ typedef struct cudaGraphExecEventWaitNodeSetEvent_v11010_params_st {
1631
+ cudaGraphExec_t hGraphExec;
1632
+ cudaGraphNode_t hNode;
1633
+ cudaEvent_t event;
1634
+ } cudaGraphExecEventWaitNodeSetEvent_v11010_params;
1635
+
1636
+ typedef struct cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params_st {
1637
+ cudaGraphExec_t hGraphExec;
1638
+ cudaGraphNode_t hNode;
1639
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1640
+ } cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params;
1641
+
1642
+ typedef struct cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params_st {
1643
+ cudaGraphExec_t hGraphExec;
1644
+ cudaGraphNode_t hNode;
1645
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1646
+ } cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params;
1647
+
1648
+ typedef struct cudaGraphNodeSetEnabled_v11060_params_st {
1649
+ cudaGraphExec_t hGraphExec;
1650
+ cudaGraphNode_t hNode;
1651
+ unsigned int isEnabled;
1652
+ } cudaGraphNodeSetEnabled_v11060_params;
1653
+
1654
+ typedef struct cudaGraphNodeGetEnabled_v11060_params_st {
1655
+ cudaGraphExec_t hGraphExec;
1656
+ cudaGraphNode_t hNode;
1657
+ unsigned int *isEnabled;
1658
+ } cudaGraphNodeGetEnabled_v11060_params;
1659
+
1660
+ typedef struct cudaGraphExecUpdate_v10020_params_st {
1661
+ cudaGraphExec_t hGraphExec;
1662
+ cudaGraph_t hGraph;
1663
+ cudaGraphExecUpdateResultInfo *resultInfo;
1664
+ } cudaGraphExecUpdate_v10020_params;
1665
+
1666
+ typedef struct cudaGraphUpload_ptsz_v10000_params_st {
1667
+ cudaGraphExec_t graphExec;
1668
+ cudaStream_t stream;
1669
+ } cudaGraphUpload_ptsz_v10000_params;
1670
+
1671
+ typedef struct cudaGraphLaunch_ptsz_v10000_params_st {
1672
+ cudaGraphExec_t graphExec;
1673
+ cudaStream_t stream;
1674
+ } cudaGraphLaunch_ptsz_v10000_params;
1675
+
1676
+ typedef struct cudaGraphExecDestroy_v10000_params_st {
1677
+ cudaGraphExec_t graphExec;
1678
+ } cudaGraphExecDestroy_v10000_params;
1679
+
1680
+ typedef struct cudaGraphDestroy_v10000_params_st {
1681
+ cudaGraph_t graph;
1682
+ } cudaGraphDestroy_v10000_params;
1683
+
1684
+ typedef struct cudaGraphDebugDotPrint_v11030_params_st {
1685
+ cudaGraph_t graph;
1686
+ const char *path;
1687
+ unsigned int flags;
1688
+ } cudaGraphDebugDotPrint_v11030_params;
1689
+
1690
+ typedef struct cudaUserObjectCreate_v11030_params_st {
1691
+ cudaUserObject_t *object_out;
1692
+ void *ptr;
1693
+ cudaHostFn_t destroy;
1694
+ unsigned int initialRefcount;
1695
+ unsigned int flags;
1696
+ } cudaUserObjectCreate_v11030_params;
1697
+
1698
+ typedef struct cudaUserObjectRetain_v11030_params_st {
1699
+ cudaUserObject_t object;
1700
+ unsigned int count;
1701
+ } cudaUserObjectRetain_v11030_params;
1702
+
1703
+ typedef struct cudaUserObjectRelease_v11030_params_st {
1704
+ cudaUserObject_t object;
1705
+ unsigned int count;
1706
+ } cudaUserObjectRelease_v11030_params;
1707
+
1708
+ typedef struct cudaGraphRetainUserObject_v11030_params_st {
1709
+ cudaGraph_t graph;
1710
+ cudaUserObject_t object;
1711
+ unsigned int count;
1712
+ unsigned int flags;
1713
+ } cudaGraphRetainUserObject_v11030_params;
1714
+
1715
+ typedef struct cudaGraphReleaseUserObject_v11030_params_st {
1716
+ cudaGraph_t graph;
1717
+ cudaUserObject_t object;
1718
+ unsigned int count;
1719
+ } cudaGraphReleaseUserObject_v11030_params;
1720
+
1721
+ typedef struct cudaGraphAddNode_v12020_params_st {
1722
+ cudaGraphNode_t *pGraphNode;
1723
+ cudaGraph_t graph;
1724
+ const cudaGraphNode_t *pDependencies;
1725
+ size_t numDependencies;
1726
+ struct cudaGraphNodeParams *nodeParams;
1727
+ } cudaGraphAddNode_v12020_params;
1728
+
1729
+ typedef struct cudaGraphAddNode_v2_v12030_params_st {
1730
+ cudaGraphNode_t *pGraphNode;
1731
+ cudaGraph_t graph;
1732
+ const cudaGraphNode_t *pDependencies;
1733
+ const cudaGraphEdgeData *dependencyData;
1734
+ size_t numDependencies;
1735
+ struct cudaGraphNodeParams *nodeParams;
1736
+ } cudaGraphAddNode_v2_v12030_params;
1737
+
1738
+ typedef struct cudaGraphNodeSetParams_v12020_params_st {
1739
+ cudaGraphNode_t node;
1740
+ struct cudaGraphNodeParams *nodeParams;
1741
+ } cudaGraphNodeSetParams_v12020_params;
1742
+
1743
+ typedef struct cudaGraphExecNodeSetParams_v12020_params_st {
1744
+ cudaGraphExec_t graphExec;
1745
+ cudaGraphNode_t node;
1746
+ struct cudaGraphNodeParams *nodeParams;
1747
+ } cudaGraphExecNodeSetParams_v12020_params;
1748
+
1749
+ typedef struct cudaGraphConditionalHandleCreate_v12030_params_st {
1750
+ cudaGraphConditionalHandle *pHandle_out;
1751
+ cudaGraph_t graph;
1752
+ unsigned int defaultLaunchValue;
1753
+ unsigned int flags;
1754
+ } cudaGraphConditionalHandleCreate_v12030_params;
1755
+
1756
+ typedef struct cudaGetDriverEntryPoint_ptsz_v11030_params_st {
1757
+ const char *symbol;
1758
+ void **funcPtr;
1759
+ unsigned long long flags;
1760
+ enum cudaDriverEntryPointQueryResult *driverStatus;
1761
+ } cudaGetDriverEntryPoint_ptsz_v11030_params;
1762
+
1763
+ typedef struct cudaGetFuncBySymbol_v11000_params_st {
1764
+ cudaFunction_t *functionPtr;
1765
+ const void *symbolPtr;
1766
+ } cudaGetFuncBySymbol_v11000_params;
1767
+
1768
+ typedef struct cudaGetKernel_v12000_params_st {
1769
+ cudaKernel_t *kernelPtr;
1770
+ const void *entryFuncAddr;
1771
+ } cudaGetKernel_v12000_params;
1772
+
1773
+ typedef struct cudaMemcpy_v3020_params_st {
1774
+ void *dst;
1775
+ const void *src;
1776
+ size_t count;
1777
+ enum cudaMemcpyKind kind;
1778
+ } cudaMemcpy_v3020_params;
1779
+
1780
+ typedef struct cudaMemcpyToSymbol_v3020_params_st {
1781
+ const void *symbol;
1782
+ const void *src;
1783
+ size_t count;
1784
+ size_t offset;
1785
+ enum cudaMemcpyKind kind;
1786
+ } cudaMemcpyToSymbol_v3020_params;
1787
+
1788
+ typedef struct cudaMemcpyFromSymbol_v3020_params_st {
1789
+ void *dst;
1790
+ const void *symbol;
1791
+ size_t count;
1792
+ size_t offset;
1793
+ enum cudaMemcpyKind kind;
1794
+ } cudaMemcpyFromSymbol_v3020_params;
1795
+
1796
+ typedef struct cudaMemcpy2D_v3020_params_st {
1797
+ void *dst;
1798
+ size_t dpitch;
1799
+ const void *src;
1800
+ size_t spitch;
1801
+ size_t width;
1802
+ size_t height;
1803
+ enum cudaMemcpyKind kind;
1804
+ } cudaMemcpy2D_v3020_params;
1805
+
1806
+ typedef struct cudaMemcpyToArray_v3020_params_st {
1807
+ cudaArray_t dst;
1808
+ size_t wOffset;
1809
+ size_t hOffset;
1810
+ const void *src;
1811
+ size_t count;
1812
+ enum cudaMemcpyKind kind;
1813
+ } cudaMemcpyToArray_v3020_params;
1814
+
1815
+ typedef struct cudaMemcpy2DToArray_v3020_params_st {
1816
+ cudaArray_t dst;
1817
+ size_t wOffset;
1818
+ size_t hOffset;
1819
+ const void *src;
1820
+ size_t spitch;
1821
+ size_t width;
1822
+ size_t height;
1823
+ enum cudaMemcpyKind kind;
1824
+ } cudaMemcpy2DToArray_v3020_params;
1825
+
1826
+ typedef struct cudaMemcpyFromArray_v3020_params_st {
1827
+ void *dst;
1828
+ cudaArray_const_t src;
1829
+ size_t wOffset;
1830
+ size_t hOffset;
1831
+ size_t count;
1832
+ enum cudaMemcpyKind kind;
1833
+ } cudaMemcpyFromArray_v3020_params;
1834
+
1835
+ typedef struct cudaMemcpy2DFromArray_v3020_params_st {
1836
+ void *dst;
1837
+ size_t dpitch;
1838
+ cudaArray_const_t src;
1839
+ size_t wOffset;
1840
+ size_t hOffset;
1841
+ size_t width;
1842
+ size_t height;
1843
+ enum cudaMemcpyKind kind;
1844
+ } cudaMemcpy2DFromArray_v3020_params;
1845
+
1846
+ typedef struct cudaMemcpyArrayToArray_v3020_params_st {
1847
+ cudaArray_t dst;
1848
+ size_t wOffsetDst;
1849
+ size_t hOffsetDst;
1850
+ cudaArray_const_t src;
1851
+ size_t wOffsetSrc;
1852
+ size_t hOffsetSrc;
1853
+ size_t count;
1854
+ enum cudaMemcpyKind kind;
1855
+ } cudaMemcpyArrayToArray_v3020_params;
1856
+
1857
+ typedef struct cudaMemcpy2DArrayToArray_v3020_params_st {
1858
+ cudaArray_t dst;
1859
+ size_t wOffsetDst;
1860
+ size_t hOffsetDst;
1861
+ cudaArray_const_t src;
1862
+ size_t wOffsetSrc;
1863
+ size_t hOffsetSrc;
1864
+ size_t width;
1865
+ size_t height;
1866
+ enum cudaMemcpyKind kind;
1867
+ } cudaMemcpy2DArrayToArray_v3020_params;
1868
+
1869
+ typedef struct cudaMemcpy3D_v3020_params_st {
1870
+ const struct cudaMemcpy3DParms *p;
1871
+ } cudaMemcpy3D_v3020_params;
1872
+
1873
+ typedef struct cudaMemcpy3DPeer_v4000_params_st {
1874
+ const struct cudaMemcpy3DPeerParms *p;
1875
+ } cudaMemcpy3DPeer_v4000_params;
1876
+
1877
+ typedef struct cudaMemset_v3020_params_st {
1878
+ void *devPtr;
1879
+ int value;
1880
+ size_t count;
1881
+ } cudaMemset_v3020_params;
1882
+
1883
+ typedef struct cudaMemset2D_v3020_params_st {
1884
+ void *devPtr;
1885
+ size_t pitch;
1886
+ int value;
1887
+ size_t width;
1888
+ size_t height;
1889
+ } cudaMemset2D_v3020_params;
1890
+
1891
+ typedef struct cudaMemset3D_v3020_params_st {
1892
+ struct cudaPitchedPtr pitchedDevPtr;
1893
+ int value;
1894
+ struct cudaExtent extent;
1895
+ } cudaMemset3D_v3020_params;
1896
+
1897
+ typedef struct cudaMemcpyAsync_v3020_params_st {
1898
+ void *dst;
1899
+ const void *src;
1900
+ size_t count;
1901
+ enum cudaMemcpyKind kind;
1902
+ cudaStream_t stream;
1903
+ } cudaMemcpyAsync_v3020_params;
1904
+
1905
+ typedef struct cudaMemcpyToSymbolAsync_v3020_params_st {
1906
+ const void *symbol;
1907
+ const void *src;
1908
+ size_t count;
1909
+ size_t offset;
1910
+ enum cudaMemcpyKind kind;
1911
+ cudaStream_t stream;
1912
+ } cudaMemcpyToSymbolAsync_v3020_params;
1913
+
1914
+ typedef struct cudaMemcpyFromSymbolAsync_v3020_params_st {
1915
+ void *dst;
1916
+ const void *symbol;
1917
+ size_t count;
1918
+ size_t offset;
1919
+ enum cudaMemcpyKind kind;
1920
+ cudaStream_t stream;
1921
+ } cudaMemcpyFromSymbolAsync_v3020_params;
1922
+
1923
+ typedef struct cudaMemcpy2DAsync_v3020_params_st {
1924
+ void *dst;
1925
+ size_t dpitch;
1926
+ const void *src;
1927
+ size_t spitch;
1928
+ size_t width;
1929
+ size_t height;
1930
+ enum cudaMemcpyKind kind;
1931
+ cudaStream_t stream;
1932
+ } cudaMemcpy2DAsync_v3020_params;
1933
+
1934
+ typedef struct cudaMemcpyToArrayAsync_v3020_params_st {
1935
+ cudaArray_t dst;
1936
+ size_t wOffset;
1937
+ size_t hOffset;
1938
+ const void *src;
1939
+ size_t count;
1940
+ enum cudaMemcpyKind kind;
1941
+ cudaStream_t stream;
1942
+ } cudaMemcpyToArrayAsync_v3020_params;
1943
+
1944
+ typedef struct cudaMemcpy2DToArrayAsync_v3020_params_st {
1945
+ cudaArray_t dst;
1946
+ size_t wOffset;
1947
+ size_t hOffset;
1948
+ const void *src;
1949
+ size_t spitch;
1950
+ size_t width;
1951
+ size_t height;
1952
+ enum cudaMemcpyKind kind;
1953
+ cudaStream_t stream;
1954
+ } cudaMemcpy2DToArrayAsync_v3020_params;
1955
+
1956
+ typedef struct cudaMemcpyFromArrayAsync_v3020_params_st {
1957
+ void *dst;
1958
+ cudaArray_const_t src;
1959
+ size_t wOffset;
1960
+ size_t hOffset;
1961
+ size_t count;
1962
+ enum cudaMemcpyKind kind;
1963
+ cudaStream_t stream;
1964
+ } cudaMemcpyFromArrayAsync_v3020_params;
1965
+
1966
+ typedef struct cudaMemcpy2DFromArrayAsync_v3020_params_st {
1967
+ void *dst;
1968
+ size_t dpitch;
1969
+ cudaArray_const_t src;
1970
+ size_t wOffset;
1971
+ size_t hOffset;
1972
+ size_t width;
1973
+ size_t height;
1974
+ enum cudaMemcpyKind kind;
1975
+ cudaStream_t stream;
1976
+ } cudaMemcpy2DFromArrayAsync_v3020_params;
1977
+
1978
+ typedef struct cudaMemcpy3DAsync_v3020_params_st {
1979
+ const struct cudaMemcpy3DParms *p;
1980
+ cudaStream_t stream;
1981
+ } cudaMemcpy3DAsync_v3020_params;
1982
+
1983
+ typedef struct cudaMemcpy3DPeerAsync_v4000_params_st {
1984
+ const struct cudaMemcpy3DPeerParms *p;
1985
+ cudaStream_t stream;
1986
+ } cudaMemcpy3DPeerAsync_v4000_params;
1987
+
1988
+ typedef struct cudaMemsetAsync_v3020_params_st {
1989
+ void *devPtr;
1990
+ int value;
1991
+ size_t count;
1992
+ cudaStream_t stream;
1993
+ } cudaMemsetAsync_v3020_params;
1994
+
1995
+ typedef struct cudaMemset2DAsync_v3020_params_st {
1996
+ void *devPtr;
1997
+ size_t pitch;
1998
+ int value;
1999
+ size_t width;
2000
+ size_t height;
2001
+ cudaStream_t stream;
2002
+ } cudaMemset2DAsync_v3020_params;
2003
+
2004
+ typedef struct cudaMemset3DAsync_v3020_params_st {
2005
+ struct cudaPitchedPtr pitchedDevPtr;
2006
+ int value;
2007
+ struct cudaExtent extent;
2008
+ cudaStream_t stream;
2009
+ } cudaMemset3DAsync_v3020_params;
2010
+
2011
+ typedef struct cudaStreamQuery_v3020_params_st {
2012
+ cudaStream_t stream;
2013
+ } cudaStreamQuery_v3020_params;
2014
+
2015
+ typedef struct cudaStreamGetFlags_v5050_params_st {
2016
+ cudaStream_t hStream;
2017
+ unsigned int *flags;
2018
+ } cudaStreamGetFlags_v5050_params;
2019
+
2020
+ typedef struct cudaStreamGetId_v12000_params_st {
2021
+ cudaStream_t hStream;
2022
+ unsigned long long *streamId;
2023
+ } cudaStreamGetId_v12000_params;
2024
+
2025
+ typedef struct cudaStreamGetPriority_v5050_params_st {
2026
+ cudaStream_t hStream;
2027
+ int *priority;
2028
+ } cudaStreamGetPriority_v5050_params;
2029
+
2030
+ typedef struct cudaEventRecord_v3020_params_st {
2031
+ cudaEvent_t event;
2032
+ cudaStream_t stream;
2033
+ } cudaEventRecord_v3020_params;
2034
+
2035
+ typedef struct cudaEventRecordWithFlags_v11010_params_st {
2036
+ cudaEvent_t event;
2037
+ cudaStream_t stream;
2038
+ unsigned int flags;
2039
+ } cudaEventRecordWithFlags_v11010_params;
2040
+
2041
+ typedef struct cudaStreamWaitEvent_v3020_params_st {
2042
+ cudaStream_t stream;
2043
+ cudaEvent_t event;
2044
+ unsigned int flags;
2045
+ } cudaStreamWaitEvent_v3020_params;
2046
+
2047
+ typedef struct cudaStreamAddCallback_v5000_params_st {
2048
+ cudaStream_t stream;
2049
+ cudaStreamCallback_t callback;
2050
+ void *userData;
2051
+ unsigned int flags;
2052
+ } cudaStreamAddCallback_v5000_params;
2053
+
2054
+ typedef struct cudaStreamAttachMemAsync_v6000_params_st {
2055
+ cudaStream_t stream;
2056
+ void *devPtr;
2057
+ size_t length;
2058
+ unsigned int flags;
2059
+ } cudaStreamAttachMemAsync_v6000_params;
2060
+
2061
+ typedef struct cudaStreamSynchronize_v3020_params_st {
2062
+ cudaStream_t stream;
2063
+ } cudaStreamSynchronize_v3020_params;
2064
+
2065
+ typedef struct cudaLaunchKernel_v7000_params_st {
2066
+ const void *func;
2067
+ dim3 gridDim;
2068
+ dim3 blockDim;
2069
+ void **args;
2070
+ size_t sharedMem;
2071
+ cudaStream_t stream;
2072
+ } cudaLaunchKernel_v7000_params;
2073
+
2074
+ typedef struct cudaLaunchKernelExC_v11060_params_st {
2075
+ const cudaLaunchConfig_t *config;
2076
+ const void *func;
2077
+ void **args;
2078
+ } cudaLaunchKernelExC_v11060_params;
2079
+
2080
+ typedef struct cudaLaunchCooperativeKernel_v9000_params_st {
2081
+ const void *func;
2082
+ dim3 gridDim;
2083
+ dim3 blockDim;
2084
+ void **args;
2085
+ size_t sharedMem;
2086
+ cudaStream_t stream;
2087
+ } cudaLaunchCooperativeKernel_v9000_params;
2088
+
2089
+ typedef struct cudaLaunchHostFunc_v10000_params_st {
2090
+ cudaStream_t stream;
2091
+ cudaHostFn_t fn;
2092
+ void *userData;
2093
+ } cudaLaunchHostFunc_v10000_params;
2094
+
2095
+ typedef struct cudaMemPrefetchAsync_v8000_params_st {
2096
+ const void *devPtr;
2097
+ size_t count;
2098
+ int dstDevice;
2099
+ cudaStream_t stream;
2100
+ } cudaMemPrefetchAsync_v8000_params;
2101
+
2102
+ typedef struct cudaMemPrefetchAsync_v2_v12020_params_st {
2103
+ const void *devPtr;
2104
+ size_t count;
2105
+ struct cudaMemLocation location;
2106
+ unsigned int flags;
2107
+ cudaStream_t stream;
2108
+ } cudaMemPrefetchAsync_v2_v12020_params;
2109
+
2110
+ typedef struct cudaSignalExternalSemaphoresAsync_v10000_params_st {
2111
+ const cudaExternalSemaphore_t *extSemArray;
2112
+ const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray;
2113
+ unsigned int numExtSems;
2114
+ cudaStream_t stream;
2115
+ } cudaSignalExternalSemaphoresAsync_v10000_params;
2116
+
2117
+ typedef struct cudaSignalExternalSemaphoresAsync_ptsz_v10000_params_st {
2118
+ const cudaExternalSemaphore_t *extSemArray;
2119
+ const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray;
2120
+ unsigned int numExtSems;
2121
+ cudaStream_t stream;
2122
+ } cudaSignalExternalSemaphoresAsync_ptsz_v10000_params;
2123
+
2124
+ typedef struct cudaSignalExternalSemaphoresAsync_v2_v11020_params_st {
2125
+ const cudaExternalSemaphore_t *extSemArray;
2126
+ const struct cudaExternalSemaphoreSignalParams *paramsArray;
2127
+ unsigned int numExtSems;
2128
+ cudaStream_t stream;
2129
+ } cudaSignalExternalSemaphoresAsync_v2_v11020_params;
2130
+
2131
+ typedef struct cudaWaitExternalSemaphoresAsync_v10000_params_st {
2132
+ const cudaExternalSemaphore_t *extSemArray;
2133
+ const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray;
2134
+ unsigned int numExtSems;
2135
+ cudaStream_t stream;
2136
+ } cudaWaitExternalSemaphoresAsync_v10000_params;
2137
+
2138
+ typedef struct cudaWaitExternalSemaphoresAsync_ptsz_v10000_params_st {
2139
+ const cudaExternalSemaphore_t *extSemArray;
2140
+ const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray;
2141
+ unsigned int numExtSems;
2142
+ cudaStream_t stream;
2143
+ } cudaWaitExternalSemaphoresAsync_ptsz_v10000_params;
2144
+
2145
+ typedef struct cudaWaitExternalSemaphoresAsync_v2_v11020_params_st {
2146
+ const cudaExternalSemaphore_t *extSemArray;
2147
+ const struct cudaExternalSemaphoreWaitParams *paramsArray;
2148
+ unsigned int numExtSems;
2149
+ cudaStream_t stream;
2150
+ } cudaWaitExternalSemaphoresAsync_v2_v11020_params;
2151
+
2152
+ typedef struct cudaGraphInstantiateWithParams_v12000_params_st {
2153
+ cudaGraphExec_t *pGraphExec;
2154
+ cudaGraph_t graph;
2155
+ cudaGraphInstantiateParams *instantiateParams;
2156
+ } cudaGraphInstantiateWithParams_v12000_params;
2157
+
2158
+ typedef struct cudaGraphUpload_v10000_params_st {
2159
+ cudaGraphExec_t graphExec;
2160
+ cudaStream_t stream;
2161
+ } cudaGraphUpload_v10000_params;
2162
+
2163
+ typedef struct cudaGraphLaunch_v10000_params_st {
2164
+ cudaGraphExec_t graphExec;
2165
+ cudaStream_t stream;
2166
+ } cudaGraphLaunch_v10000_params;
2167
+
2168
+ typedef struct cudaStreamBeginCapture_v10000_params_st {
2169
+ cudaStream_t stream;
2170
+ enum cudaStreamCaptureMode mode;
2171
+ } cudaStreamBeginCapture_v10000_params;
2172
+
2173
+ typedef struct cudaStreamBeginCaptureToGraph_v12030_params_st {
2174
+ cudaStream_t stream;
2175
+ cudaGraph_t graph;
2176
+ const cudaGraphNode_t *dependencies;
2177
+ const cudaGraphEdgeData *dependencyData;
2178
+ size_t numDependencies;
2179
+ enum cudaStreamCaptureMode mode;
2180
+ } cudaStreamBeginCaptureToGraph_v12030_params;
2181
+
2182
+ typedef struct cudaStreamEndCapture_v10000_params_st {
2183
+ cudaStream_t stream;
2184
+ cudaGraph_t *pGraph;
2185
+ } cudaStreamEndCapture_v10000_params;
2186
+
2187
+ typedef struct cudaStreamIsCapturing_v10000_params_st {
2188
+ cudaStream_t stream;
2189
+ enum cudaStreamCaptureStatus *pCaptureStatus;
2190
+ } cudaStreamIsCapturing_v10000_params;
2191
+
2192
+ typedef struct cudaStreamGetCaptureInfo_v10010_params_st {
2193
+ cudaStream_t stream;
2194
+ enum cudaStreamCaptureStatus *captureStatus_out;
2195
+ unsigned long long *id_out;
2196
+ } cudaStreamGetCaptureInfo_v10010_params;
2197
+
2198
+ typedef struct cudaStreamGetCaptureInfo_ptsz_v10010_params_st {
2199
+ cudaStream_t stream;
2200
+ enum cudaStreamCaptureStatus *captureStatus_out;
2201
+ unsigned long long *id_out;
2202
+ } cudaStreamGetCaptureInfo_ptsz_v10010_params;
2203
+
2204
+ typedef struct cudaStreamGetCaptureInfo_v2_v11030_params_st {
2205
+ cudaStream_t stream;
2206
+ enum cudaStreamCaptureStatus *captureStatus_out;
2207
+ unsigned long long *id_out;
2208
+ cudaGraph_t *graph_out;
2209
+ const cudaGraphNode_t **dependencies_out;
2210
+ size_t *numDependencies_out;
2211
+ } cudaStreamGetCaptureInfo_v2_v11030_params;
2212
+
2213
+ typedef struct cudaStreamGetCaptureInfo_v3_v12030_params_st {
2214
+ cudaStream_t stream;
2215
+ enum cudaStreamCaptureStatus *captureStatus_out;
2216
+ unsigned long long *id_out;
2217
+ cudaGraph_t *graph_out;
2218
+ const cudaGraphNode_t **dependencies_out;
2219
+ const cudaGraphEdgeData **edgeData_out;
2220
+ size_t *numDependencies_out;
2221
+ } cudaStreamGetCaptureInfo_v3_v12030_params;
2222
+
2223
+ typedef struct cudaStreamUpdateCaptureDependencies_v11030_params_st {
2224
+ cudaStream_t stream;
2225
+ cudaGraphNode_t *dependencies;
2226
+ size_t numDependencies;
2227
+ unsigned int flags;
2228
+ } cudaStreamUpdateCaptureDependencies_v11030_params;
2229
+
2230
+ typedef struct cudaStreamUpdateCaptureDependencies_v2_v12030_params_st {
2231
+ cudaStream_t stream;
2232
+ cudaGraphNode_t *dependencies;
2233
+ const cudaGraphEdgeData *dependencyData;
2234
+ size_t numDependencies;
2235
+ unsigned int flags;
2236
+ } cudaStreamUpdateCaptureDependencies_v2_v12030_params;
2237
+
2238
+ typedef struct cudaStreamCopyAttributes_v11000_params_st {
2239
+ cudaStream_t dstStream;
2240
+ cudaStream_t srcStream;
2241
+ } cudaStreamCopyAttributes_v11000_params;
2242
+
2243
+ typedef struct cudaStreamGetAttribute_v11000_params_st {
2244
+ cudaStream_t stream;
2245
+ cudaStreamAttrID attr;
2246
+ cudaStreamAttrValue *value;
2247
+ } cudaStreamGetAttribute_v11000_params;
2248
+
2249
+ typedef struct cudaStreamSetAttribute_v11000_params_st {
2250
+ cudaStream_t stream;
2251
+ cudaStreamAttrID attr;
2252
+ const cudaStreamAttrValue *param;
2253
+ } cudaStreamSetAttribute_v11000_params;
2254
+
2255
+ typedef struct cudaMallocAsync_v11020_params_st {
2256
+ void **devPtr;
2257
+ size_t size;
2258
+ cudaStream_t hStream;
2259
+ } cudaMallocAsync_v11020_params;
2260
+
2261
+ typedef struct cudaFreeAsync_v11020_params_st {
2262
+ void *devPtr;
2263
+ cudaStream_t hStream;
2264
+ } cudaFreeAsync_v11020_params;
2265
+
2266
+ typedef struct cudaMallocFromPoolAsync_v11020_params_st {
2267
+ void **ptr;
2268
+ size_t size;
2269
+ cudaMemPool_t memPool;
2270
+ cudaStream_t stream;
2271
+ } cudaMallocFromPoolAsync_v11020_params;
2272
+
2273
+ typedef struct cudaGetDriverEntryPoint_v11030_params_st {
2274
+ const char *symbol;
2275
+ void **funcPtr;
2276
+ unsigned long long flags;
2277
+ enum cudaDriverEntryPointQueryResult *driverStatus;
2278
+ } cudaGetDriverEntryPoint_v11030_params;
2279
+
2280
+ typedef struct cudaGetDeviceProperties_v3020_params_st {
2281
+ struct cudaDeviceProp *prop;
2282
+ int device;
2283
+ } cudaGetDeviceProperties_v3020_params;
2284
+
2285
+ // Parameter trace structures for removed functions
2286
+
2287
+
2288
+ // End of parameter trace structures
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_vdpau_interop.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaVDPAUGetDevice_v3020_params_st {
12
+ int *device;
13
+ VdpDevice vdpDevice;
14
+ VdpGetProcAddress *vdpGetProcAddress;
15
+ } cudaVDPAUGetDevice_v3020_params;
16
+
17
+ typedef struct cudaVDPAUSetVDPAUDevice_v3020_params_st {
18
+ int device;
19
+ VdpDevice vdpDevice;
20
+ VdpGetProcAddress *vdpGetProcAddress;
21
+ } cudaVDPAUSetVDPAUDevice_v3020_params;
22
+
23
+ typedef struct cudaGraphicsVDPAURegisterVideoSurface_v3020_params_st {
24
+ struct cudaGraphicsResource **resource;
25
+ VdpVideoSurface vdpSurface;
26
+ unsigned int flags;
27
+ } cudaGraphicsVDPAURegisterVideoSurface_v3020_params;
28
+
29
+ typedef struct cudaGraphicsVDPAURegisterOutputSurface_v3020_params_st {
30
+ struct cudaGraphicsResource **resource;
31
+ VdpOutputSurface vdpSurface;
32
+ unsigned int flags;
33
+ } cudaGraphicsVDPAURegisterOutputSurface_v3020_params;
34
+
35
+ // Parameter trace structures for removed functions
36
+
37
+
38
+ // End of parameter trace structures
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cudart_removed.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaStreamDestroy_v3020_params_st {
12
+ cudaStream_t stream;
13
+ } cudaStreamDestroy_v3020_params;
14
+
15
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params_st {
16
+ int *numBlocks;
17
+ const void *func;
18
+ size_t numDynamicSmemBytes;
19
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params;
20
+
21
+ typedef struct cudaConfigureCall_v3020_params_st {
22
+ dim3 gridDim;
23
+ dim3 blockDim;
24
+ size_t sharedMem __dv;
25
+ cudaStream_t stream __dv;
26
+ } cudaConfigureCall_v3020_params;
27
+
28
+ typedef struct cudaSetupArgument_v3020_params_st {
29
+ const void *arg;
30
+ size_t size;
31
+ size_t offset;
32
+ } cudaSetupArgument_v3020_params;
33
+
34
+ typedef struct cudaLaunch_v3020_params_st {
35
+ const void *func;
36
+ } cudaLaunch_v3020_params;
37
+
38
+ typedef struct cudaLaunch_ptsz_v7000_params_st {
39
+ const void *func;
40
+ } cudaLaunch_ptsz_v7000_params;
41
+
42
+ typedef struct cudaStreamSetFlags_v10200_params_st {
43
+ cudaStream_t hStream;
44
+ unsigned int flags;
45
+ } cudaStreamSetFlags_v10200_params;
46
+
47
+ typedef struct cudaStreamSetFlags_ptsz_v10200_params_st {
48
+ cudaStream_t hStream;
49
+ unsigned int flags;
50
+ } cudaStreamSetFlags_ptsz_v10200_params;
51
+
52
+ typedef struct cudaProfilerInitialize_v4000_params_st {
53
+ const char *configFile;
54
+ const char *outputFile;
55
+ cudaOutputMode_t outputMode;
56
+ } cudaProfilerInitialize_v4000_params;
57
+
58
+ typedef struct cudaThreadSetLimit_v3020_params_st {
59
+ enum cudaLimit limit;
60
+ size_t value;
61
+ } cudaThreadSetLimit_v3020_params;
62
+
63
+ typedef struct cudaThreadGetLimit_v3020_params_st {
64
+ size_t *pValue;
65
+ enum cudaLimit limit;
66
+ } cudaThreadGetLimit_v3020_params;
67
+
68
+ typedef struct cudaThreadGetCacheConfig_v3020_params_st {
69
+ enum cudaFuncCache *pCacheConfig;
70
+ } cudaThreadGetCacheConfig_v3020_params;
71
+
72
+ typedef struct cudaThreadSetCacheConfig_v3020_params_st {
73
+ enum cudaFuncCache cacheConfig;
74
+ } cudaThreadSetCacheConfig_v3020_params;
75
+
76
+ typedef struct cudaSetDoubleForDevice_v3020_params_st {
77
+ double *d;
78
+ } cudaSetDoubleForDevice_v3020_params;
79
+
80
+ typedef struct cudaSetDoubleForHost_v3020_params_st {
81
+ double *d;
82
+ } cudaSetDoubleForHost_v3020_params;
83
+
84
+ typedef struct cudaCreateTextureObject_v2_v11080_params_st {
85
+ cudaTextureObject_t *pTexObject;
86
+ const struct cudaResourceDesc *pResDesc;
87
+ const struct cudaTextureDesc *pTexDesc;
88
+ const struct cudaResourceViewDesc *pResViewDesc;
89
+ } cudaCreateTextureObject_v2_v11080_params;
90
+
91
+ typedef struct cudaGetTextureObjectTextureDesc_v2_v11080_params_st {
92
+ struct cudaTextureDesc *pTexDesc;
93
+ cudaTextureObject_t texObject;
94
+ } cudaGetTextureObjectTextureDesc_v2_v11080_params;
95
+
96
+ typedef struct cudaBindTexture_v3020_params_st {
97
+ size_t *offset;
98
+ const struct textureReference *texref;
99
+ const void *devPtr;
100
+ const struct cudaChannelFormatDesc *desc;
101
+ size_t size __dv;
102
+ } cudaBindTexture_v3020_params;
103
+
104
+ typedef struct cudaBindTexture2D_v3020_params_st {
105
+ size_t *offset;
106
+ const struct textureReference *texref;
107
+ const void *devPtr;
108
+ const struct cudaChannelFormatDesc *desc;
109
+ size_t width;
110
+ size_t height;
111
+ size_t pitch;
112
+ } cudaBindTexture2D_v3020_params;
113
+
114
+ typedef struct cudaBindTextureToArray_v3020_params_st {
115
+ const struct textureReference *texref;
116
+ cudaArray_const_t array;
117
+ const struct cudaChannelFormatDesc *desc;
118
+ } cudaBindTextureToArray_v3020_params;
119
+
120
+ typedef struct cudaBindTextureToMipmappedArray_v5000_params_st {
121
+ const struct textureReference *texref;
122
+ cudaMipmappedArray_const_t mipmappedArray;
123
+ const struct cudaChannelFormatDesc *desc;
124
+ } cudaBindTextureToMipmappedArray_v5000_params;
125
+
126
+ typedef struct cudaUnbindTexture_v3020_params_st {
127
+ const struct textureReference *texref;
128
+ } cudaUnbindTexture_v3020_params;
129
+
130
+ typedef struct cudaGetTextureAlignmentOffset_v3020_params_st {
131
+ size_t *offset;
132
+ const struct textureReference *texref;
133
+ } cudaGetTextureAlignmentOffset_v3020_params;
134
+
135
+ typedef struct cudaGetTextureReference_v3020_params_st {
136
+ const struct textureReference **texref;
137
+ const void *symbol;
138
+ } cudaGetTextureReference_v3020_params;
139
+
140
+ typedef struct cudaBindSurfaceToArray_v3020_params_st {
141
+ const struct surfaceReference *surfref;
142
+ cudaArray_const_t array;
143
+ const struct cudaChannelFormatDesc *desc;
144
+ } cudaBindSurfaceToArray_v3020_params;
145
+
146
+ typedef struct cudaGetSurfaceReference_v3020_params_st {
147
+ const struct surfaceReference **surfref;
148
+ const void *symbol;
149
+ } cudaGetSurfaceReference_v3020_params;
150
+
151
+ typedef struct cudaGraphInstantiate_v10000_params_st {
152
+ cudaGraphExec_t *pGraphExec;
153
+ cudaGraph_t graph;
154
+ cudaGraphNode_t *pErrorNode;
155
+ char *pLogBuffer;
156
+ size_t bufferSize;
157
+ } cudaGraphInstantiate_v10000_params;
158
+
159
+ // Parameter trace structures for removed functions
160
+
161
+
162
+ // End of parameter trace structures
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2013-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
#if defined(__GNUC__) && defined(CUPTI_LIB)
#pragma GCC visibility push(default)
#endif

// *************************************************************************
// Definitions of structs to hold parameters for each function
// *************************************************************************
//
// Each struct below mirrors the argument list of one NVTX API entry point.
// CUPTI hands a pointer to the matching struct to callback subscribers so
// they can inspect the arguments of the intercepted NVTX call.  The 'A'
// suffix denotes ASCII (char) string variants, 'W' wide (wchar_t) variants,
// and 'Ex' variants that take a full event-attributes descriptor.

/* ---- Markers and ranges ---------------------------------------------- */

typedef struct nvtxMarkEx_params_st {
    const nvtxEventAttributes_t* eventAttrib;
} nvtxMarkEx_params;

typedef struct nvtxMarkA_params_st {
    const char* message;
} nvtxMarkA_params;

typedef struct nvtxMarkW_params_st {
    const wchar_t* message;
} nvtxMarkW_params;

typedef struct nvtxRangeStartEx_params_st {
    const nvtxEventAttributes_t* eventAttrib;
} nvtxRangeStartEx_params;

typedef struct nvtxRangeStartA_params_st {
    const char* message;
} nvtxRangeStartA_params;

typedef struct nvtxRangeStartW_params_st {
    const wchar_t* message;
} nvtxRangeStartW_params;

typedef struct nvtxRangeEnd_params_st {
    nvtxRangeId_t id;
} nvtxRangeEnd_params;

typedef struct nvtxRangePushEx_params_st {
    const nvtxEventAttributes_t* eventAttrib;
} nvtxRangePushEx_params;

typedef struct nvtxRangePushA_params_st {
    const char* message;
} nvtxRangePushA_params;

typedef struct nvtxRangePushW_params_st {
    const wchar_t* message;
} nvtxRangePushW_params;

typedef struct nvtxRangePop_params_st {
    /* WAR: Windows compiler doesn't allow empty structs */
    /* This field shouldn't be used */
    void *dummy;
} nvtxRangePop_params;

/* ---- Resource-naming helpers ----------------------------------------- */

typedef struct nvtxNameCategoryA_params_st {
    uint32_t category;
    const char* name;
} nvtxNameCategoryA_params;

typedef struct nvtxNameCategoryW_params_st {
    uint32_t category;
    const wchar_t* name;
} nvtxNameCategoryW_params;

typedef struct nvtxNameOsThreadA_params_st {
    uint32_t threadId;
    const char* name;
} nvtxNameOsThreadA_params;

typedef struct nvtxNameOsThreadW_params_st {
    uint32_t threadId;
    const wchar_t* name;
} nvtxNameOsThreadW_params;

/* Naming of CUDA driver API objects (CUdevice/CUcontext/CUstream/CUevent). */

typedef struct nvtxNameCuDeviceA_params_st {
    CUdevice device;
    const char* name;
} nvtxNameCuDeviceA_params;

typedef struct nvtxNameCuDeviceW_params_st {
    CUdevice device;
    const wchar_t* name;
} nvtxNameCuDeviceW_params;

typedef struct nvtxNameCuContextA_params_st {
    CUcontext context;
    const char* name;
} nvtxNameCuContextA_params;

typedef struct nvtxNameCuContextW_params_st {
    CUcontext context;
    const wchar_t* name;
} nvtxNameCuContextW_params;

typedef struct nvtxNameCuStreamA_params_st {
    CUstream stream;
    const char* name;
} nvtxNameCuStreamA_params;

typedef struct nvtxNameCuStreamW_params_st {
    CUstream stream;
    const wchar_t* name;
} nvtxNameCuStreamW_params;

typedef struct nvtxNameCuEventA_params_st {
    CUevent event;
    const char* name;
} nvtxNameCuEventA_params;

typedef struct nvtxNameCuEventW_params_st {
    CUevent event;
    const wchar_t* name;
} nvtxNameCuEventW_params;

/* Naming of CUDA runtime API objects (int device id, cudaStream_t, cudaEvent_t). */

typedef struct nvtxNameCudaDeviceA_params_st {
    int device;
    const char* name;
} nvtxNameCudaDeviceA_params;

typedef struct nvtxNameCudaDeviceW_params_st {
    int device;
    const wchar_t* name;
} nvtxNameCudaDeviceW_params;

typedef struct nvtxNameCudaStreamA_params_st {
    cudaStream_t stream;
    const char* name;
} nvtxNameCudaStreamA_params;

typedef struct nvtxNameCudaStreamW_params_st {
    cudaStream_t stream;
    const wchar_t* name;
} nvtxNameCudaStreamW_params;

typedef struct nvtxNameCudaEventA_params_st {
    cudaEvent_t event;
    const char* name;
} nvtxNameCudaEventA_params;

typedef struct nvtxNameCudaEventW_params_st {
    cudaEvent_t event;
    const wchar_t* name;
} nvtxNameCudaEventW_params;

/* ---- Domain-scoped API ------------------------------------------------ */
/* Domain variants embed the corresponding core params struct in 'core'.    */

typedef struct nvtxDomainCreateA_params_st {
    const char* name;
} nvtxDomainCreateA_params;

typedef struct nvtxDomainDestroy_params_st {
    nvtxDomainHandle_t domain;
} nvtxDomainDestroy_params;

typedef struct nvtxDomainMarkEx_params_st {
    nvtxDomainHandle_t domain;
    nvtxMarkEx_params core;
} nvtxDomainMarkEx_params;

typedef struct nvtxDomainRangeStartEx_params_st {
    nvtxDomainHandle_t domain;
    nvtxRangeStartEx_params core;
} nvtxDomainRangeStartEx_params;

typedef struct nvtxDomainRangeEnd_params_st {
    nvtxDomainHandle_t domain;
    nvtxRangeEnd_params core;
} nvtxDomainRangeEnd_params;

typedef struct nvtxDomainRangePushEx_params_st {
    nvtxDomainHandle_t domain;
    nvtxRangePushEx_params core;
} nvtxDomainRangePushEx_params;

typedef struct nvtxDomainRangePop_params_st {
    nvtxDomainHandle_t domain;
} nvtxDomainRangePop_params;

typedef struct nvtxSyncUserCreate_params_st {
    nvtxDomainHandle_t domain;
    const nvtxSyncUserAttributes_t* attribs;
} nvtxSyncUserCreate_params;

/* Shared by the Acquire*/Releasing/Destroy sync-user entry points, which all
   take just the handle. */
typedef struct nvtxSyncUserCommon_params_st {
    nvtxSyncUser_t handle;
} nvtxSyncUserCommon_params;

typedef struct nvtxDomainRegisterStringA_params_st {
    nvtxDomainHandle_t domain;
    const char* string;
} nvtxDomainRegisterStringA_params;

typedef struct nvtxDomainRegisterStringW_params_st {
    nvtxDomainHandle_t domain;
    /* NOTE(review): declared 'const char*' although this is the W (wide)
       variant; other W variants use 'const wchar_t*'.  Matches the shipped
       CUPTI header as-is — verify against the nvToolsExt prototype before
       changing. */
    const char* string;
} nvtxDomainRegisterStringW_params;

#if defined(__GNUC__) && defined(CUPTI_LIB)
#pragma GCC visibility pop
#endif
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_common.h ADDED
@@ -0,0 +1,393 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_COMMON_H
2
+ #define NVPERF_COMMON_H
3
+
4
+ /*
5
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
#include <stddef.h>
#include <stdint.h>

/* When building the NvPerf shared library with GCC, export public symbols
   by default and mark internal ones NVPW_LOCAL (hidden).  In all other
   builds NVPW_LOCAL expands to nothing. */
#if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
    #pragma GCC visibility push(default)
    #if !defined(NVPW_LOCAL)
        #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
    #endif
#else
    #if !defined(NVPW_LOCAL)
        #define NVPW_LOCAL
    #endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @file nvperf_common.h
 */

/* Guarded so the same definitions may appear in several NvPerf headers. */
#ifndef NVPERF_NVPA_STATUS_DEFINED
#define NVPERF_NVPA_STATUS_DEFINED

/// Error codes.
typedef enum NVPA_Status
{
    /// Success
    NVPA_STATUS_SUCCESS = 0,
    /// Generic error.
    NVPA_STATUS_ERROR = 1,
    /// Internal error. Please file a bug!
    NVPA_STATUS_INTERNAL_ERROR = 2,
    /// NVPW_InitializeTarget() or NVPW_InitializeHost() has not been called yet.
    NVPA_STATUS_NOT_INITIALIZED = 3,
    /// The NvPerf DLL/DSO could not be loaded during NVPW_Initialize*(). Please ensure they are placed in the
    /// appropriate location that can be found by a dynamic linker. And on Linux systems, confirm that the
    /// LD_LIBRARY_PATH environment variable is set correctly. Alternatively, you may utilize
    /// NVPW_SetLibraryLoadPaths() to define additional library search paths.
    NVPA_STATUS_NOT_LOADED = 4,
    /// The function was not found in this version of the NvPerf DLL/DSO. Or if you are directly calling
    /// NVPA_GetProcAddress(), please ensure the function name is spelled correctly.
    NVPA_STATUS_FUNCTION_NOT_FOUND = 5,
    /// The request was intentionally not supported.
    NVPA_STATUS_NOT_SUPPORTED = 6,
    /// The request was not implemented by this version.
    NVPA_STATUS_NOT_IMPLEMENTED = 7,
    /// Invalid argument.
    NVPA_STATUS_INVALID_ARGUMENT = 8,
    /// UNUSED
    NVPA_STATUS_INVALID_METRIC_ID = 9,
    /// No driver has been loaded via NVPW_*_LoadDriver().
    NVPA_STATUS_DRIVER_NOT_LOADED = 10,
    /// Failed memory allocation.
    NVPA_STATUS_OUT_OF_MEMORY = 11,
    /// UNUSED
    NVPA_STATUS_INVALID_THREAD_STATE = 12,
    /// UNUSED
    NVPA_STATUS_FAILED_CONTEXT_ALLOC = 13,
    /// The specified GPU is not supported. It is recommended to call IsGpuSupported() for more information
    NVPA_STATUS_UNSUPPORTED_GPU = 14,
    /// The installed NVIDIA driver is too old.
    NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION = 15,
    /// UNUSED
    NVPA_STATUS_OBJECT_NOT_REGISTERED = 16,
    /// Profiling permission not granted; see https://developer.nvidia.com/nvidia-development-tools-solutions-
    /// ERR_NVGPUCTRPERM-permission-issue-performance-counters
    NVPA_STATUS_INSUFFICIENT_PRIVILEGE = 17,
    /// UNUSED
    NVPA_STATUS_INVALID_CONTEXT_STATE = 18,
    /// UNUSED
    NVPA_STATUS_INVALID_OBJECT_STATE = 19,
    /// The request could not be fulfilled because a system resource is already in use.
    NVPA_STATUS_RESOURCE_UNAVAILABLE = 20,
    /// UNUSED
    NVPA_STATUS_DRIVER_LOADED_TOO_LATE = 21,
    /// The provided buffer is not large enough.
    NVPA_STATUS_INSUFFICIENT_SPACE = 22,
    /// UNUSED
    NVPA_STATUS_OBJECT_MISMATCH = 23,
    /// Virtualized GPU (vGPU) is not supported.
    NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 24,
    /// Profiling permission was not granted or the device was disabled.
    NVPA_STATUS_PROFILING_NOT_ALLOWED = 25,
    // Sentinel: number of defined status codes, not a real status.
    NVPA_STATUS__COUNT
} NVPA_Status;
128
+
129
+
130
+ inline void NVPW_NVPAStatusToString(NVPA_Status status, const char** ppStatusStr, const char** ppCommentStr)
131
+ {
132
+ switch (status)
133
+ {
134
+ case NVPA_STATUS_SUCCESS:
135
+ *ppStatusStr = "NVPA_STATUS_SUCCESS";
136
+ *ppCommentStr = "Success";
137
+ return;
138
+ case NVPA_STATUS_ERROR:
139
+ *ppStatusStr = "NVPA_STATUS_ERROR";
140
+ *ppCommentStr = "Generic error.";
141
+ return;
142
+ case NVPA_STATUS_INTERNAL_ERROR:
143
+ *ppStatusStr = "NVPA_STATUS_INTERNAL_ERROR";
144
+ *ppCommentStr = "Internal error. Please file a bug!";
145
+ return;
146
+ case NVPA_STATUS_NOT_INITIALIZED:
147
+ *ppStatusStr = "NVPA_STATUS_NOT_INITIALIZED";
148
+ *ppCommentStr = "NVPW_InitializeTarget() or NVPW_InitializeHost() has not been called yet.";
149
+ return;
150
+ case NVPA_STATUS_NOT_LOADED:
151
+ *ppStatusStr = "NVPA_STATUS_NOT_LOADED";
152
+ *ppCommentStr = "The NvPerf DLL/DSO could not be loaded during NVPW_Initialize*(). Please ensure they are placed in the appropriate location that can be founder by a dynamic linker. And on Linux systems, confirm that the LD_LIBRARY_PATH environment variable is set correctly. Alternatively, you may utilize NVPW_SetLibraryLoadPaths() to define additional library search paths.";
153
+ return;
154
+ case NVPA_STATUS_FUNCTION_NOT_FOUND:
155
+ *ppStatusStr = "NVPA_STATUS_FUNCTION_NOT_FOUND";
156
+ *ppCommentStr = "The function was not found in this version of the NvPerf DLL/DSO. Or if you are directly calling NVPA_GetProcAddress(), please ensure the function name is spelled correctly.";
157
+ return;
158
+ case NVPA_STATUS_NOT_SUPPORTED:
159
+ *ppStatusStr = "NVPA_STATUS_NOT_SUPPORTED";
160
+ *ppCommentStr = "The request was intentionally not supported.";
161
+ return;
162
+ case NVPA_STATUS_NOT_IMPLEMENTED:
163
+ *ppStatusStr = "NVPA_STATUS_NOT_IMPLEMENTED";
164
+ *ppCommentStr = "The request was not implemented by this version.";
165
+ return;
166
+ case NVPA_STATUS_INVALID_ARGUMENT:
167
+ *ppStatusStr = "NVPA_STATUS_INVALID_ARGUMENT";
168
+ *ppCommentStr = "Invalid argument.";
169
+ return;
170
+ case NVPA_STATUS_INVALID_METRIC_ID:
171
+ *ppStatusStr = "NVPA_STATUS_INVALID_METRIC_ID";
172
+ *ppCommentStr = "UNUSED";
173
+ return;
174
+ case NVPA_STATUS_DRIVER_NOT_LOADED:
175
+ *ppStatusStr = "NVPA_STATUS_DRIVER_NOT_LOADED";
176
+ *ppCommentStr = "No driver has been loaded via NVPW_*_LoadDriver().";
177
+ return;
178
+ case NVPA_STATUS_OUT_OF_MEMORY:
179
+ *ppStatusStr = "NVPA_STATUS_OUT_OF_MEMORY";
180
+ *ppCommentStr = "Failed memory allocation.";
181
+ return;
182
+ case NVPA_STATUS_INVALID_THREAD_STATE:
183
+ *ppStatusStr = "NVPA_STATUS_INVALID_THREAD_STATE";
184
+ *ppCommentStr = "UNUSED";
185
+ return;
186
+ case NVPA_STATUS_FAILED_CONTEXT_ALLOC:
187
+ *ppStatusStr = "NVPA_STATUS_FAILED_CONTEXT_ALLOC";
188
+ *ppCommentStr = "UNUSED";
189
+ return;
190
+ case NVPA_STATUS_UNSUPPORTED_GPU:
191
+ *ppStatusStr = "NVPA_STATUS_UNSUPPORTED_GPU";
192
+ *ppCommentStr = "The specified GPU is not supported. It is recommended to call IsGpuSupported() for more information";
193
+ return;
194
+ case NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION:
195
+ *ppStatusStr = "NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION";
196
+ *ppCommentStr = "The installed NVIDIA driver is too old.";
197
+ return;
198
+ case NVPA_STATUS_OBJECT_NOT_REGISTERED:
199
+ *ppStatusStr = "NVPA_STATUS_OBJECT_NOT_REGISTERED";
200
+ *ppCommentStr = "UNUSED";
201
+ return;
202
+ case NVPA_STATUS_INSUFFICIENT_PRIVILEGE:
203
+ *ppStatusStr = "NVPA_STATUS_INSUFFICIENT_PRIVILEGE";
204
+ *ppCommentStr = "Profiling permission not granted; see https://developer.nvidia.com/nvidia-development-tools-solutions-ERR_NVGPUCTRPERM-permission-issue-performance-counters";
205
+ return;
206
+ case NVPA_STATUS_INVALID_CONTEXT_STATE:
207
+ *ppStatusStr = "NVPA_STATUS_INVALID_CONTEXT_STATE";
208
+ *ppCommentStr = "UNUSED";
209
+ return;
210
+ case NVPA_STATUS_INVALID_OBJECT_STATE:
211
+ *ppStatusStr = "NVPA_STATUS_INVALID_OBJECT_STATE";
212
+ *ppCommentStr = "UNUSED";
213
+ return;
214
+ case NVPA_STATUS_RESOURCE_UNAVAILABLE:
215
+ *ppStatusStr = "NVPA_STATUS_RESOURCE_UNAVAILABLE";
216
+ *ppCommentStr = "The request could not be fulfilled because a system resource is already in use.";
217
+ return;
218
+ case NVPA_STATUS_DRIVER_LOADED_TOO_LATE:
219
+ *ppStatusStr = "NVPA_STATUS_DRIVER_LOADED_TOO_LATE";
220
+ *ppCommentStr = "UNUSED";
221
+ return;
222
+ case NVPA_STATUS_INSUFFICIENT_SPACE:
223
+ *ppStatusStr = "NVPA_STATUS_INSUFFICIENT_SPACE";
224
+ *ppCommentStr = "The provided buffer is not large enough.";
225
+ return;
226
+ case NVPA_STATUS_OBJECT_MISMATCH:
227
+ *ppStatusStr = "NVPA_STATUS_OBJECT_MISMATCH";
228
+ *ppCommentStr = "UNUSED";
229
+ return;
230
+ case NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED:
231
+ *ppStatusStr = "NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED";
232
+ *ppCommentStr = "Virtualized GPU (vGPU) is not supported.";
233
+ return;
234
+ case NVPA_STATUS_PROFILING_NOT_ALLOWED:
235
+ *ppStatusStr = "NVPA_STATUS_PROFILING_NOT_ALLOWED";
236
+ *ppCommentStr = "Profiling permission was not granted or the device was disabled.";
237
+ return;
238
+ default:
239
+ *ppStatusStr = "Unrecognized status";
240
+ *ppCommentStr = "This status is unrecognized. Is it coming from a newer version of NvPerf library?";
241
+ return;
242
+ }
243
+ }
244
+
245
+
246
#endif // NVPERF_NVPA_STATUS_DEFINED


#ifndef NVPERF_NVPA_ACTIVITY_KIND_DEFINED
#define NVPERF_NVPA_ACTIVITY_KIND_DEFINED

/// The configuration's activity-kind dictates which types of data may be collected.
typedef enum NVPA_ActivityKind
{
    /// Invalid value.
    NVPA_ACTIVITY_KIND_INVALID = 0,
    /// A workload-centric activity for serialized and pipelined collection.
    ///
    /// Profiler is capable of collecting both serialized and pipelined metrics. The library introduces any
    /// synchronization required to collect serialized metrics.
    NVPA_ACTIVITY_KIND_PROFILER,
    /// A realtime activity for sampling counters from the CPU or GPU.
    NVPA_ACTIVITY_KIND_REALTIME_SAMPLED,
    /// A realtime activity for profiling counters from the CPU or GPU without CPU/GPU synchronizations.
    NVPA_ACTIVITY_KIND_REALTIME_PROFILER,
    // Sentinel: number of defined activity kinds, not a real kind.
    NVPA_ACTIVITY_KIND__COUNT
} NVPA_ActivityKind;


#endif // NVPERF_NVPA_ACTIVITY_KIND_DEFINED


#ifndef NVPERF_NVPA_BOOL_DEFINED
#define NVPERF_NVPA_BOOL_DEFINED
/// The type used for boolean values.
typedef uint8_t NVPA_Bool;
#endif // NVPERF_NVPA_BOOL_DEFINED

/* Size of 'type_' up to and including 'lastfield_'.  Used for the
   'structSize' versioning scheme: callers set structSize to the size of the
   struct as they compiled it, letting the library detect which fields exist. */
#ifndef NVPA_STRUCT_SIZE
#define NVPA_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
#endif // NVPA_STRUCT_SIZE

/* True when the caller's structSize is large enough to contain 'name_',
   i.e. the field was present in the header the caller compiled against. */
#ifndef NVPW_FIELD_EXISTS
#define NVPW_FIELD_EXISTS(pParams_, name_) \
    ((pParams_)->structSize >= (size_t)((const uint8_t*)(&(pParams_)->name_) + sizeof(pParams_)->name_ - (const uint8_t*)(pParams_)))
#endif // NVPW_FIELD_EXISTS


#ifndef NVPERF_NVPA_GETPROCADDRESS_DEFINED
#define NVPERF_NVPA_GETPROCADDRESS_DEFINED

// Generic function-pointer type returned by NVPA_GetProcAddress; cast to the
// actual prototype before calling.
typedef NVPA_Status (*NVPA_GenericFn)(void);


///
/// Gets the address of an NvPerf API function.
///
/// \return A function pointer to the function, or NULL if the function is not available.
///
/// \param pFunctionName [in] Name of the function to retrieve.
NVPA_GenericFn NVPA_GetProcAddress(const char* pFunctionName);

#endif

#ifndef NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED
#define NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED


typedef struct NVPW_SetLibraryLoadPaths_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in] number of paths in ppPaths
    size_t numPaths;
    /// [in] array of null-terminated paths
    const char** ppPaths;
} NVPW_SetLibraryLoadPaths_Params;
#define NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPaths_Params, ppPaths)

/// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget().
/// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO.  This function sets
/// ordered paths that will be searched with the LoadLibrary() or dlopen() call.
/// If load paths are set by this function, the default set of load paths
/// will not be attempted.
/// Each path must point at a directory (not a file name).
/// This function is not thread-safe.
/// Example Usage:
/// \code
///     const char* paths[] = {
///         "path1", "path2", etc
///     };
///     NVPW_SetLibraryLoadPaths_Params params{NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE};
///     params.numPaths = sizeof(paths)/sizeof(paths[0]);
///     params.ppPaths = paths;
///     NVPW_SetLibraryLoadPaths(&params);
///     NVPW_InitializeHost();
///     params.numPaths = 0;
///     params.ppPaths = NULL;
///     NVPW_SetLibraryLoadPaths(&params);
/// \endcode
NVPA_Status NVPW_SetLibraryLoadPaths(NVPW_SetLibraryLoadPaths_Params* pParams);

// Wide-character variant of NVPW_SetLibraryLoadPaths_Params.
typedef struct NVPW_SetLibraryLoadPathsW_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in] number of paths in ppwPaths
    size_t numPaths;
    /// [in] array of null-terminated paths
    const wchar_t** ppwPaths;
} NVPW_SetLibraryLoadPathsW_Params;
#define NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPathsW_Params, ppwPaths)

/// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget().
/// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO.  This function sets
/// ordered paths that will be searched with the LoadLibrary() or dlopen() call.
/// If load paths are set by this function, the default set of load paths
/// will not be attempted.
/// Each path must point at a directory (not a file name).
/// This function is not thread-safe.
/// Example Usage:
/// \code
///     const wchar_t* wpaths[] = {
///         L"path1", L"path2", etc
///     };
///     NVPW_SetLibraryLoadPathsW_Params params{NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE};
///     params.numPaths = sizeof(wpaths)/sizeof(wpaths[0]);
///     params.ppwPaths = wpaths;
///     NVPW_SetLibraryLoadPathsW(&params);
///     NVPW_InitializeHost();
///     params.numPaths = 0;
///     params.ppwPaths = NULL;
///     NVPW_SetLibraryLoadPathsW(&params);
/// \endcode
NVPA_Status NVPW_SetLibraryLoadPathsW(NVPW_SetLibraryLoadPathsW_Params* pParams);

#endif



#ifdef __cplusplus
} // extern "C"
#endif

#if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
    #pragma GCC visibility pop
#endif

#endif // NVPERF_COMMON_H
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_CUDA_HOST_H
2
+ #define NVPERF_CUDA_HOST_H
3
+
4
+ /*
5
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
#include <stddef.h>
#include <stdint.h>
#include "nvperf_common.h"
#include "nvperf_host.h"

/* Same symbol-visibility scheme as nvperf_common.h: export by default when
   building the shared library, NVPW_LOCAL marks hidden symbols. */
#if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
    #pragma GCC visibility push(default)
    #if !defined(NVPW_LOCAL)
        #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
    #endif
#else
    #if !defined(NVPW_LOCAL)
        #define NVPW_LOCAL
    #endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @file nvperf_cuda_host.h
 */

/// 'NVPA_MetricsContext' and its APIs are deprecated, please use 'NVPW_MetricsEvaluator' and its APIs instead.
typedef struct NVPA_MetricsContext NVPA_MetricsContext;

typedef struct NVPW_CUDA_MetricsContext_Create_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in]
    const char* pChipName;
    /// [out]
    struct NVPA_MetricsContext* pMetricsContext;
} NVPW_CUDA_MetricsContext_Create_Params;
#define NVPW_CUDA_MetricsContext_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsContext_Create_Params, pMetricsContext)

// Deprecated alongside NVPA_MetricsContext (see note above).
NVPA_Status NVPW_CUDA_MetricsContext_Create(NVPW_CUDA_MetricsContext_Create_Params* pParams);

typedef struct NVPW_CUDA_RawMetricsConfig_Create_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in]
    NVPA_ActivityKind activityKind;
    /// [in]
    const char* pChipName;
    /// [out] new NVPA_RawMetricsConfig object
    struct NVPA_RawMetricsConfig* pRawMetricsConfig;
} NVPW_CUDA_RawMetricsConfig_Create_Params;
#define NVPW_CUDA_RawMetricsConfig_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_Params, pRawMetricsConfig)

NVPA_Status NVPW_CUDA_RawMetricsConfig_Create(NVPW_CUDA_RawMetricsConfig_Create_Params* pParams);

// V2 adds 'pCounterAvailabilityImage' so chips newer than this header release
// can be described by a counter-availability blob instead of a chip name.
typedef struct NVPW_CUDA_RawMetricsConfig_Create_V2_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in]
    NVPA_ActivityKind activityKind;
    /// [in] accepted for chips supported at the time-of-release.
    const char* pChipName;
    /// [in] buffer with counter availability image - required for future chip support
    const uint8_t* pCounterAvailabilityImage;
    /// [out] new NVPA_RawMetricsConfig object
    struct NVPA_RawMetricsConfig* pRawMetricsConfig;
} NVPW_CUDA_RawMetricsConfig_Create_V2_Params;
#define NVPW_CUDA_RawMetricsConfig_Create_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_V2_Params, pRawMetricsConfig)

/// Use either 'pChipName' or 'pCounterAvailabilityImage'.
NVPA_Status NVPW_CUDA_RawMetricsConfig_Create_V2(NVPW_CUDA_RawMetricsConfig_Create_V2_Params* pParams);

typedef struct NVPW_CUDA_CounterDataBuilder_Create_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in] accepted for chips supported at the time-of-release.
    const char* pChipName;
    /// [in] buffer with counter availability image - required for future chip support
    const uint8_t* pCounterAvailabilityImage;
    /// [out] new NVPA_CounterDataBuilder object
    struct NVPA_CounterDataBuilder* pCounterDataBuilder;
} NVPW_CUDA_CounterDataBuilder_Create_Params;
#define NVPW_CUDA_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_CounterDataBuilder_Create_Params, pCounterDataBuilder)

/// Use either 'pChipName' or 'pCounterAvailabilityImage'.
NVPA_Status NVPW_CUDA_CounterDataBuilder_Create(NVPW_CUDA_CounterDataBuilder_Create_Params* pParams);

// Opaque handle; lives inside the caller-provided scratch buffer below.
typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator;

typedef struct NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in] accepted for chips supported at the time-of-release.
    const char* pChipName;
    /// [in] buffer with counter availability image - required for future chip support
    const uint8_t* pCounterAvailabilityImage;
    /// [out]
    size_t scratchBufferSize;
} NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params;
#define NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params, scratchBufferSize)

/// Use either 'pChipName' or 'pCounterAvailabilityImage'.
NVPA_Status NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params* pParams);

typedef struct NVPW_CUDA_MetricsEvaluator_Initialize_Params
{
    /// [in]
    size_t structSize;
    /// [in] assign to NULL
    void* pPriv;
    /// [in]
    uint8_t* pScratchBuffer;
    /// [in] the size of the 'pScratchBuffer' array, should be at least the size of the 'scratchBufferSize' returned
    /// by 'NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize'
    size_t scratchBufferSize;
    /// [in] accepted for chips supported at the time-of-release.
    const char* pChipName;
    /// [in] buffer with counter availability image - required for future chip support
    const uint8_t* pCounterAvailabilityImage;
    /// [in]
    const uint8_t* pCounterDataImage;
    /// [in] must be provided if 'pCounterDataImage' is not NULL
    size_t counterDataImageSize;
    /// [out]
    struct NVPW_MetricsEvaluator* pMetricsEvaluator;
} NVPW_CUDA_MetricsEvaluator_Initialize_Params;
#define NVPW_CUDA_MetricsEvaluator_Initialize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_Initialize_Params, pMetricsEvaluator)

/// Use one of 'pChipName', 'pCounterAvailabilityImage', or 'pCounterDataImage'. 'pChipName' or
/// 'pCounterAvailabilityImage' will create a metrics evaluator based on a virtual device while 'pCounterDataImage'
/// will create a metrics evaluator based on the actual device.
NVPA_Status NVPW_CUDA_MetricsEvaluator_Initialize(NVPW_CUDA_MetricsEvaluator_Initialize_Params* pParams);



#ifdef __cplusplus
} // extern "C"
#endif

#if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
    #pragma GCC visibility pop
#endif

#endif // NVPERF_CUDA_HOST_H
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_host.h ADDED
@@ -0,0 +1,1578 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_HOST_H
2
+ #define NVPERF_HOST_H
3
+
4
+ /*
5
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
+ #include <stddef.h>
42
+ #include <stdint.h>
43
+ #include "nvperf_common.h"
44
+
45
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
46
+ #pragma GCC visibility push(default)
47
+ #if !defined(NVPW_LOCAL)
48
+ #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
49
+ #endif
50
+ #else
51
+ #if !defined(NVPW_LOCAL)
52
+ #define NVPW_LOCAL
53
+ #endif
54
+ #endif
55
+
56
+ #ifdef __cplusplus
57
+ extern "C" {
58
+ #endif
59
+
60
+ /**
61
+ * @file nvperf_host.h
62
+ */
63
+
64
+
65
+ // Guard against multiple definition of NvPerf host types
66
+ #ifndef NVPERF_HOST_API_DEFINED
67
+ #define NVPERF_HOST_API_DEFINED
68
+
69
+
70
+ /***************************************************************************//**
71
+ * @name Host Configuration
72
+ * @{
73
+ */
74
+
75
+ typedef struct NVPW_InitializeHost_Params
76
+ {
77
+ /// [in]
78
+ size_t structSize;
79
+ /// [in] assign to NULL
80
+ void* pPriv;
81
+ } NVPW_InitializeHost_Params;
82
+ #define NVPW_InitializeHost_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_InitializeHost_Params, pPriv)
83
+
84
+ /// Load the host library.
85
+ NVPA_Status NVPW_InitializeHost(NVPW_InitializeHost_Params* pParams);
86
+
87
+ typedef struct NVPW_CounterData_CalculateCounterDataImageCopySize_Params
88
+ {
89
+ /// [in]
90
+ size_t structSize;
91
+ /// [in] assign to NULL
92
+ void* pPriv;
93
+ /// The CounterDataPrefix generated from e.g. nvperf2 initdata or
94
+ /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8).
95
+ const uint8_t* pCounterDataPrefix;
96
+ size_t counterDataPrefixSize;
97
+ /// max number of ranges that can be profiled
98
+ uint32_t maxNumRanges;
99
+ /// max number of RangeTree nodes; must be >= maxNumRanges
100
+ uint32_t maxNumRangeTreeNodes;
101
+ /// max string length of each RangeName, including the trailing NUL character
102
+ uint32_t maxRangeNameLength;
103
+ const uint8_t* pCounterDataSrc;
104
+ /// [out] required size of the copy buffer
105
+ size_t copyDataImageCounterSize;
106
+ } NVPW_CounterData_CalculateCounterDataImageCopySize_Params;
107
+ #define NVPW_CounterData_CalculateCounterDataImageCopySize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_CalculateCounterDataImageCopySize_Params, copyDataImageCounterSize)
108
+
109
+ NVPA_Status NVPW_CounterData_CalculateCounterDataImageCopySize(NVPW_CounterData_CalculateCounterDataImageCopySize_Params* pParams);
110
+
111
+ typedef struct NVPW_CounterData_InitializeCounterDataImageCopy_Params
112
+ {
113
+ /// [in]
114
+ size_t structSize;
115
+ /// [in] assign to NULL
116
+ void* pPriv;
117
+ /// The CounterDataPrefix generated from e.g. nvperf2 initdata or
118
+ /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8).
119
+ const uint8_t* pCounterDataPrefix;
120
+ size_t counterDataPrefixSize;
121
+ /// max number of ranges that can be profiled
122
+ uint32_t maxNumRanges;
123
+ /// max number of RangeTree nodes; must be >= maxNumRanges
124
+ uint32_t maxNumRangeTreeNodes;
125
+ /// max string length of each RangeName, including the trailing NUL character
126
+ uint32_t maxRangeNameLength;
127
+ const uint8_t* pCounterDataSrc;
128
+ uint8_t* pCounterDataDst;
129
+ } NVPW_CounterData_InitializeCounterDataImageCopy_Params;
130
+ #define NVPW_CounterData_InitializeCounterDataImageCopy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_InitializeCounterDataImageCopy_Params, pCounterDataDst)
131
+
132
+ NVPA_Status NVPW_CounterData_InitializeCounterDataImageCopy(NVPW_CounterData_InitializeCounterDataImageCopy_Params* pParams);
133
+
134
+ typedef struct NVPA_CounterDataCombiner NVPA_CounterDataCombiner;
135
+
136
+ typedef struct NVPW_CounterDataCombiner_Create_Params
137
+ {
138
+ /// [in]
139
+ size_t structSize;
140
+ /// [in] assign to NULL
141
+ void* pPriv;
142
+ /// The destination counter data into which the source datas will be combined
143
+ uint8_t* pCounterDataDst;
144
+ /// [out] The created counter data combiner
145
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
146
+ } NVPW_CounterDataCombiner_Create_Params;
147
+ #define NVPW_CounterDataCombiner_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Create_Params, pCounterDataCombiner)
148
+
149
+ NVPA_Status NVPW_CounterDataCombiner_Create(NVPW_CounterDataCombiner_Create_Params* pParams);
150
+
151
+ typedef struct NVPW_CounterDataCombiner_Destroy_Params
152
+ {
153
+ /// [in]
154
+ size_t structSize;
155
+ /// [in] assign to NULL
156
+ void* pPriv;
157
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
158
+ } NVPW_CounterDataCombiner_Destroy_Params;
159
+ #define NVPW_CounterDataCombiner_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Destroy_Params, pCounterDataCombiner)
160
+
161
+ NVPA_Status NVPW_CounterDataCombiner_Destroy(NVPW_CounterDataCombiner_Destroy_Params* pParams);
162
+
163
+ typedef struct NVPW_CounterDataCombiner_CreateRange_Params
164
+ {
165
+ /// [in]
166
+ size_t structSize;
167
+ /// [in] assign to NULL
168
+ void* pPriv;
169
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
170
+ size_t numDescriptions;
171
+ const char* const* ppDescriptions;
172
+ /// [out]
173
+ size_t rangeIndexDst;
174
+ } NVPW_CounterDataCombiner_CreateRange_Params;
175
+ #define NVPW_CounterDataCombiner_CreateRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CreateRange_Params, rangeIndexDst)
176
+
177
+ NVPA_Status NVPW_CounterDataCombiner_CreateRange(NVPW_CounterDataCombiner_CreateRange_Params* pParams);
178
+
179
+ typedef struct NVPW_CounterDataCombiner_CopyIntoRange_Params
180
+ {
181
+ /// [in]
182
+ size_t structSize;
183
+ /// [in] assign to NULL
184
+ void* pPriv;
185
+ /// [in]
186
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
187
+ /// [in]
188
+ size_t rangeIndexDst;
189
+ /// [in]
190
+ const uint8_t* pCounterDataSrc;
191
+ /// [in]
192
+ size_t rangeIndexSrc;
193
+ } NVPW_CounterDataCombiner_CopyIntoRange_Params;
194
+ #define NVPW_CounterDataCombiner_CopyIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CopyIntoRange_Params, rangeIndexSrc)
195
+
196
+ /// In order to use this API, the source counter data and the destination counter data must have identical counters
197
+ NVPA_Status NVPW_CounterDataCombiner_CopyIntoRange(NVPW_CounterDataCombiner_CopyIntoRange_Params* pParams);
198
+
199
+ typedef struct NVPW_CounterDataCombiner_AccumulateIntoRange_Params
200
+ {
201
+ /// [in]
202
+ size_t structSize;
203
+ /// [in] assign to NULL
204
+ void* pPriv;
205
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
206
+ size_t rangeIndexDst;
207
+ uint32_t dstMultiplier;
208
+ const uint8_t* pCounterDataSrc;
209
+ size_t rangeIndexSrc;
210
+ uint32_t srcMultiplier;
211
+ } NVPW_CounterDataCombiner_AccumulateIntoRange_Params;
212
+ #define NVPW_CounterDataCombiner_AccumulateIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_AccumulateIntoRange_Params, srcMultiplier)
213
+
214
+ NVPA_Status NVPW_CounterDataCombiner_AccumulateIntoRange(NVPW_CounterDataCombiner_AccumulateIntoRange_Params* pParams);
215
+
216
+ typedef struct NVPW_CounterDataCombiner_SumIntoRange_Params
217
+ {
218
+ /// [in]
219
+ size_t structSize;
220
+ /// [in] assign to NULL
221
+ void* pPriv;
222
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
223
+ size_t rangeIndexDst;
224
+ const uint8_t* pCounterDataSrc;
225
+ size_t rangeIndexSrc;
226
+ } NVPW_CounterDataCombiner_SumIntoRange_Params;
227
+ #define NVPW_CounterDataCombiner_SumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_SumIntoRange_Params, rangeIndexSrc)
228
+
229
+ NVPA_Status NVPW_CounterDataCombiner_SumIntoRange(NVPW_CounterDataCombiner_SumIntoRange_Params* pParams);
230
+
231
+ typedef struct NVPW_CounterDataCombiner_WeightedSumIntoRange_Params
232
+ {
233
+ /// [in]
234
+ size_t structSize;
235
+ /// [in] assign to NULL
236
+ void* pPriv;
237
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
238
+ size_t rangeIndexDst;
239
+ double dstMultiplier;
240
+ const uint8_t* pCounterDataSrc;
241
+ size_t rangeIndexSrc;
242
+ double srcMultiplier;
243
+ } NVPW_CounterDataCombiner_WeightedSumIntoRange_Params;
244
+ #define NVPW_CounterDataCombiner_WeightedSumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params, srcMultiplier)
245
+
246
+ NVPA_Status NVPW_CounterDataCombiner_WeightedSumIntoRange(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params* pParams);
247
+
248
+ /**
249
+ * @}
250
+ ******************************************************************************/
251
+
252
+ /***************************************************************************//**
253
+ * @name Metrics Configuration
254
+ * @{
255
+ */
256
+
257
+ typedef struct NVPA_RawMetricsConfig NVPA_RawMetricsConfig;
258
+
259
+ typedef struct NVPA_RawMetricRequest
260
+ {
261
+ /// [in]
262
+ size_t structSize;
263
+ /// [in] assign to NULL
264
+ void* pPriv;
265
+ /// in
266
+ const char* pMetricName;
267
+ /// in
268
+ NVPA_Bool isolated;
269
+ /// in; ignored by AddMetric but observed by CounterData initialization
270
+ NVPA_Bool keepInstances;
271
+ } NVPA_RawMetricRequest;
272
+ #define NVPA_RAW_METRIC_REQUEST_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPA_RawMetricRequest, keepInstances)
273
+
274
+ typedef struct NVPW_GetSupportedChipNames_Params
275
+ {
276
+ /// [in]
277
+ size_t structSize;
278
+ /// [in] assign to NULL
279
+ void* pPriv;
280
+ /// [out]
281
+ const char* const* ppChipNames;
282
+ /// [out]
283
+ size_t numChipNames;
284
+ } NVPW_GetSupportedChipNames_Params;
285
+ #define NVPW_GetSupportedChipNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_GetSupportedChipNames_Params, numChipNames)
286
+
287
+ NVPA_Status NVPW_GetSupportedChipNames(NVPW_GetSupportedChipNames_Params* pParams);
288
+
289
+ typedef struct NVPW_RawMetricsConfig_Destroy_Params
290
+ {
291
+ /// [in]
292
+ size_t structSize;
293
+ /// [in] assign to NULL
294
+ void* pPriv;
295
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
296
+ } NVPW_RawMetricsConfig_Destroy_Params;
297
+ #define NVPW_RawMetricsConfig_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_Destroy_Params, pRawMetricsConfig)
298
+
299
+ NVPA_Status NVPW_RawMetricsConfig_Destroy(NVPW_RawMetricsConfig_Destroy_Params* pParams);
300
+
301
+ typedef struct NVPW_RawMetricsConfig_SetCounterAvailability_Params
302
+ {
303
+ /// [in]
304
+ size_t structSize;
305
+ /// [in] assign to NULL
306
+ void* pPriv;
307
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
308
+ /// [in] buffer with counter availability image
309
+ const uint8_t* pCounterAvailabilityImage;
310
+ } NVPW_RawMetricsConfig_SetCounterAvailability_Params;
311
+ #define NVPW_RawMetricsConfig_SetCounterAvailability_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_SetCounterAvailability_Params, pCounterAvailabilityImage)
312
+
313
+ NVPA_Status NVPW_RawMetricsConfig_SetCounterAvailability(NVPW_RawMetricsConfig_SetCounterAvailability_Params* pParams);
314
+
315
+ typedef struct NVPW_RawMetricsConfig_BeginPassGroup_Params
316
+ {
317
+ /// [in]
318
+ size_t structSize;
319
+ /// [in] assign to NULL
320
+ void* pPriv;
321
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
322
+ size_t maxPassCount;
323
+ } NVPW_RawMetricsConfig_BeginPassGroup_Params;
324
+ #define NVPW_RawMetricsConfig_BeginPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_BeginPassGroup_Params, maxPassCount)
325
+
326
+ NVPA_Status NVPW_RawMetricsConfig_BeginPassGroup(NVPW_RawMetricsConfig_BeginPassGroup_Params* pParams);
327
+
328
+ typedef struct NVPW_RawMetricsConfig_EndPassGroup_Params
329
+ {
330
+ /// [in]
331
+ size_t structSize;
332
+ /// [in] assign to NULL
333
+ void* pPriv;
334
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
335
+ } NVPW_RawMetricsConfig_EndPassGroup_Params;
336
+ #define NVPW_RawMetricsConfig_EndPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_EndPassGroup_Params, pRawMetricsConfig)
337
+
338
+ NVPA_Status NVPW_RawMetricsConfig_EndPassGroup(NVPW_RawMetricsConfig_EndPassGroup_Params* pParams);
339
+
340
+ typedef struct NVPW_RawMetricsConfig_GetNumMetrics_Params
341
+ {
342
+ /// [in]
343
+ size_t structSize;
344
+ /// [in] assign to NULL
345
+ void* pPriv;
346
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
347
+ /// [out]
348
+ size_t numMetrics;
349
+ } NVPW_RawMetricsConfig_GetNumMetrics_Params;
350
+ #define NVPW_RawMetricsConfig_GetNumMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumMetrics_Params, numMetrics)
351
+
352
+ NVPA_Status NVPW_RawMetricsConfig_GetNumMetrics(NVPW_RawMetricsConfig_GetNumMetrics_Params* pParams);
353
+
354
+ typedef struct NVPW_RawMetricsConfig_GetMetricProperties_Params
355
+ {
356
+ /// [in]
357
+ size_t structSize;
358
+ /// [in] assign to NULL
359
+ void* pPriv;
360
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
361
+ size_t metricIndex;
362
+ /// [out]
363
+ const char* pMetricName;
364
+ /// [out]
365
+ NVPA_Bool supportsPipelined;
366
+ /// [out]
367
+ NVPA_Bool supportsIsolated;
368
+ } NVPW_RawMetricsConfig_GetMetricProperties_Params;
369
+ #define NVPW_RawMetricsConfig_GetMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_Params, supportsIsolated)
370
+
371
+ NVPA_Status NVPW_RawMetricsConfig_GetMetricProperties(NVPW_RawMetricsConfig_GetMetricProperties_Params* pParams);
372
+
373
+ typedef struct NVPW_RawMetricsConfig_GetMetricProperties_V2_Params
374
+ {
375
+ /// [in]
376
+ size_t structSize;
377
+ /// [in] assign to NULL
378
+ void* pPriv;
379
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
380
+ size_t metricIndex;
381
+ /// [out]
382
+ const char* pMetricName;
383
+ } NVPW_RawMetricsConfig_GetMetricProperties_V2_Params;
384
+ #define NVPW_RawMetricsConfig_GetMetricProperties_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params, pMetricName)
385
+
386
+ NVPA_Status NVPW_RawMetricsConfig_GetMetricProperties_V2(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params* pParams);
387
+
388
+ typedef struct NVPW_RawMetricsConfig_AddMetrics_Params
389
+ {
390
+ /// [in]
391
+ size_t structSize;
392
+ /// [in] assign to NULL
393
+ void* pPriv;
394
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
395
+ const NVPA_RawMetricRequest* pRawMetricRequests;
396
+ size_t numMetricRequests;
397
+ } NVPW_RawMetricsConfig_AddMetrics_Params;
398
+ #define NVPW_RawMetricsConfig_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_AddMetrics_Params, numMetricRequests)
399
+
400
+ NVPA_Status NVPW_RawMetricsConfig_AddMetrics(NVPW_RawMetricsConfig_AddMetrics_Params* pParams);
401
+
402
+ typedef struct NVPW_RawMetricsConfig_IsAddMetricsPossible_Params
403
+ {
404
+ /// [in]
405
+ size_t structSize;
406
+ /// [in] assign to NULL
407
+ void* pPriv;
408
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
409
+ const NVPA_RawMetricRequest* pRawMetricRequests;
410
+ size_t numMetricRequests;
411
+ /// [out]
412
+ NVPA_Bool isPossible;
413
+ } NVPW_RawMetricsConfig_IsAddMetricsPossible_Params;
414
+ #define NVPW_RawMetricsConfig_IsAddMetricsPossible_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params, isPossible)
415
+
416
+ NVPA_Status NVPW_RawMetricsConfig_IsAddMetricsPossible(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params* pParams);
417
+
418
+ typedef struct NVPW_RawMetricsConfig_GenerateConfigImage_Params
419
+ {
420
+ /// [in]
421
+ size_t structSize;
422
+ /// [in] assign to NULL
423
+ void* pPriv;
424
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
425
+ /// [in] If true, all existing pass groups may be merged to reduce number of passes.
426
+ /// If merge was successful, distribution of counters in passes may be updated as a side-effect. The effects
427
+ /// will be persistent in pRawMetricsConfig.
428
+ NVPA_Bool mergeAllPassGroups;
429
+ } NVPW_RawMetricsConfig_GenerateConfigImage_Params;
430
+ #define NVPW_RawMetricsConfig_GenerateConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GenerateConfigImage_Params, mergeAllPassGroups)
431
+
432
+ /// This API may fail if called inside a pass group with `mergeAllPassGroups` = true.
433
+ NVPA_Status NVPW_RawMetricsConfig_GenerateConfigImage(NVPW_RawMetricsConfig_GenerateConfigImage_Params* pParams);
434
+
435
+ typedef struct NVPW_RawMetricsConfig_GetConfigImage_Params
436
+ {
437
+ /// [in]
438
+ size_t structSize;
439
+ /// [in] assign to NULL
440
+ void* pPriv;
441
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
442
+ /// [in] Number of bytes allocated for pBuffer
443
+ size_t bytesAllocated;
444
+ /// [out] [optional] Buffer receiving the config image
445
+ uint8_t* pBuffer;
446
+ /// [out] Count of bytes that would be copied into pBuffer
447
+ size_t bytesCopied;
448
+ } NVPW_RawMetricsConfig_GetConfigImage_Params;
449
+ #define NVPW_RawMetricsConfig_GetConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetConfigImage_Params, bytesCopied)
450
+
451
+ NVPA_Status NVPW_RawMetricsConfig_GetConfigImage(NVPW_RawMetricsConfig_GetConfigImage_Params* pParams);
452
+
453
+ typedef struct NVPW_RawMetricsConfig_GetNumPasses_Params
454
+ {
455
+ /// [in]
456
+ size_t structSize;
457
+ /// [in] assign to NULL
458
+ void* pPriv;
459
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
460
+ /// [out]
461
+ size_t numPipelinedPasses;
462
+ /// [out]
463
+ size_t numIsolatedPasses;
464
+ } NVPW_RawMetricsConfig_GetNumPasses_Params;
465
+ #define NVPW_RawMetricsConfig_GetNumPasses_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_Params, numIsolatedPasses)
466
+
467
+ /// Total num passes = numPipelinedPasses + numIsolatedPasses * numNestingLevels
468
+ NVPA_Status NVPW_RawMetricsConfig_GetNumPasses(NVPW_RawMetricsConfig_GetNumPasses_Params* pParams);
469
+
470
+ typedef struct NVPW_RawMetricsConfig_GetNumPasses_V2_Params
471
+ {
472
+ /// [in]
473
+ size_t structSize;
474
+ /// [in] assign to NULL
475
+ void* pPriv;
476
+ /// [in]
477
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
478
+ /// [out]
479
+ size_t numPasses;
480
+ } NVPW_RawMetricsConfig_GetNumPasses_V2_Params;
481
+ #define NVPW_RawMetricsConfig_GetNumPasses_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_V2_Params, numPasses)
482
+
483
+ /// Total num passes = numPasses * numNestingLevels
484
+ NVPA_Status NVPW_RawMetricsConfig_GetNumPasses_V2(NVPW_RawMetricsConfig_GetNumPasses_V2_Params* pParams);
485
+
486
+ typedef struct NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params
487
+ {
488
+ /// [in]
489
+ size_t structSize;
490
+ /// [in] assign to NULL
491
+ void* pPriv;
492
+ /// [in] Typically created by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8).
493
+ const uint8_t* pConfig;
494
+ /// [in]
495
+ size_t configSize;
496
+ /// [out]
497
+ size_t sampleSize;
498
+ } NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params;
499
+ #define NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params, sampleSize)
500
+
501
+ /// Estimate per sample records size based on a virtual device
502
+ NVPA_Status NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params* pParams);
503
+
504
+ typedef struct NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params
505
+ {
506
+ /// [in]
507
+ size_t structSize;
508
+ /// [in] assign to NULL
509
+ void* pPriv;
510
+ /// [in] Typically created by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8).
511
+ const uint8_t* pConfig;
512
+ /// [in]
513
+ size_t configSize;
514
+ /// [out]
515
+ size_t sampleSize;
516
+ } NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params;
517
+ #define NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params, sampleSize)
518
+
519
+ /// Estimate per sample records size based on a virtual device
520
+ NVPA_Status NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params* pParams);
521
+
522
+ /**
523
+ * @}
524
+ ******************************************************************************/
525
+
526
+ typedef struct NVPW_Config_GetRawCounterInfo_Params
527
+ {
528
+ /// [in]
529
+ size_t structSize;
530
+ /// [in] assign to NULL
531
+ void* pPriv;
532
+ /// [in]
533
+ const uint8_t* pConfig;
534
+ /// [in]
535
+ size_t configSize;
536
+ /// [in]
537
+ const char* pRawCounterName;
538
+ /// [inout] array containing indices of passes the counter resides in. 'pPassIndices' is in, '*pPassIndices' is
539
+ /// out.
540
+ size_t* pPassIndices;
541
+ /// [inout] if 'pPassIndices' is NULL, the count of passes this counter resides in will be returned; otherwise
542
+ /// it should be set to the capacity of 'pPassIndices' array, and on return, it will be overwritten to reflect
543
+ /// the actual count filled into 'pPassIndices'
544
+ size_t numPassIndices;
545
+ } NVPW_Config_GetRawCounterInfo_Params;
546
+ #define NVPW_Config_GetRawCounterInfo_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetRawCounterInfo_Params, numPassIndices)
547
+
548
+ NVPA_Status NVPW_Config_GetRawCounterInfo(NVPW_Config_GetRawCounterInfo_Params* pParams);
549
+
550
+ typedef struct NVPW_Config_GetRawCounters_Params
551
+ {
552
+ /// [in]
553
+ size_t structSize;
554
+ /// [in] assign to NULL
555
+ void* pPriv;
556
+ /// [in]
557
+ const uint8_t* pConfig;
558
+ /// [in]
559
+ size_t configSize;
560
+ /// [in]
561
+ size_t passIndex;
562
+ /// [inout] array containing raw counter names. 'ppRawCounterNames' is in, '*ppRawCounterNames' is out.
563
+ const char** ppRawCounterNames;
564
+ /// [inout] if 'ppRawCounterNames' is NULL, the count of raw counters will be returned; otherwise it should be
565
+ /// set to the capacity of 'ppRawCounterNames' array, and on return, it will be overwritten to reflect the
566
+ /// actual count filled into 'ppRawCounterNames'
567
+ size_t numRawCounters;
568
+ } NVPW_Config_GetRawCounters_Params;
569
+ #define NVPW_Config_GetRawCounters_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetRawCounters_Params, numRawCounters)
570
+
571
+ NVPA_Status NVPW_Config_GetRawCounters(NVPW_Config_GetRawCounters_Params* pParams);
572
+
573
+ /***************************************************************************//**
574
+ * @name CounterData Creation
575
+ * @{
576
+ */
577
+
578
+ typedef struct NVPA_CounterDataBuilder NVPA_CounterDataBuilder;
579
+
580
+ typedef struct NVPW_CounterDataBuilder_Create_Params
581
+ {
582
+ /// [in]
583
+ size_t structSize;
584
+ /// [in] assign to NULL
585
+ void* pPriv;
586
+ /// [out]
587
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
588
+ const char* pChipName;
589
+ } NVPW_CounterDataBuilder_Create_Params;
590
+ #define NVPW_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Create_Params, pChipName)
591
+
592
+ NVPA_Status NVPW_CounterDataBuilder_Create(NVPW_CounterDataBuilder_Create_Params* pParams);
593
+
594
+ typedef struct NVPW_CounterDataBuilder_Destroy_Params
595
+ {
596
+ /// [in]
597
+ size_t structSize;
598
+ /// [in] assign to NULL
599
+ void* pPriv;
600
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
601
+ } NVPW_CounterDataBuilder_Destroy_Params;
602
+ #define NVPW_CounterDataBuilder_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Destroy_Params, pCounterDataBuilder)
603
+
604
+ NVPA_Status NVPW_CounterDataBuilder_Destroy(NVPW_CounterDataBuilder_Destroy_Params* pParams);
605
+
606
+ typedef struct NVPW_CounterDataBuilder_AddMetrics_Params
607
+ {
608
+ /// [in]
609
+ size_t structSize;
610
+ /// [in] assign to NULL
611
+ void* pPriv;
612
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
613
+ const NVPA_RawMetricRequest* pRawMetricRequests;
614
+ size_t numMetricRequests;
615
+ } NVPW_CounterDataBuilder_AddMetrics_Params;
616
+ #define NVPW_CounterDataBuilder_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_AddMetrics_Params, numMetricRequests)
617
+
618
+ NVPA_Status NVPW_CounterDataBuilder_AddMetrics(NVPW_CounterDataBuilder_AddMetrics_Params* pParams);
619
+
620
+ typedef struct NVPW_CounterDataBuilder_GetCounterDataPrefix_Params
621
+ {
622
+ /// [in]
623
+ size_t structSize;
624
+ /// [in] assign to NULL
625
+ void* pPriv;
626
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
627
+ /// [in] Number of bytes allocated for pBuffer
628
+ size_t bytesAllocated;
629
+ /// [out] [optional] Buffer receiving the counter data prefix
630
+ uint8_t* pBuffer;
631
+ /// [out] Count of bytes that would be copied to pBuffer
632
+ size_t bytesCopied;
633
+ } NVPW_CounterDataBuilder_GetCounterDataPrefix_Params;
634
+ #define NVPW_CounterDataBuilder_GetCounterDataPrefix_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params, bytesCopied)
635
+
636
+ NVPA_Status NVPW_CounterDataBuilder_GetCounterDataPrefix(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params* pParams);
637
+
638
+ /**
639
+ * @}
640
+ ******************************************************************************/
641
+
642
+ /***************************************************************************//**
643
+ * @name MetricsContext - metric configuration and evaluation
644
+ * @{
645
+ */
646
+
647
+ /// 'NVPA_MetricsContext' and its APIs are deprecated, please use 'NVPW_MetricsEvaluator' and its APIs instead.
648
+ typedef struct NVPA_MetricsContext NVPA_MetricsContext;
649
+
650
+ typedef enum NVPA_MetricDetailLevel
651
+ {
652
+ NVPA_METRIC_DETAIL_LEVEL_INVALID,
653
+ NVPA_METRIC_DETAIL_LEVEL_GPU,
654
+ NVPA_METRIC_DETAIL_LEVEL_ALL,
655
+ NVPA_METRIC_DETAIL_LEVEL_GPU_AND_LEAF_INSTANCES,
656
+ NVPA_METRIC_DETAIL_LEVEL__COUNT
657
+ } NVPA_MetricDetailLevel;
658
+
659
+ typedef struct NVPW_MetricsContext_Destroy_Params
660
+ {
661
+ /// [in]
662
+ size_t structSize;
663
+ /// [in] assign to NULL
664
+ void* pPriv;
665
+ NVPA_MetricsContext* pMetricsContext;
666
+ } NVPW_MetricsContext_Destroy_Params;
667
+ #define NVPW_MetricsContext_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_Destroy_Params, pMetricsContext)
668
+
669
+ NVPA_Status NVPW_MetricsContext_Destroy(NVPW_MetricsContext_Destroy_Params* pParams);
670
+
671
+ typedef struct NVPW_MetricsContext_RunScript_Params
672
+ {
673
+ /// [in]
674
+ size_t structSize;
675
+ /// [in] assign to NULL
676
+ void* pPriv;
677
+ NVPA_MetricsContext* pMetricsContext;
678
+ /// in : if true, upon error, calls PyErr_Print() which causes exceptions to be logged to stderr
679
+ NVPA_Bool printErrors;
680
+ /// in : the script source code
681
+ const char* pSource;
682
+ /// in : the filename reported in stack traces; if NULL, uses an auto-generated name
683
+ const char* pFileName;
684
+ } NVPW_MetricsContext_RunScript_Params;
685
+ #define NVPW_MetricsContext_RunScript_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_RunScript_Params, pFileName)
686
+
687
+ /// Runs code in the metrics module. Additional metrics can be added through this interface.
688
+ /// If printErrors is true, calls PyErr_Print() which causes exceptions to be logged to stderr.
689
+ /// Equivalent to:
690
+ /// exec(source, metrics.__dict__, metrics.__dict__)
691
+ NVPA_Status NVPW_MetricsContext_RunScript(NVPW_MetricsContext_RunScript_Params* pParams);
692
+
693
+ typedef struct NVPW_MetricsContext_ExecScript_Begin_Params
694
+ {
695
+ /// [in]
696
+ size_t structSize;
697
+ /// [in] assign to NULL
698
+ void* pPriv;
699
+ NVPA_MetricsContext* pMetricsContext;
700
+ /// in : if true, treats pSource as a statement to be eval'd; otherwise, calls exec.
701
+ NVPA_Bool isStatement;
702
+ /// in : if true, upon error, calls PyErr_Print() which causes exceptions to be logged to stderr
703
+ NVPA_Bool printErrors;
704
+ /// in : the script source code
705
+ const char* pSource;
706
+ /// in : the filename reported in stack traces; if NULL, uses an auto-generated name
707
+ const char* pFileName;
708
+ /// out: if isStatement, points at a string form of the evaluation; if !isStatement, points at
709
+ /// str(locals()['result'])
710
+ const char* pResultStr;
711
+ } NVPW_MetricsContext_ExecScript_Begin_Params;
712
+ #define NVPW_MetricsContext_ExecScript_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_ExecScript_Begin_Params, pResultStr)
713
+
714
+ /// Executes a script in the metrics module, but does not modify its contents (for ordinary queries).
715
+ /// Equivalent to one of:
716
+ /// eval(source, metrics.__dict__, {}) # isStatement true
717
+ /// exec(source, metrics.__dict__, {}) # isStatement false
718
+ NVPA_Status NVPW_MetricsContext_ExecScript_Begin(NVPW_MetricsContext_ExecScript_Begin_Params* pParams);
719
+
720
+ typedef struct NVPW_MetricsContext_ExecScript_End_Params
721
+ {
722
+ /// [in]
723
+ size_t structSize;
724
+ /// [in] assign to NULL
725
+ void* pPriv;
726
+ NVPA_MetricsContext* pMetricsContext;
727
+ } NVPW_MetricsContext_ExecScript_End_Params;
728
+ #define NVPW_MetricsContext_ExecScript_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_ExecScript_End_Params, pMetricsContext)
729
+
730
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_ExecScript_Begin.
731
+ NVPA_Status NVPW_MetricsContext_ExecScript_End(NVPW_MetricsContext_ExecScript_End_Params* pParams);
732
+
733
+ typedef struct NVPW_MetricsContext_GetCounterNames_Begin_Params
734
+ {
735
+ /// [in]
736
+ size_t structSize;
737
+ /// [in] assign to NULL
738
+ void* pPriv;
739
+ NVPA_MetricsContext* pMetricsContext;
740
+ /// [out]
741
+ size_t numCounters;
742
+ /// [out]
743
+ const char* const* ppCounterNames;
744
+ } NVPW_MetricsContext_GetCounterNames_Begin_Params;
745
+ #define NVPW_MetricsContext_GetCounterNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetCounterNames_Begin_Params, ppCounterNames)
746
+
747
+ /// Outputs (size, pointer) to an array of "const char* pCounterName". The lifetime of the array is tied to
748
+ /// MetricsContext. The names are sorted.
749
+ /// Impl: lazily creates list
750
+ NVPA_Status NVPW_MetricsContext_GetCounterNames_Begin(NVPW_MetricsContext_GetCounterNames_Begin_Params* pParams);
751
+
752
+ typedef struct NVPW_MetricsContext_GetCounterNames_End_Params
753
+ {
754
+ /// [in]
755
+ size_t structSize;
756
+ /// [in] assign to NULL
757
+ void* pPriv;
758
+ NVPA_MetricsContext* pMetricsContext;
759
+ } NVPW_MetricsContext_GetCounterNames_End_Params;
760
+ #define NVPW_MetricsContext_GetCounterNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetCounterNames_End_Params, pMetricsContext)
761
+
762
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetCounterNames_Begin.
763
+ NVPA_Status NVPW_MetricsContext_GetCounterNames_End(NVPW_MetricsContext_GetCounterNames_End_Params* pParams);
764
+
765
+ typedef struct NVPW_MetricsContext_GetThroughputNames_Begin_Params
766
+ {
767
+ /// [in]
768
+ size_t structSize;
769
+ /// [in] assign to NULL
770
+ void* pPriv;
771
+ NVPA_MetricsContext* pMetricsContext;
772
+ /// [out]
773
+ size_t numThroughputs;
774
+ /// [out]
775
+ const char* const* ppThroughputNames;
776
+ } NVPW_MetricsContext_GetThroughputNames_Begin_Params;
777
+ #define NVPW_MetricsContext_GetThroughputNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputNames_Begin_Params, ppThroughputNames)
778
+
779
+ /// Outputs (size, pointer) to an array of "const char* pThroughputName". The lifetime of the array is tied to
780
+ /// MetricsContext. The names are sorted.
781
+ /// Impl: lazily creates list
782
+ NVPA_Status NVPW_MetricsContext_GetThroughputNames_Begin(NVPW_MetricsContext_GetThroughputNames_Begin_Params* pParams);
783
+
784
+ typedef struct NVPW_MetricsContext_GetThroughputNames_End_Params
785
+ {
786
+ /// [in]
787
+ size_t structSize;
788
+ /// [in] assign to NULL
789
+ void* pPriv;
790
+ NVPA_MetricsContext* pMetricsContext;
791
+ } NVPW_MetricsContext_GetThroughputNames_End_Params;
792
+ #define NVPW_MetricsContext_GetThroughputNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputNames_End_Params, pMetricsContext)
793
+
794
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetThroughputNames_Begin.
795
+ NVPA_Status NVPW_MetricsContext_GetThroughputNames_End(NVPW_MetricsContext_GetThroughputNames_End_Params* pParams);
796
+
797
+ typedef struct NVPW_MetricsContext_GetRatioNames_Begin_Params
798
+ {
799
+ /// [in]
800
+ size_t structSize;
801
+ /// [in] assign to NULL
802
+ void* pPriv;
803
+ NVPA_MetricsContext* pMetricsContext;
804
+ /// [out]
805
+ size_t numRatios;
806
+ /// [out]
807
+ const char* const* ppRatioNames;
808
+ } NVPW_MetricsContext_GetRatioNames_Begin_Params;
809
+ #define NVPW_MetricsContext_GetRatioNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetRatioNames_Begin_Params, ppRatioNames)
810
+
811
+ /// Outputs (size, pointer) to an array of "const char* pRatioName". The lifetime of the array is tied to
812
+ /// MetricsContext. The names are sorted.
813
+ /// Impl: lazily creates list
814
+ NVPA_Status NVPW_MetricsContext_GetRatioNames_Begin(NVPW_MetricsContext_GetRatioNames_Begin_Params* pParams);
815
+
816
+ typedef struct NVPW_MetricsContext_GetRatioNames_End_Params
817
+ {
818
+ /// [in]
819
+ size_t structSize;
820
+ /// [in] assign to NULL
821
+ void* pPriv;
822
+ NVPA_MetricsContext* pMetricsContext;
823
+ } NVPW_MetricsContext_GetRatioNames_End_Params;
824
+ #define NVPW_MetricsContext_GetRatioNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetRatioNames_End_Params, pMetricsContext)
825
+
826
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetCounterNames_Begin.
827
+ NVPA_Status NVPW_MetricsContext_GetRatioNames_End(NVPW_MetricsContext_GetRatioNames_End_Params* pParams);
828
+
829
+ typedef struct NVPW_MetricsContext_GetMetricNames_Begin_Params
830
+ {
831
+ /// [in]
832
+ size_t structSize;
833
+ /// [in] assign to NULL
834
+ void* pPriv;
835
+ NVPA_MetricsContext* pMetricsContext;
836
+ /// out: number of elements in array ppMetricNames
837
+ size_t numMetrics;
838
+ /// out: pointer to array of 'const char* pMetricName'
839
+ const char* const* ppMetricNames;
840
+ /// in : if true, doesn't enumerate \<metric\>.peak_{burst, sustained}
841
+ NVPA_Bool hidePeakSubMetrics;
842
+ /// in : if true, doesn't enumerate \<metric\>.per_{active,elapsed,region,frame}_cycle
843
+ NVPA_Bool hidePerCycleSubMetrics;
844
+ /// in : if true, doesn't enumerate \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
845
+ NVPA_Bool hidePctOfPeakSubMetrics;
846
+ /// in : if false, enumerate \<unit\>__throughput.pct_of_peak_sustained_elapsed even if hidePctOfPeakSubMetrics
847
+ /// is true
848
+ NVPA_Bool hidePctOfPeakSubMetricsOnThroughputs;
849
+ } NVPW_MetricsContext_GetMetricNames_Begin_Params;
850
+ #define NVPW_MetricsContext_GetMetricNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricNames_Begin_Params, hidePctOfPeakSubMetricsOnThroughputs)
851
+
852
+ /// Outputs (size, pointer) to an array of "const char* pMetricName". The lifetime of the array is tied to
853
+ /// MetricsContext. The names are sorted.
854
+ /// Enumerates all metrics at all levels. Includes:
855
+ /// * counter.{sum,avg,min,max}
856
+ /// * throughput.{avg,min,max}
857
+ /// * \<metric\>.peak_{burst, sustained}
858
+ /// * \<metric\>.per_{active,elapsed,region,frame}_cycle
859
+ /// * \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
860
+ /// * \<metric\>.per.{other, other_pct}
861
+ NVPA_Status NVPW_MetricsContext_GetMetricNames_Begin(NVPW_MetricsContext_GetMetricNames_Begin_Params* pParams);
862
+
863
+ typedef struct NVPW_MetricsContext_GetMetricNames_End_Params
864
+ {
865
+ /// [in]
866
+ size_t structSize;
867
+ /// [in] assign to NULL
868
+ void* pPriv;
869
+ NVPA_MetricsContext* pMetricsContext;
870
+ } NVPW_MetricsContext_GetMetricNames_End_Params;
871
+ #define NVPW_MetricsContext_GetMetricNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricNames_End_Params, pMetricsContext)
872
+
873
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricNames_Begin.
874
+ NVPA_Status NVPW_MetricsContext_GetMetricNames_End(NVPW_MetricsContext_GetMetricNames_End_Params* pParams);
875
+
876
+ typedef struct NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params
877
+ {
878
+ /// [in]
879
+ size_t structSize;
880
+ /// [in] assign to NULL
881
+ void* pPriv;
882
+ NVPA_MetricsContext* pMetricsContext;
883
+ const char* pThroughputName;
884
+ const char* const* ppCounterNames;
885
+ const char* const* ppSubThroughputNames;
886
+ } NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params;
887
+ #define NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params, ppSubThroughputNames)
888
+
889
+ /// After this function returns, the lifetimes of strings pointed to by {ppCounterNames, ppSubThroughputNames,
890
+ /// ppSubMetricNames} are guaranteed until NVPW_MetricsContext_GetThroughputBreakdown_End, or until pMetricsContext
891
+ /// is destroyed
892
+ NVPA_Status NVPW_MetricsContext_GetThroughputBreakdown_Begin(NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params* pParams);
893
+
894
+ typedef struct NVPW_MetricsContext_GetThroughputBreakdown_End_Params
895
+ {
896
+ /// [in]
897
+ size_t structSize;
898
+ /// [in] assign to NULL
899
+ void* pPriv;
900
+ NVPA_MetricsContext* pMetricsContext;
901
+ } NVPW_MetricsContext_GetThroughputBreakdown_End_Params;
902
+ #define NVPW_MetricsContext_GetThroughputBreakdown_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputBreakdown_End_Params, pMetricsContext)
903
+
904
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetThroughputBreakdown_Begin.
905
+ NVPA_Status NVPW_MetricsContext_GetThroughputBreakdown_End(NVPW_MetricsContext_GetThroughputBreakdown_End_Params* pParams);
906
+
907
+ typedef struct NVPW_MetricsContext_GetMetricProperties_Begin_Params
908
+ {
909
+ /// [in]
910
+ size_t structSize;
911
+ /// [in] assign to NULL
912
+ void* pPriv;
913
+ NVPA_MetricsContext* pMetricsContext;
914
+ const char* pMetricName;
915
+ /// out
916
+ const char* pDescription;
917
+ /// out
918
+ const char* pDimUnits;
919
+ /// out: a NULL-terminated array of pointers to RawMetric names that can be passed to
920
+ /// NVPW_RawMetricsConfig_AddMetrics()
921
+ const char** ppRawMetricDependencies;
922
+ /// out: metric.peak_burst.value.gpu
923
+ double gpuBurstRate;
924
+ /// out: metric.peak_sustained.value.gpu
925
+ double gpuSustainedRate;
926
+ /// out: a NULL-terminated array of pointers to RawMetric names that can be passed to
927
+ /// NVPW_RawMetricsConfig_AddMetrics().
928
+ const char** ppOptionalRawMetricDependencies;
929
+ } NVPW_MetricsContext_GetMetricProperties_Begin_Params;
930
+ #define NVPW_MetricsContext_GetMetricProperties_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricProperties_Begin_Params, ppOptionalRawMetricDependencies)
931
+
932
+ /// After this function returns, the lifetimes of strings pointed to by pMetricProperties or
933
+ /// ppOptionalRawMetricDependencies are guaranteed until NVPW_MetricsContext_GetMetricProperties_End, or until
934
+ /// pMetricsContext is destroyed.
935
+ NVPA_Status NVPW_MetricsContext_GetMetricProperties_Begin(NVPW_MetricsContext_GetMetricProperties_Begin_Params* pParams);
936
+
937
+ typedef struct NVPW_MetricsContext_GetMetricProperties_End_Params
938
+ {
939
+ /// [in]
940
+ size_t structSize;
941
+ /// [in] assign to NULL
942
+ void* pPriv;
943
+ NVPA_MetricsContext* pMetricsContext;
944
+ } NVPW_MetricsContext_GetMetricProperties_End_Params;
945
+ #define NVPW_MetricsContext_GetMetricProperties_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricProperties_End_Params, pMetricsContext)
946
+
947
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricProperties_Begin.
948
+ NVPA_Status NVPW_MetricsContext_GetMetricProperties_End(NVPW_MetricsContext_GetMetricProperties_End_Params* pParams);
949
+
950
+ typedef struct NVPW_MetricsContext_SetCounterData_Params
951
+ {
952
+ /// [in]
953
+ size_t structSize;
954
+ /// [in] assign to NULL
955
+ void* pPriv;
956
+ NVPA_MetricsContext* pMetricsContext;
957
+ const uint8_t* pCounterDataImage;
958
+ size_t rangeIndex;
959
+ NVPA_Bool isolated;
960
+ } NVPW_MetricsContext_SetCounterData_Params;
961
+ #define NVPW_MetricsContext_SetCounterData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_SetCounterData_Params, isolated)
962
+
963
+ /// Sets data for subsequent evaluation calls.
964
+ /// Only one (CounterData, range, isolated) set of counters can be active at a time; subsequent calls will overwrite
965
+ /// previous calls' data.
966
+ NVPA_Status NVPW_MetricsContext_SetCounterData(NVPW_MetricsContext_SetCounterData_Params* pParams);
967
+
968
+ typedef struct NVPW_MetricsContext_SetUserData_Params
969
+ {
970
+ /// [in]
971
+ size_t structSize;
972
+ /// [in] assign to NULL
973
+ void* pPriv;
974
+ NVPA_MetricsContext* pMetricsContext;
975
+ /// duration in ns of user defined frame
976
+ double frameDuration;
977
+ /// duration in ns of user defined region
978
+ double regionDuration;
979
+ } NVPW_MetricsContext_SetUserData_Params;
980
+ #define NVPW_MetricsContext_SetUserData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_SetUserData_Params, regionDuration)
981
+
982
+ /// Sets user data for subsequent evaluation calls.
983
+ NVPA_Status NVPW_MetricsContext_SetUserData(NVPW_MetricsContext_SetUserData_Params* pParams);
984
+
985
+ typedef struct NVPW_MetricsContext_EvaluateToGpuValues_Params
986
+ {
987
+ /// [in]
988
+ size_t structSize;
989
+ /// [in] assign to NULL
990
+ void* pPriv;
991
+ NVPA_MetricsContext* pMetricsContext;
992
+ size_t numMetrics;
993
+ const char* const* ppMetricNames;
994
+ /// [out]
995
+ double* pMetricValues;
996
+ } NVPW_MetricsContext_EvaluateToGpuValues_Params;
997
+ #define NVPW_MetricsContext_EvaluateToGpuValues_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_EvaluateToGpuValues_Params, pMetricValues)
998
+
999
+ /// Evaluate multiple metrics to retrieve their GPU values.
1000
+ NVPA_Status NVPW_MetricsContext_EvaluateToGpuValues(NVPW_MetricsContext_EvaluateToGpuValues_Params* pParams);
1001
+
1002
+ typedef struct NVPW_MetricsContext_GetMetricSuffix_Begin_Params
1003
+ {
1004
+ /// [in]
1005
+ size_t structSize;
1006
+ /// [in] assign to NULL
1007
+ void* pPriv;
1008
+ NVPA_MetricsContext* pMetricsContext;
1009
+ /// in: pointer to the metric name
1010
+ const char* pMetricName;
1011
+ /// out: number of elements in array ppSuffixes
1012
+ size_t numSuffixes;
1013
+ /// out: pointer to array of 'const char* pSuffixes'
1014
+ const char* const* ppSuffixes;
1015
+ /// in : if true, doesn't enumerate \<metric\>.peak_{burst, sustained}
1016
+ NVPA_Bool hidePeakSubMetrics;
1017
+ /// in : if true, doesn't enumerate \<metric\>.per_{active,elapsed,region,frame}_cycle
1018
+ NVPA_Bool hidePerCycleSubMetrics;
1019
+ /// in : if true, doesn't enumerate \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
1020
+ NVPA_Bool hidePctOfPeakSubMetrics;
1021
+ /// in : if false, enumerate \<unit\>__throughput.pct_of_peak_sustained_elapsed even if hidePctOfPeakSubMetrics
1022
+ /// is true
1023
+ NVPA_Bool hidePctOfPeakSubMetricsOnThroughputs;
1024
+ } NVPW_MetricsContext_GetMetricSuffix_Begin_Params;
1025
+ #define NVPW_MetricsContext_GetMetricSuffix_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricSuffix_Begin_Params, hidePctOfPeakSubMetricsOnThroughputs)
1026
+
1027
+ /// Outputs (size, pointer) to an array of "const char* pSuffixes". The lifetime of the array is tied to
1028
+ /// MetricsContext.
1029
+ /// return all the suffixes the metric has. the possible suffixes include:
1030
+ /// * counter.{sum,avg,min,max}
1031
+ /// * throughput.{avg,min,max}
1032
+ /// * \<metric\>.peak_{burst, sustained}
1033
+ /// * \<metric\>.per_{active,elapsed,region,frame}_cycle
1034
+ /// * \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
1035
+ /// * \<metric\>.per.{other, other_pct}
1036
+ NVPA_Status NVPW_MetricsContext_GetMetricSuffix_Begin(NVPW_MetricsContext_GetMetricSuffix_Begin_Params* pParams);
1037
+
1038
+ typedef struct NVPW_MetricsContext_GetMetricSuffix_End_Params
1039
+ {
1040
+ /// [in]
1041
+ size_t structSize;
1042
+ /// [in] assign to NULL
1043
+ void* pPriv;
1044
+ NVPA_MetricsContext* pMetricsContext;
1045
+ } NVPW_MetricsContext_GetMetricSuffix_End_Params;
1046
+ #define NVPW_MetricsContext_GetMetricSuffix_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricSuffix_End_Params, pMetricsContext)
1047
+
1048
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricSuffix_Begin.
1049
+ NVPA_Status NVPW_MetricsContext_GetMetricSuffix_End(NVPW_MetricsContext_GetMetricSuffix_End_Params* pParams);
1050
+
1051
+ typedef struct NVPW_MetricsContext_GetMetricBaseNames_Begin_Params
1052
+ {
1053
+ /// [in]
1054
+ size_t structSize;
1055
+ /// [in] assign to NULL
1056
+ void* pPriv;
1057
+ NVPA_MetricsContext* pMetricsContext;
1058
+ /// out: number of elements in array pMetricsBaseNames
1059
+ size_t numMetricBaseNames;
1060
+ /// out: pointer to array of 'const char* pMetricsBaseName'
1061
+ const char* const* ppMetricBaseNames;
1062
+ } NVPW_MetricsContext_GetMetricBaseNames_Begin_Params;
1063
+ #define NVPW_MetricsContext_GetMetricBaseNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricBaseNames_Begin_Params, ppMetricBaseNames)
1064
+
1065
+ /// Outputs (size, pointer) to an array of "const char* ppMetricBaseNames". The lifetime of the array is tied to
1066
+ /// MetricsContext.
1067
+ /// return all the metric base names.
1068
+ NVPA_Status NVPW_MetricsContext_GetMetricBaseNames_Begin(NVPW_MetricsContext_GetMetricBaseNames_Begin_Params* pParams);
1069
+
1070
+ typedef struct NVPW_MetricsContext_GetMetricBaseNames_End_Params
1071
+ {
1072
+ /// [in]
1073
+ size_t structSize;
1074
+ /// [in] assign to NULL
1075
+ void* pPriv;
1076
+ NVPA_MetricsContext* pMetricsContext;
1077
+ } NVPW_MetricsContext_GetMetricBaseNames_End_Params;
1078
+ #define NVPW_MetricsContext_GetMetricBaseNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricBaseNames_End_Params, pMetricsContext)
1079
+
1080
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricBaseNames_Begin.
1081
+ NVPA_Status NVPW_MetricsContext_GetMetricBaseNames_End(NVPW_MetricsContext_GetMetricBaseNames_End_Params* pParams);
1082
+
1083
+ /**
1084
+ * @}
1085
+ ******************************************************************************/
1086
+
1087
+ /***************************************************************************//**
1088
+ * @name Metrics Evaluator
1089
+ * @{
1090
+ */
1091
+
1092
+ typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator;
1093
+
1094
+ #ifndef NVPW_DIM_UNIT_DEFINED
1095
+ #define NVPW_DIM_UNIT_DEFINED
1096
+ typedef enum NVPW_DimUnitName
1097
+ {
1098
+ NVPW_DIM_UNIT_INVALID = 3518299157,
1099
+ NVPW_DIM_UNIT_UNITLESS = 2126137902,
1100
+ NVPW_DIM_UNIT_ATTRIBUTES = 3776338729,
1101
+ NVPW_DIM_UNIT_BYTES = 3797850191,
1102
+ NVPW_DIM_UNIT_CTAS = 1960564139,
1103
+ NVPW_DIM_UNIT_CTC_CYCLES = 2224883873,
1104
+ NVPW_DIM_UNIT_DRAM_CYCLES = 2650981327,
1105
+ NVPW_DIM_UNIT_FBP_CYCLES = 1785238957,
1106
+ NVPW_DIM_UNIT_FE_OPS = 2919159083,
1107
+ NVPW_DIM_UNIT_GPC_CYCLES = 1222631184,
1108
+ NVPW_DIM_UNIT_IDC_REQUESTS = 2012649669,
1109
+ NVPW_DIM_UNIT_INSTRUCTIONS = 1418625543,
1110
+ NVPW_DIM_UNIT_KILOBYTES = 1335980302,
1111
+ NVPW_DIM_UNIT_L1DATA_BANK_ACCESSES = 1479493682,
1112
+ NVPW_DIM_UNIT_L1DATA_BANK_CONFLICTS = 3433170787,
1113
+ NVPW_DIM_UNIT_L1TEX_REQUESTS = 1306473767,
1114
+ NVPW_DIM_UNIT_L1TEX_TAGS = 26573010,
1115
+ NVPW_DIM_UNIT_L1TEX_WAVEFRONTS = 129373765,
1116
+ NVPW_DIM_UNIT_L2_REQUESTS = 1143695106,
1117
+ NVPW_DIM_UNIT_L2_SECTORS = 3424101564,
1118
+ NVPW_DIM_UNIT_L2_TAGS = 3755612781,
1119
+ NVPW_DIM_UNIT_MCC_CYCLES = 1826685787,
1120
+ NVPW_DIM_UNIT_NANOSECONDS = 3047500672,
1121
+ NVPW_DIM_UNIT_NVLRX_CYCLES = 4059934930,
1122
+ NVPW_DIM_UNIT_NVLTX_CYCLES = 1814350488,
1123
+ NVPW_DIM_UNIT_PCIE_CYCLES = 1230450943,
1124
+ NVPW_DIM_UNIT_PERCENT = 1284354694,
1125
+ NVPW_DIM_UNIT_PIXELS = 4227616663,
1126
+ NVPW_DIM_UNIT_PIXEL_SHADER_BARRIERS = 3705502518,
1127
+ NVPW_DIM_UNIT_PRIMITIVES = 2373084002,
1128
+ NVPW_DIM_UNIT_QUADS = 1539753497,
1129
+ NVPW_DIM_UNIT_REGISTERS = 2837260947,
1130
+ NVPW_DIM_UNIT_SAMPLES = 746046551,
1131
+ NVPW_DIM_UNIT_SECONDS = 1164825258,
1132
+ NVPW_DIM_UNIT_SYS_CYCLES = 3310821688,
1133
+ NVPW_DIM_UNIT_TEXELS = 1293214069,
1134
+ NVPW_DIM_UNIT_THREADS = 164261907,
1135
+ NVPW_DIM_UNIT_VERTICES = 1873662209,
1136
+ NVPW_DIM_UNIT_WARPS = 97951949,
1137
+ NVPW_DIM_UNIT_WORKLOADS = 1728142656
1138
+ } NVPW_DimUnitName;
1139
+ #endif //NVPW_DIM_UNIT_DEFINED
1140
+
1141
+ #ifndef NVPW_HW_UNIT_DEFINED
1142
+ #define NVPW_HW_UNIT_DEFINED
1143
+ typedef enum NVPW_HwUnit
1144
+ {
1145
+ NVPW_HW_UNIT_INVALID = 3498035701,
1146
+ NVPW_HW_UNIT_CROP = 2872137846,
1147
+ NVPW_HW_UNIT_DRAM = 1662616918,
1148
+ NVPW_HW_UNIT_DRAMC = 1401232876,
1149
+ NVPW_HW_UNIT_FBP = 2947194306,
1150
+ NVPW_HW_UNIT_FBPA = 690045803,
1151
+ NVPW_HW_UNIT_FE = 2204924321,
1152
+ NVPW_HW_UNIT_GPC = 1911735839,
1153
+ NVPW_HW_UNIT_GPU = 1014363534,
1154
+ NVPW_HW_UNIT_GR = 2933618517,
1155
+ NVPW_HW_UNIT_IDC = 842765289,
1156
+ NVPW_HW_UNIT_L1TEX = 893940957,
1157
+ NVPW_HW_UNIT_LTS = 2333266697,
1158
+ NVPW_HW_UNIT_MCC = 3980130194,
1159
+ NVPW_HW_UNIT_NVLRX = 3091684901,
1160
+ NVPW_HW_UNIT_NVLTX = 869679659,
1161
+ NVPW_HW_UNIT_PCIE = 3433264174,
1162
+ NVPW_HW_UNIT_PDA = 345193251,
1163
+ NVPW_HW_UNIT_PES = 804128425,
1164
+ NVPW_HW_UNIT_PROP = 3339255507,
1165
+ NVPW_HW_UNIT_RASTER = 187932504,
1166
+ NVPW_HW_UNIT_SM = 724224710,
1167
+ NVPW_HW_UNIT_SMSP = 2837616917,
1168
+ NVPW_HW_UNIT_SYS = 768990063,
1169
+ NVPW_HW_UNIT_TPC = 1889024613,
1170
+ NVPW_HW_UNIT_VAF = 753670509,
1171
+ NVPW_HW_UNIT_VPC = 275561583,
1172
+ NVPW_HW_UNIT_ZROP = 979500456
1173
+ } NVPW_HwUnit;
1174
+ #endif //NVPW_HW_UNIT_DEFINED
1175
+
1176
+ typedef enum NVPW_RollupOp
1177
+ {
1178
+ NVPW_ROLLUP_OP_AVG = 0,
1179
+ NVPW_ROLLUP_OP_MAX,
1180
+ NVPW_ROLLUP_OP_MIN,
1181
+ NVPW_ROLLUP_OP_SUM,
1182
+ NVPW_ROLLUP_OP__COUNT
1183
+ } NVPW_RollupOp;
1184
+
1185
+ typedef enum NVPW_MetricType
1186
+ {
1187
+ NVPW_METRIC_TYPE_COUNTER = 0,
1188
+ NVPW_METRIC_TYPE_RATIO,
1189
+ NVPW_METRIC_TYPE_THROUGHPUT,
1190
+ NVPW_METRIC_TYPE__COUNT
1191
+ } NVPW_MetricType;
1192
+
1193
+ typedef enum NVPW_Submetric
1194
+ {
1195
+ NVPW_SUBMETRIC_NONE = 0,
1196
+ NVPW_SUBMETRIC_PEAK_SUSTAINED = 1,
1197
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE = 2,
1198
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE_PER_SECOND = 3,
1199
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED = 4,
1200
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED_PER_SECOND = 5,
1201
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME = 6,
1202
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME_PER_SECOND = 7,
1203
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION = 8,
1204
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION_PER_SECOND = 9,
1205
+ NVPW_SUBMETRIC_PER_CYCLE_ACTIVE = 10,
1206
+ NVPW_SUBMETRIC_PER_CYCLE_ELAPSED = 11,
1207
+ NVPW_SUBMETRIC_PER_CYCLE_IN_FRAME = 12,
1208
+ NVPW_SUBMETRIC_PER_CYCLE_IN_REGION = 13,
1209
+ NVPW_SUBMETRIC_PER_SECOND = 14,
1210
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ACTIVE = 15,
1211
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ELAPSED = 16,
1212
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_FRAME = 17,
1213
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_REGION = 18,
1214
+ NVPW_SUBMETRIC_MAX_RATE = 19,
1215
+ NVPW_SUBMETRIC_PCT = 20,
1216
+ NVPW_SUBMETRIC_RATIO = 21,
1217
+ NVPW_SUBMETRIC__COUNT
1218
+ } NVPW_Submetric;
1219
+
1220
+ typedef struct NVPW_MetricEvalRequest
1221
+ {
1222
+ /// the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1223
+ size_t metricIndex;
1224
+ /// one of 'NVPW_MetricType'
1225
+ uint8_t metricType;
1226
+ /// one of 'NVPW_RollupOp', required for Counter and Throughput, doesn't apply to Ratio
1227
+ uint8_t rollupOp;
1228
+ /// one of 'NVPW_Submetric', required for Ratio and Throughput, optional for Counter
1229
+ uint16_t submetric;
1230
+ } NVPW_MetricEvalRequest;
1231
+ #define NVPW_MetricEvalRequest_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricEvalRequest, submetric)
1232
+
1233
+ typedef struct NVPW_DimUnitFactor
1234
+ {
1235
+ /// one of 'NVPW_DimUnitName'
1236
+ uint32_t dimUnit;
1237
+ int8_t exponent;
1238
+ } NVPW_DimUnitFactor;
1239
+ #define NVPW_DimUnitFactor_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_DimUnitFactor, exponent)
1240
+
1241
+ typedef struct NVPW_MetricsEvaluator_Destroy_Params
1242
+ {
1243
+ /// [in]
1244
+ size_t structSize;
1245
+ /// [in] assign to NULL
1246
+ void* pPriv;
1247
+ /// [in]
1248
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1249
+ } NVPW_MetricsEvaluator_Destroy_Params;
1250
+ #define NVPW_MetricsEvaluator_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_Destroy_Params, pMetricsEvaluator)
1251
+
1252
+ NVPA_Status NVPW_MetricsEvaluator_Destroy(NVPW_MetricsEvaluator_Destroy_Params* pParams);
1253
+
1254
+ typedef struct NVPW_MetricsEvaluator_GetMetricNames_Params
1255
+ {
1256
+ /// [in]
1257
+ size_t structSize;
1258
+ /// [in] assign to NULL
1259
+ void* pPriv;
1260
+ /// [in]
1261
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1262
+ /// [in] one of 'NVPW_MetricType'
1263
+ uint8_t metricType;
1264
+ /// [out]
1265
+ const char* pMetricNames;
1266
+ /// [out]
1267
+ const size_t* pMetricNameBeginIndices;
1268
+ /// [out]
1269
+ size_t numMetrics;
1270
+ } NVPW_MetricsEvaluator_GetMetricNames_Params;
1271
+ #define NVPW_MetricsEvaluator_GetMetricNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricNames_Params, numMetrics)
1272
+
1273
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricNames(NVPW_MetricsEvaluator_GetMetricNames_Params* pParams);
1274
+
1275
+ typedef struct NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params
1276
+ {
1277
+ /// [in]
1278
+ size_t structSize;
1279
+ /// [in] assign to NULL
1280
+ void* pPriv;
1281
+ /// [in]
1282
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1283
+ /// [in] can be either a base metric or a metric
1284
+ const char* pMetricName;
1285
+ /// [out] one of 'NVPW_MetricType'
1286
+ uint8_t metricType;
1287
+ /// [out] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1288
+ size_t metricIndex;
1289
+ } NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params;
1290
+ #define NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params, metricIndex)
1291
+
1292
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricTypeAndIndex(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params* pParams);
1293
+
1294
+ typedef struct NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params
1295
+ {
1296
+ /// [in]
1297
+ size_t structSize;
1298
+ /// [in] assign to NULL
1299
+ void* pPriv;
1300
+ /// [in]
1301
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1302
+ /// [in]
1303
+ const char* pMetricName;
1304
+ /// [inout] 'pMetricEvalRequest' is in, '*pMetricEvalRequest' is out
1305
+ struct NVPW_MetricEvalRequest* pMetricEvalRequest;
1306
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1307
+ size_t metricEvalRequestStructSize;
1308
+ } NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params;
1309
+ #define NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params, metricEvalRequestStructSize)
1310
+
1311
+ NVPA_Status NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params* pParams);
1312
+
1313
+ typedef struct NVPW_MetricsEvaluator_HwUnitToString_Params
1314
+ {
1315
+ /// [in]
1316
+ size_t structSize;
1317
+ /// [in] assign to NULL
1318
+ void* pPriv;
1319
+ /// [in]
1320
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1321
+ /// [in] one of 'NVPW_HwUnit'
1322
+ uint32_t hwUnit;
1323
+ /// [out]
1324
+ const char* pHwUnitName;
1325
+ } NVPW_MetricsEvaluator_HwUnitToString_Params;
1326
+ #define NVPW_MetricsEvaluator_HwUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_HwUnitToString_Params, pHwUnitName)
1327
+
1328
+ NVPA_Status NVPW_MetricsEvaluator_HwUnitToString(NVPW_MetricsEvaluator_HwUnitToString_Params* pParams);
1329
+
1330
+ typedef struct NVPW_MetricsEvaluator_GetCounterProperties_Params
1331
+ {
1332
+ /// [in]
1333
+ size_t structSize;
1334
+ /// [in] assign to NULL
1335
+ void* pPriv;
1336
+ /// [in]
1337
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1338
+ /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1339
+ size_t counterIndex;
1340
+ /// [out]
1341
+ const char* pDescription;
1342
+ /// [out] one of 'NVPW_HwUnit'
1343
+ uint32_t hwUnit;
1344
+ } NVPW_MetricsEvaluator_GetCounterProperties_Params;
1345
+ #define NVPW_MetricsEvaluator_GetCounterProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetCounterProperties_Params, hwUnit)
1346
+
1347
+ NVPA_Status NVPW_MetricsEvaluator_GetCounterProperties(NVPW_MetricsEvaluator_GetCounterProperties_Params* pParams);
1348
+
1349
+ typedef struct NVPW_MetricsEvaluator_GetRatioMetricProperties_Params
1350
+ {
1351
+ /// [in]
1352
+ size_t structSize;
1353
+ /// [in] assign to NULL
1354
+ void* pPriv;
1355
+ /// [in]
1356
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1357
+ /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1358
+ size_t ratioMetricIndex;
1359
+ /// [out]
1360
+ const char* pDescription;
1361
+ /// [out]
1362
+ uint64_t hwUnit;
1363
+ } NVPW_MetricsEvaluator_GetRatioMetricProperties_Params;
1364
+ #define NVPW_MetricsEvaluator_GetRatioMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params, hwUnit)
1365
+
1366
+ NVPA_Status NVPW_MetricsEvaluator_GetRatioMetricProperties(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params* pParams);
1367
+
1368
+ typedef struct NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params
1369
+ {
1370
+ /// [in]
1371
+ size_t structSize;
1372
+ /// [in] assign to NULL
1373
+ void* pPriv;
1374
+ /// [in]
1375
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1376
+ /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1377
+ size_t throughputMetricIndex;
1378
+ /// [out]
1379
+ const char* pDescription;
1380
+ /// [out]
1381
+ uint32_t hwUnit;
1382
+ /// [out] number of constituent counters for the throughput metric
1383
+ size_t numCounters;
1384
+ /// [out] metric indices as in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numCounters' > 0, otherwise
1385
+ /// returned as nullptr
1386
+ const size_t* pCounterIndices;
1387
+ /// [out] number of constituent sub-throughputs for the throughput metric
1388
+ size_t numSubThroughputs;
1389
+ /// [out] metric indices as in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numSubThroughputs' > 0,
1390
+ /// otherwise returned as nullptr
1391
+ const size_t* pSubThroughputIndices;
1392
+ } NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params;
1393
+ #define NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params, pSubThroughputIndices)
1394
+
1395
+ NVPA_Status NVPW_MetricsEvaluator_GetThroughputMetricProperties(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params* pParams);
1396
+
1397
+ typedef struct NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params
1398
+ {
1399
+ /// [in]
1400
+ size_t structSize;
1401
+ /// [in] assign to NULL
1402
+ void* pPriv;
1403
+ /// [in]
1404
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1405
+ /// [in] one of 'NVPW_MetricType'
1406
+ uint8_t metricType;
1407
+ /// [out] an array of 'NVPW_Submetric'
1408
+ const uint16_t* pSupportedSubmetrics;
1409
+ /// [out]
1410
+ size_t numSupportedSubmetrics;
1411
+ } NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params;
1412
+ #define NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params, numSupportedSubmetrics)
1413
+
1414
+ NVPA_Status NVPW_MetricsEvaluator_GetSupportedSubmetrics(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params* pParams);
1415
+
1416
+ typedef struct NVPW_MetricsEvaluator_GetMetricRawDependencies_Params
1417
+ {
1418
+ /// [in]
1419
+ size_t structSize;
1420
+ /// [in] assign to NULL
1421
+ void* pPriv;
1422
+ /// [in]
1423
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1424
+ /// [in]
1425
+ const struct NVPW_MetricEvalRequest* pMetricEvalRequests;
1426
+ /// [in]
1427
+ size_t numMetricEvalRequests;
1428
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1429
+ size_t metricEvalRequestStructSize;
1430
+ /// [in] set to sizeof('NVPW_MetricEvalRequest')
1431
+ size_t metricEvalRequestStrideSize;
1432
+ /// [inout] 'ppRawDependencies' is in, '*ppRawDependencies' is out
1433
+ const char** ppRawDependencies;
1434
+ /// [inout] if 'ppRawDependencies' is NULL, number of raw dependencies available will be returned; otherwise it
1435
+ /// should be set to the number of elements allocated for 'ppRawDependencies', and on return, it will be
1436
+ /// overwritten by number of elements copied to 'ppRawDependencies'
1437
+ size_t numRawDependencies;
1438
+ /// [inout] 'ppOptionalRawDependencies' is in, '*ppOptionalRawDependencies' is out
1439
+ const char** ppOptionalRawDependencies;
1440
+ /// [inout] if 'ppOptionalRawDependencies' is NULL, number of optional raw dependencies available will be
1441
+ /// returned; otherwise it should be set to the number of elements allocated for 'ppOptionalRawDependencies',
1442
+ /// and on return, it will be overwritten by number of elements copied to 'ppOptionalRawDependencies'
1443
+ size_t numOptionalRawDependencies;
1444
+ } NVPW_MetricsEvaluator_GetMetricRawDependencies_Params;
1445
+ #define NVPW_MetricsEvaluator_GetMetricRawDependencies_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params, numOptionalRawDependencies)
1446
+
1447
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricRawDependencies(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params* pParams);
1448
+
1449
+ typedef struct NVPW_MetricsEvaluator_DimUnitToString_Params
1450
+ {
1451
+ /// [in]
1452
+ size_t structSize;
1453
+ /// [in] assign to NULL
1454
+ void* pPriv;
1455
+ /// [in]
1456
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1457
+ /// [in] one of 'NVPW_DimUnitName'
1458
+ uint32_t dimUnit;
1459
+ /// [out]
1460
+ const char* pSingularName;
1461
+ /// [out]
1462
+ const char* pPluralName;
1463
+ } NVPW_MetricsEvaluator_DimUnitToString_Params;
1464
+ #define NVPW_MetricsEvaluator_DimUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_DimUnitToString_Params, pPluralName)
1465
+
1466
+ NVPA_Status NVPW_MetricsEvaluator_DimUnitToString(NVPW_MetricsEvaluator_DimUnitToString_Params* pParams);
1467
+
1468
+ typedef struct NVPW_MetricsEvaluator_GetMetricDimUnits_Params
1469
+ {
1470
+ /// [in]
1471
+ size_t structSize;
1472
+ /// [in] assign to NULL
1473
+ void* pPriv;
1474
+ /// [in]
1475
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1476
+ /// [in]
1477
+ const struct NVPW_MetricEvalRequest* pMetricEvalRequest;
1478
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1479
+ size_t metricEvalRequestStructSize;
1480
+ /// [inout] 'pDimUnits' is in, '*pDimUnits' is out
1481
+ NVPW_DimUnitFactor* pDimUnits;
1482
+ /// [inout] if 'pDimUnits' is NULL, number of dim-units available will be returned; otherwise it should be set
1483
+ /// to the number of elements allocated for 'pDimUnits', and on return, it will be overwritten by number of
1484
+ /// elements copied to 'pDimUnits'
1485
+ size_t numDimUnits;
1486
+ /// [in] set to 'NVPW_DimUnitFactor_STRUCT_SIZE'
1487
+ size_t dimUnitFactorStructSize;
1488
+ } NVPW_MetricsEvaluator_GetMetricDimUnits_Params;
1489
+ #define NVPW_MetricsEvaluator_GetMetricDimUnits_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricDimUnits_Params, dimUnitFactorStructSize)
1490
+
1491
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricDimUnits(NVPW_MetricsEvaluator_GetMetricDimUnits_Params* pParams);
1492
+
1493
+ typedef struct NVPW_MetricsEvaluator_SetUserData_Params
1494
+ {
1495
+ /// [in]
1496
+ size_t structSize;
1497
+ /// [in] assign to NULL
1498
+ void* pPriv;
1499
+ /// [in]
1500
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1501
+ /// [in] duration in ns of user defined frame
1502
+ double frameDuration;
1503
+ /// [in] duration in ns of user defined region
1504
+ double regionDuration;
1505
+ /// [in]
1506
+ NVPA_Bool isolated;
1507
+ } NVPW_MetricsEvaluator_SetUserData_Params;
1508
+ #define NVPW_MetricsEvaluator_SetUserData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetUserData_Params, isolated)
1509
+
1510
+ NVPA_Status NVPW_MetricsEvaluator_SetUserData(NVPW_MetricsEvaluator_SetUserData_Params* pParams);
1511
+
1512
+ typedef struct NVPW_MetricsEvaluator_EvaluateToGpuValues_Params
1513
+ {
1514
+ /// [in]
1515
+ size_t structSize;
1516
+ /// [in] assign to NULL
1517
+ void* pPriv;
1518
+ /// [in]
1519
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1520
+ /// [in]
1521
+ const struct NVPW_MetricEvalRequest* pMetricEvalRequests;
1522
+ /// [in]
1523
+ size_t numMetricEvalRequests;
1524
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1525
+ size_t metricEvalRequestStructSize;
1526
+ /// [in] set to sizeof('NVPW_MetricEvalRequest')
1527
+ size_t metricEvalRequestStrideSize;
1528
+ /// [in]
1529
+ const uint8_t* pCounterDataImage;
1530
+ /// [in]
1531
+ size_t counterDataImageSize;
1532
+ /// [in]
1533
+ size_t rangeIndex;
1534
+ /// [in]
1535
+ NVPA_Bool isolated;
1536
+ /// [inout] 'pMetricValues' is in, '*pMetricValues' is out
1537
+ double* pMetricValues;
1538
+ } NVPW_MetricsEvaluator_EvaluateToGpuValues_Params;
1539
+ #define NVPW_MetricsEvaluator_EvaluateToGpuValues_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params, pMetricValues)
1540
+
1541
+ NVPA_Status NVPW_MetricsEvaluator_EvaluateToGpuValues(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params* pParams);
1542
+
1543
+ typedef struct NVPW_MetricsEvaluator_SetDeviceAttributes_Params
1544
+ {
1545
+ /// [in]
1546
+ size_t structSize;
1547
+ /// [in] assign to NULL
1548
+ void* pPriv;
1549
+ /// [in]
1550
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1551
+ /// [in]
1552
+ const uint8_t* pCounterDataImage;
1553
+ /// [in]
1554
+ size_t counterDataImageSize;
1555
+ } NVPW_MetricsEvaluator_SetDeviceAttributes_Params;
1556
+ #define NVPW_MetricsEvaluator_SetDeviceAttributes_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetDeviceAttributes_Params, counterDataImageSize)
1557
+
1558
+ NVPA_Status NVPW_MetricsEvaluator_SetDeviceAttributes(NVPW_MetricsEvaluator_SetDeviceAttributes_Params* pParams);
1559
+
1560
+ /**
1561
+ * @}
1562
+ ******************************************************************************/
1563
+
1564
+
1565
+ #endif // NVPERF_HOST_API_DEFINED
1566
+
1567
+
1568
+
1569
+
1570
+ #ifdef __cplusplus
1571
+ } // extern "C"
1572
+ #endif
1573
+
1574
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
1575
+ #pragma GCC visibility pop
1576
+ #endif
1577
+
1578
+ #endif // NVPERF_HOST_H
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_target.h ADDED
@@ -0,0 +1,597 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_TARGET_H
2
+ #define NVPERF_TARGET_H
3
+
4
+ /*
5
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
+ #include <stddef.h>
42
+ #include <stdint.h>
43
+ #include "nvperf_common.h"
44
+
45
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
46
+ #pragma GCC visibility push(default)
47
+ #if !defined(NVPW_LOCAL)
48
+ #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
49
+ #endif
50
+ #else
51
+ #if !defined(NVPW_LOCAL)
52
+ #define NVPW_LOCAL
53
+ #endif
54
+ #endif
55
+
56
+ #ifdef __cplusplus
57
+ extern "C" {
58
+ #endif
59
+
60
+ /**
61
+ * @file nvperf_target.h
62
+ */
63
+
64
+ #ifndef NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED
65
+ #define NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED
66
+ /// GPU architecture support level
67
+ typedef enum NVPW_GpuArchitectureSupportLevel
68
+ {
69
+ NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_UNKNOWN = 0,
70
+ NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_UNSUPPORTED,
71
+ NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_SUPPORTED
72
+ } NVPW_GpuArchitectureSupportLevel;
73
+ #endif //NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED
74
+
75
+ #ifndef NVPW_SLI_SUPPORT_LEVEL_DEFINED
76
+ #define NVPW_SLI_SUPPORT_LEVEL_DEFINED
77
+ /// SLI configuration support level
78
+ typedef enum NVPW_SliSupportLevel
79
+ {
80
+ NVPW_SLI_SUPPORT_LEVEL_UNKNOWN = 0,
81
+ NVPW_SLI_SUPPORT_LEVEL_UNSUPPORTED,
82
+ /// Only Non-SLI configurations are supported.
83
+ NVPW_SLI_SUPPORT_LEVEL_SUPPORTED_NON_SLI_CONFIGURATION
84
+ } NVPW_SliSupportLevel;
85
+ #endif //NVPW_SLI_SUPPORT_LEVEL_DEFINED
86
+
87
+ #ifndef NVPW_VGPU_SUPPORT_LEVEL_DEFINED
88
+ #define NVPW_VGPU_SUPPORT_LEVEL_DEFINED
89
+ /// Virtualized GPU configuration support level
90
+ typedef enum NVPW_VGpuSupportLevel
91
+ {
92
+ NVPW_VGPU_SUPPORT_LEVEL_UNKNOWN = 0,
93
+ NVPW_VGPU_SUPPORT_LEVEL_UNSUPPORTED,
94
+ /// Supported but not allowed by system admin.
95
+ NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_DISALLOWED,
96
+ NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_ALLOWED,
97
+ NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_NON_VGPU_CONFIGURATION
98
+ } NVPW_VGpuSupportLevel;
99
+ #endif //NVPW_VGPU_SUPPORT_LEVEL_DEFINED
100
+
101
+ #ifndef NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED
102
+ #define NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED
103
+ /// Confidential Compute mode support level
104
+ typedef enum NVPW_ConfidentialComputeSupportLevel
105
+ {
106
+ NVPW_CONF_COMPUTE_SUPPORT_LEVEL_UNKNOWN = 0,
107
+ NVPW_CONF_COMPUTE_SUPPORT_LEVEL_UNSUPPORTED,
108
+ NVPW_CONF_COMPUTE_SUPPORT_LEVEL_SUPPORTED_NON_CONF_COMPUTE_CONFIGURATION,
109
+ NVPW_CONF_COMPUTE_SUPPORT_LEVEL_SUPPORTED_CONF_COMPUTE_DEVTOOLS_MODE
110
+ } NVPW_ConfidentialComputeSupportLevel;
111
+ #endif //NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED
112
+
113
+ #ifndef NVPW_CMP_SUPPORT_LEVEL_DEFINED
114
+ #define NVPW_CMP_SUPPORT_LEVEL_DEFINED
115
+ /// CMP support level
116
+ typedef enum NVPW_CmpSupportLevel
117
+ {
118
+ NVPW_CMP_SUPPORT_LEVEL_UNKNOWN = 0,
119
+ NVPW_CMP_SUPPORT_LEVEL_UNSUPPORTED,
120
+ NVPW_CMP_SUPPORT_LEVEL_SUPPORTED_NON_CMP_CONFIGURATON
121
+ } NVPW_CmpSupportLevel;
122
+ #endif //NVPW_CMP_SUPPORT_LEVEL_DEFINED
123
+
124
+ #ifndef NVPW_WSL_SUPPORT_LEVEL_DEFINED
125
+ #define NVPW_WSL_SUPPORT_LEVEL_DEFINED
126
+ /// WSL support level
127
+ typedef enum NVPW_WslSupportLevel
128
+ {
129
+ NVPW_WSL_SUPPORT_LEVEL_UNKNOWN = 0,
130
+ NVPW_WSL_SUPPORT_LEVEL_UNSUPPORTED_INSUFFICIENT_DRIVER_VERSION,
131
+ NVPW_WSL_SUPPORT_LEVEL_SUPPORTED,
132
+ NVPW_WSL_SUPPORT_LEVEL_SUPPORTED_NON_WSL_CONFIGURATION
133
+ } NVPW_WslSupportLevel;
134
+ #endif //NVPW_WSL_SUPPORT_LEVEL_DEFINED
135
+
136
+ typedef struct NVPW_InitializeTarget_Params
137
+ {
138
+ /// [in]
139
+ size_t structSize;
140
+ /// [in] assign to NULL
141
+ void* pPriv;
142
+ } NVPW_InitializeTarget_Params;
143
+ #define NVPW_InitializeTarget_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_InitializeTarget_Params, pPriv)
144
+
145
+ /// Load the target library.
146
+ NVPA_Status NVPW_InitializeTarget(NVPW_InitializeTarget_Params* pParams);
147
+
148
+ typedef struct NVPW_GetDeviceCount_Params
149
+ {
150
+ /// [in]
151
+ size_t structSize;
152
+ /// [in] assign to NULL
153
+ void* pPriv;
154
+ size_t numDevices;
155
+ } NVPW_GetDeviceCount_Params;
156
+ #define NVPW_GetDeviceCount_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_GetDeviceCount_Params, numDevices)
157
+
158
+ NVPA_Status NVPW_GetDeviceCount(NVPW_GetDeviceCount_Params* pParams);
159
+
160
+ typedef struct NVPW_Device_GetNames_Params
161
+ {
162
+ /// [in]
163
+ size_t structSize;
164
+ /// [in] assign to NULL
165
+ void* pPriv;
166
+ size_t deviceIndex;
167
+ const char* pDeviceName;
168
+ const char* pChipName;
169
+ } NVPW_Device_GetNames_Params;
170
+ #define NVPW_Device_GetNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetNames_Params, pChipName)
171
+
172
+ NVPA_Status NVPW_Device_GetNames(NVPW_Device_GetNames_Params* pParams);
173
+
174
+ typedef struct NVPW_PciBusId
175
+ {
176
+ /// The PCI domain on which the device bus resides.
177
+ uint32_t domain;
178
+ /// The bus on which the device resides.
179
+ uint16_t bus;
180
+ /// device ID.
181
+ uint16_t device;
182
+ } NVPW_PciBusId;
183
+ #define NVPW_PciBusId_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PciBusId, device)
184
+
185
+ typedef struct NVPW_Device_GetPciBusIds_Params
186
+ {
187
+ /// [in]
188
+ size_t structSize;
189
+ /// [in] assign to NULL
190
+ void* pPriv;
191
+ /// [in] caller-allocated array of NVPW_PciBusId, indexed by NVPW deviceIndex
192
+ NVPW_PciBusId* pBusIds;
193
+ /// [in] size of the pBusIDs array; use result from NVPW_GetDeviceCount
194
+ size_t numDevices;
195
+ } NVPW_Device_GetPciBusIds_Params;
196
+ #define NVPW_Device_GetPciBusIds_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetPciBusIds_Params, numDevices)
197
+
198
+ NVPA_Status NVPW_Device_GetPciBusIds(NVPW_Device_GetPciBusIds_Params* pParams);
199
+
200
+
201
+ #define NVPW_DEVICE_MIG_GPU_INSTANCE_ID_INVALID 0xFFFFFFFFu
202
+ #define NVPW_DEVICE_MIG_GPU_INSTANCE_ID_FULLCHIP 0xFFFFFFFEu
203
+
204
+
205
+ typedef struct NVPW_Device_GetMigAttributes_Params
206
+ {
207
+ /// [in]
208
+ size_t structSize;
209
+ /// [in] assign to NULL
210
+ void* pPriv;
211
+ /// [in]
212
+ size_t deviceIndex;
213
+ /// [out]
214
+ NVPA_Bool isMigPartition;
215
+ /// [out]
216
+ uint32_t gpuInstanceId;
217
+ /// [out]
218
+ uint32_t computeInstanceId;
219
+ } NVPW_Device_GetMigAttributes_Params;
220
+ #define NVPW_Device_GetMigAttributes_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetMigAttributes_Params, computeInstanceId)
221
+
222
+ NVPA_Status NVPW_Device_GetMigAttributes(NVPW_Device_GetMigAttributes_Params* pParams);
223
+
224
+ typedef struct NVPW_Adapter_GetDeviceIndex_Params
225
+ {
226
+ /// [in]
227
+ size_t structSize;
228
+ /// [in] assign to NULL
229
+ void* pPriv;
230
+ /// [in]
231
+ struct IDXGIAdapter* pAdapter;
232
+ /// [in]
233
+ size_t sliIndex;
234
+ /// [out]
235
+ size_t deviceIndex;
236
+ } NVPW_Adapter_GetDeviceIndex_Params;
237
+ #define NVPW_Adapter_GetDeviceIndex_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Adapter_GetDeviceIndex_Params, deviceIndex)
238
+
239
+ NVPA_Status NVPW_Adapter_GetDeviceIndex(NVPW_Adapter_GetDeviceIndex_Params* pParams);
240
+
241
+ typedef struct NVPW_CounterData_GetNumRanges_Params
242
+ {
243
+ /// [in]
244
+ size_t structSize;
245
+ /// [in] assign to NULL
246
+ void* pPriv;
247
+ const uint8_t* pCounterDataImage;
248
+ size_t numRanges;
249
+ } NVPW_CounterData_GetNumRanges_Params;
250
+ #define NVPW_CounterData_GetNumRanges_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetNumRanges_Params, numRanges)
251
+
252
+ NVPA_Status NVPW_CounterData_GetNumRanges(NVPW_CounterData_GetNumRanges_Params* pParams);
253
+
254
+ typedef struct NVPW_CounterData_GetChipName_Params
255
+ {
256
+ /// [in]
257
+ size_t structSize;
258
+ /// [in] assign to NULL
259
+ void* pPriv;
260
+ /// [in]
261
+ const uint8_t* pCounterDataImage;
262
+ /// [in]
263
+ size_t counterDataImageSize;
264
+ /// [out]
265
+ const char* pChipName;
266
+ } NVPW_CounterData_GetChipName_Params;
267
+ #define NVPW_CounterData_GetChipName_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetChipName_Params, pChipName)
268
+
269
+ NVPA_Status NVPW_CounterData_GetChipName(NVPW_CounterData_GetChipName_Params* pParams);
270
+
271
+ typedef struct NVPW_Config_GetNumPasses_Params
272
+ {
273
+ /// [in]
274
+ size_t structSize;
275
+ /// [in] assign to NULL
276
+ void* pPriv;
277
+ /// [in]
278
+ const uint8_t* pConfig;
279
+ /// [out]
280
+ size_t numPipelinedPasses;
281
+ /// [out]
282
+ size_t numIsolatedPasses;
283
+ } NVPW_Config_GetNumPasses_Params;
284
+ #define NVPW_Config_GetNumPasses_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetNumPasses_Params, numIsolatedPasses)
285
+
286
+ /// Total num passes = numPipelinedPasses + numIsolatedPasses * numNestingLevels
287
+ NVPA_Status NVPW_Config_GetNumPasses(NVPW_Config_GetNumPasses_Params* pParams);
288
+
289
+ typedef struct NVPW_Config_GetNumPasses_V2_Params
290
+ {
291
+ /// [in]
292
+ size_t structSize;
293
+ /// [in] assign to NULL
294
+ void* pPriv;
295
+ /// [in]
296
+ const uint8_t* pConfig;
297
+ /// [out]
298
+ size_t numPasses;
299
+ } NVPW_Config_GetNumPasses_V2_Params;
300
+ #define NVPW_Config_GetNumPasses_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetNumPasses_V2_Params, numPasses)
301
+
302
+ /// Total num passes = numPasses * numNestingLevels
303
+ NVPA_Status NVPW_Config_GetNumPasses_V2(NVPW_Config_GetNumPasses_V2_Params* pParams);
304
+
305
+ #define NVPW_API_SET_CUDA_PROFILER 0x18209d0775b2f89dULL
306
+
307
+ #define NVPW_API_SET_D3D11_PROFILER 0xca55c6738445db2bULL
308
+
309
+ #define NVPW_API_SET_D3D12_PROFILER 0xc0c2d46dd7c7ad78ULL
310
+
311
+ #define NVPW_API_SET_EGL_PROFILER 0x3c3747dae1f9565cULL
312
+
313
+ #define NVPW_API_SET_GPU_PERIODICSAMPLER 0x9f4c2571fc0b2e8aULL
314
+
315
+ #define NVPW_API_SET_METRICSCONTEXT 0x7c8579f6f2144beaULL
316
+
317
+ #define NVPW_API_SET_METRICSEVALUATOR 0x0368a8768d811af9ULL
318
+
319
+ #define NVPW_API_SET_METRICS_AD10X_COMP 0xbe57278e12cb5288ULL
320
+
321
+ #define NVPW_API_SET_METRICS_AD10X_GRFX 0x5cbf0774f81bf491ULL
322
+
323
+ #define NVPW_API_SET_METRICS_GA100_COMP 0x16b7d8c20d8b4915ULL
324
+
325
+ #define NVPW_API_SET_METRICS_GA100_GRFX 0xc94eaabec04a94faULL
326
+
327
+ #define NVPW_API_SET_METRICS_GA10X_COMP 0xb5d6391c2e299ab5ULL
328
+
329
+ #define NVPW_API_SET_METRICS_GA10X_GRFX 0x6ebc121178b5ce0bULL
330
+
331
+ #define NVPW_API_SET_METRICS_GV100_COMP 0x863705cc57919f72ULL
332
+
333
+ #define NVPW_API_SET_METRICS_GV100_GRFX 0x9900da75d164fecfULL
334
+
335
+ #define NVPW_API_SET_METRICS_GV11B_COMP 0xd3f79a859235848fULL
336
+
337
+ #define NVPW_API_SET_METRICS_GV11B_GRFX 0xeb8e26220106e227ULL
338
+
339
+ #define NVPW_API_SET_METRICS_TU10X_COMP 0x70f40be0afd35da8ULL
340
+
341
+ #define NVPW_API_SET_METRICS_TU10X_GRFX 0xdf219cb838db6968ULL
342
+
343
+ #define NVPW_API_SET_METRICS_TU11X_COMP 0xeb0069d7d0956678ULL
344
+
345
+ #define NVPW_API_SET_METRICS_TU11X_GRFX 0x0977d9342bd62743ULL
346
+
347
+ #define NVPW_API_SET_OPENGL_PROFILER 0xe4cd9ea40f2ee777ULL
348
+
349
+ #define NVPW_API_SET_VULKAN_PROFILER 0x8c56b6a03d779689ULL
350
+
351
+ #define NVPW_SDK_VERSION 0x1e128b6f001423fcULL
352
+
353
+ typedef struct NVPW_QueryVersionNumber_Params
354
+ {
355
+ /// [in]
356
+ size_t structSize;
357
+ /// [in] assign to NULL
358
+ void* pPriv;
359
+ /// [in]
360
+ uint64_t apiSet;
361
+ /// [out]
362
+ uint32_t major;
363
+ /// [out]
364
+ uint32_t minor;
365
+ /// [out]
366
+ uint32_t patch;
367
+ /// [out]
368
+ uint32_t relMajor;
369
+ /// [out]
370
+ uint32_t relMinor;
371
+ /// [out]
372
+ uint32_t relPatch;
373
+ } NVPW_QueryVersionNumber_Params;
374
+ #define NVPW_QueryVersionNumber_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_QueryVersionNumber_Params, relPatch)
375
+
376
+ /// Query version number of an API set
377
+ NVPA_Status NVPW_QueryVersionNumber(NVPW_QueryVersionNumber_Params* pParams);
378
+
379
+ typedef enum NVPW_Device_ClockStatus
380
+ {
381
+ /// clock status is unknown
382
+ NVPW_DEVICE_CLOCK_STATUS_UNKNOWN,
383
+ /// clocks are locked to rated tdp values
384
+ NVPW_DEVICE_CLOCK_STATUS_LOCKED_TO_RATED_TDP,
385
+ /// clocks are not locked and can boost above rated tdp
386
+ NVPW_DEVICE_CLOCK_STATUS_BOOST_ENABLED,
387
+ /// clocks are not locked and will not go above rated tdp
388
+ NVPW_DEVICE_CLOCK_STATUS_BOOST_DISABLED,
389
+ NVPW_DEVICE_CLOCK_STATUS__COUNT
390
+ } NVPW_Device_ClockStatus;
391
+
392
+ typedef struct NVPW_Device_GetClockStatus_Params
393
+ {
394
+ /// [in]
395
+ size_t structSize;
396
+ /// [in] assign to NULL
397
+ void* pPriv;
398
+ size_t deviceIndex;
399
+ /// [in]
400
+ NVPW_Device_ClockStatus clockStatus;
401
+ } NVPW_Device_GetClockStatus_Params;
402
+ #define NVPW_Device_GetClockStatus_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetClockStatus_Params, clockStatus)
403
+
404
+ NVPA_Status NVPW_Device_GetClockStatus(NVPW_Device_GetClockStatus_Params* pParams);
405
+
406
+ typedef enum NVPW_Device_ClockSetting
407
+ {
408
+ /// invalid op, specify valid clocks operation during profiling
409
+ NVPW_DEVICE_CLOCK_SETTING_INVALID,
410
+ /// default to driver/application config (normally unlocked and not boosted, but could be unlocked boosted, or
411
+ /// locked to rated TDP)
412
+ NVPW_DEVICE_CLOCK_SETTING_DEFAULT,
413
+ /// lock clocks at rated tdp base values
414
+ NVPW_DEVICE_CLOCK_SETTING_LOCK_TO_RATED_TDP,
415
+ NVPW_DEVICE_CLOCK_SETTING__COUNT
416
+ } NVPW_Device_ClockSetting;
417
+
418
+ typedef struct NVPW_Device_SetClockSetting_Params
419
+ {
420
+ /// [in]
421
+ size_t structSize;
422
+ /// [in] assign to NULL
423
+ void* pPriv;
424
+ size_t deviceIndex;
425
+ /// [in]
426
+ NVPW_Device_ClockSetting clockSetting;
427
+ } NVPW_Device_SetClockSetting_Params;
428
+ #define NVPW_Device_SetClockSetting_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_SetClockSetting_Params, clockSetting)
429
+
430
+ NVPA_Status NVPW_Device_SetClockSetting(NVPW_Device_SetClockSetting_Params* pParams);
431
+
432
+ typedef struct NVPW_CounterData_GetRangeDescriptions_Params
433
+ {
434
+ /// [in]
435
+ size_t structSize;
436
+ /// [in] assign to NULL
437
+ void* pPriv;
438
+ const uint8_t* pCounterDataImage;
439
+ size_t rangeIndex;
440
+ /// [inout] Number of descriptions allocated in ppDescriptions
441
+ size_t numDescriptions;
442
+ const char** ppDescriptions;
443
+ } NVPW_CounterData_GetRangeDescriptions_Params;
444
+ #define NVPW_CounterData_GetRangeDescriptions_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetRangeDescriptions_Params, ppDescriptions)
445
+
446
+ NVPA_Status NVPW_CounterData_GetRangeDescriptions(NVPW_CounterData_GetRangeDescriptions_Params* pParams);
447
+
448
+ typedef struct NVPW_Profiler_CounterData_GetRangeDescriptions_Params
449
+ {
450
+ /// [in]
451
+ size_t structSize;
452
+ /// [in] assign to NULL
453
+ void* pPriv;
454
+ const uint8_t* pCounterDataImage;
455
+ size_t rangeIndex;
456
+ /// [inout] Number of descriptions allocated in ppDescriptions
457
+ size_t numDescriptions;
458
+ const char** ppDescriptions;
459
+ } NVPW_Profiler_CounterData_GetRangeDescriptions_Params;
460
+ #define NVPW_Profiler_CounterData_GetRangeDescriptions_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Profiler_CounterData_GetRangeDescriptions_Params, ppDescriptions)
461
+
462
+ NVPA_Status NVPW_Profiler_CounterData_GetRangeDescriptions(NVPW_Profiler_CounterData_GetRangeDescriptions_Params* pParams);
463
+
464
+ #ifndef NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED
465
+ #define NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED
466
+ typedef enum NVPW_PeriodicSampler_CounterData_AppendMode
467
+ {
468
+ NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_LINEAR = 0,
469
+ NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_CIRCULAR = 1,
470
+ NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE__COUNT
471
+ } NVPW_PeriodicSampler_CounterData_AppendMode;
472
+ #endif //NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED
473
+
474
+ typedef struct NVPW_PeriodicSampler_CounterData_GetSampleTime_Params
475
+ {
476
+ /// [in]
477
+ size_t structSize;
478
+ /// [in] assign to NULL
479
+ void* pPriv;
480
+ /// [in]
481
+ const uint8_t* pCounterDataImage;
482
+ /// [in]
483
+ size_t rangeIndex;
484
+ /// [out]
485
+ uint64_t timestampStart;
486
+ /// [out]
487
+ uint64_t timestampEnd;
488
+ } NVPW_PeriodicSampler_CounterData_GetSampleTime_Params;
489
+ #define NVPW_PeriodicSampler_CounterData_GetSampleTime_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetSampleTime_Params, timestampEnd)
490
+
491
+ NVPA_Status NVPW_PeriodicSampler_CounterData_GetSampleTime(NVPW_PeriodicSampler_CounterData_GetSampleTime_Params* pParams);
492
+
493
+ typedef struct NVPW_PeriodicSampler_CounterData_TrimInPlace_Params
494
+ {
495
+ /// [in]
496
+ size_t structSize;
497
+ /// [in] assign to NULL
498
+ void* pPriv;
499
+ /// [in]
500
+ uint8_t* pCounterDataImage;
501
+ /// [in]
502
+ size_t counterDataImageSize;
503
+ /// [out]
504
+ size_t counterDataImageTrimmedSize;
505
+ } NVPW_PeriodicSampler_CounterData_TrimInPlace_Params;
506
+ #define NVPW_PeriodicSampler_CounterData_TrimInPlace_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_TrimInPlace_Params, counterDataImageTrimmedSize)
507
+
508
+ NVPA_Status NVPW_PeriodicSampler_CounterData_TrimInPlace(NVPW_PeriodicSampler_CounterData_TrimInPlace_Params* pParams);
509
+
510
+ typedef struct NVPW_PeriodicSampler_CounterData_GetInfo_Params
511
+ {
512
+ /// [in]
513
+ size_t structSize;
514
+ /// [in] assign to NULL
515
+ void* pPriv;
516
+ /// [in]
517
+ const uint8_t* pCounterDataImage;
518
+ /// [in]
519
+ size_t counterDataImageSize;
520
+ /// [out] total number of ranges in the counter data
521
+ size_t numTotalRanges;
522
+ /// [out] if in "linear" mode, this API returns the number of "populated" ranges; if it's in "circular" mode,
523
+ /// then it returns the last "populated" range index + 1, when there is no such range, it returns 0.
524
+ size_t numPopulatedRanges;
525
+ /// [out] if in "linear" mode, this API returns the number of "completed" ranges; if it's in "circular" mode,
526
+ /// then it returns the last "completed" range index + 1, when there is no such range, it returns 0.
527
+ size_t numCompletedRanges;
528
+ } NVPW_PeriodicSampler_CounterData_GetInfo_Params;
529
+ #define NVPW_PeriodicSampler_CounterData_GetInfo_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetInfo_Params, numCompletedRanges)
530
+
531
+ /// In periodic sampler, a range in counter data stores exactly one sample's data. For better performance, periodic
532
+ /// sampler may operate in an out-of-order fashion when populating sample data, i.e. it may not fully populate all
533
+ /// counters of a sample/range before starting to populate the next sample/range. As a result, we have two concepts
534
+ /// here, "populated" & "completed": a range is considered "populated" even if only partial counters have been
535
+ /// written; on the other hand, a range is only considered "completed" if all the collecting counters have been
536
+ /// written.
537
+ NVPA_Status NVPW_PeriodicSampler_CounterData_GetInfo(NVPW_PeriodicSampler_CounterData_GetInfo_Params* pParams);
538
+
539
+ typedef struct NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params
540
+ {
541
+ /// [in]
542
+ size_t structSize;
543
+ /// [in] assign to NULL
544
+ void* pPriv;
545
+ /// [in]
546
+ const uint8_t* pCounterDataImage;
547
+ /// [in]
548
+ size_t counterDataImageSize;
549
+ /// [in]
550
+ size_t rangeIndex;
551
+ /// [out]
552
+ uint32_t triggerCount;
553
+ } NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params;
554
+ #define NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params, triggerCount)
555
+
556
+ NVPA_Status NVPW_PeriodicSampler_CounterData_GetTriggerCount(NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params* pParams);
557
+
558
+ typedef struct NVPW_PeriodicSampler_CounterData_IsDataComplete_Params
559
+ {
560
+ /// [in]
561
+ size_t structSize;
562
+ /// [in] assign to NULL
563
+ void* pPriv;
564
+ /// [in]
565
+ const uint8_t* pCounterDataImage;
566
+ /// [in]
567
+ size_t counterDataImageSize;
568
+ /// [in]
569
+ size_t rangeIndex;
570
+ /// [out]
571
+ NVPA_Bool isComplete;
572
+ } NVPW_PeriodicSampler_CounterData_IsDataComplete_Params;
573
+ #define NVPW_PeriodicSampler_CounterData_IsDataComplete_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_IsDataComplete_Params, isComplete)
574
+
575
+ /// Checks whether a given sample's data is complete. See also 'NVPW_PeriodicSampler_CounterData_GetInfo'
576
+ NVPA_Status NVPW_PeriodicSampler_CounterData_IsDataComplete(NVPW_PeriodicSampler_CounterData_IsDataComplete_Params* pParams);
577
+
578
+
579
+ typedef struct NVPW_TimestampReport
580
+ {
581
+ uint32_t payload;
582
+ uint8_t reserved0004[4];
583
+ uint64_t timestamp;
584
+ } NVPW_TimestampReport;
585
+
586
+
587
+
588
+
589
+ #ifdef __cplusplus
590
+ } // extern "C"
591
+ #endif
592
+
593
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
594
+ #pragma GCC visibility pop
595
+ #endif
596
+
597
+ #endif // NVPERF_TARGET_H
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py ADDED
File without changes
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.11.7 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d23892b9f83727174a11b942375a7a17941198bd3b40b0da0f29e2a24abfe069
3
+ size 7095664
mplug_owl2/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb2a7c5b15c84df9505dd47e553fe46f3121a57d30391fca24179d202f73f3f7
3
+ size 7748112
mplug_owl2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94fab98c15040558c3c80f2c1a2f5fda9baa72afc39a88bdcc82185f49d241c3
3
+ size 86326864
mplug_owl2/lib/python3.10/site-packages/pygments/lexers/__pycache__/lisp.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49929fbfa7ceae39358687415dc084825759a1864f07bf2385867292b3cdf0d7
3
+ size 107504
mplug_owl2/lib/python3.10/site-packages/pygments/styles/__pycache__/lovelace.cpython-310.pyc ADDED
Binary file (2.63 kB). View file
 
mplug_owl2/lib/python3.10/site-packages/pygments/styles/__pycache__/vim.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
openflamingo/lib/python3.10/site-packages/wandb/proto/v3/__init__.py ADDED
File without changes
openflamingo/lib/python3.10/site-packages/wandb/proto/v3/__pycache__/wandb_server_pb2.cpython-310.pyc ADDED
Binary file (6.55 kB). View file
 
openflamingo/lib/python3.10/site-packages/wandb/proto/v4/__init__.py ADDED
File without changes
openflamingo/lib/python3.10/site-packages/wandb/proto/v4/wandb_base_pb2.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # source: wandb/proto/wandb_base.proto
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf.internal import builder as _builder
6
+ from google.protobuf import descriptor as _descriptor
7
+ from google.protobuf import descriptor_pool as _descriptor_pool
8
+ from google.protobuf import symbol_database as _symbol_database
9
+ # @@protoc_insertion_point(imports)
10
+
11
+ _sym_db = _symbol_database.Default()
12
+
13
+
14
+
15
+
16
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cwandb/proto/wandb_base.proto\x12\x0ewandb_internal\"6\n\x0b_RecordInfo\x12\x11\n\tstream_id\x18\x01 \x01(\t\x12\x14\n\x0c_tracelog_id\x18\x64 \x01(\t\"!\n\x0c_RequestInfo\x12\x11\n\tstream_id\x18\x01 \x01(\t\"#\n\x0b_ResultInfo\x12\x14\n\x0c_tracelog_id\x18\x64 \x01(\tB\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3')
17
+
18
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
19
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_base_pb2', globals())
20
+ if _descriptor._USE_C_DESCRIPTORS == False:
21
+
22
+ DESCRIPTOR._options = None
23
+ DESCRIPTOR._serialized_options = b'Z\031core/pkg/service_go_proto'
24
+ __RECORDINFO._serialized_start=48
25
+ __RECORDINFO._serialized_end=102
26
+ __REQUESTINFO._serialized_start=104
27
+ __REQUESTINFO._serialized_end=137
28
+ __RESULTINFO._serialized_start=139
29
+ __RESULTINFO._serialized_end=174
30
+ # @@protoc_insertion_point(module_scope)
openflamingo/lib/python3.10/site-packages/wandb/proto/v4/wandb_server_pb2.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # source: wandb/proto/wandb_server.proto
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf.internal import builder as _builder
6
+ from google.protobuf import descriptor as _descriptor
7
+ from google.protobuf import descriptor_pool as _descriptor_pool
8
+ from google.protobuf import symbol_database as _symbol_database
9
+ # @@protoc_insertion_point(imports)
10
+
11
+ _sym_db = _symbol_database.Default()
12
+
13
+
14
+ from wandb.proto import wandb_base_pb2 as wandb_dot_proto_dot_wandb__base__pb2
15
+ from wandb.proto import wandb_internal_pb2 as wandb_dot_proto_dot_wandb__internal__pb2
16
+ from wandb.proto import wandb_settings_pb2 as wandb_dot_proto_dot_wandb__settings__pb2
17
+
18
+
19
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1ewandb/proto/wandb_server.proto\x12\x0ewandb_internal\x1a\x1cwandb/proto/wandb_base.proto\x1a wandb/proto/wandb_internal.proto\x1a wandb/proto/wandb_settings.proto\"k\n\x19ServerAuthenticateRequest\x12\x0f\n\x07\x61pi_key\x18\x01 \x01(\t\x12\x10\n\x08\x62\x61se_url\x18\x02 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"w\n\x1aServerAuthenticateResponse\x12\x16\n\x0e\x64\x65\x66\x61ult_entity\x18\x01 \x01(\t\x12\x14\n\x0c\x65rror_status\x18\x02 \x01(\t\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"D\n\x15ServerShutdownRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x18\n\x16ServerShutdownResponse\"B\n\x13ServerStatusRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x16\n\x14ServerStatusResponse\"r\n\x17ServerInformInitRequest\x12*\n\x08settings\x18\x01 \x01(\x0b\x32\x18.wandb_internal.Settings\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1a\n\x18ServerInformInitResponse\"s\n\x18ServerInformStartRequest\x12*\n\x08settings\x18\x01 \x01(\x0b\x32\x18.wandb_internal.Settings\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1b\n\x19ServerInformStartResponse\"H\n\x19ServerInformFinishRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1c\n\x1aServerInformFinishResponse\"H\n\x19ServerInformAttachRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"u\n\x1aServerInformAttachResponse\x12*\n\x08settings\x18\x01 \x01(\x0b\x32\x18.wandb_internal.Settings\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"H\n\x19ServerInformDetachRequest\x12+\n\x05_info\x18\xc8\x01 \x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1c\n\x1aServerInformDetachResponse\"]\n\x1bServerInformTeardownRequest\x12\x11\n\texit_code\x18\x01 \x01(\x05\x12+\n\x05_info\x18\xc8\x01 
\x01(\x0b\x32\x1b.wandb_internal._RecordInfo\"\x1e\n\x1cServerInformTeardownResponse\"\xe7\x04\n\rServerRequest\x12\x30\n\x0erecord_publish\x18\x01 \x01(\x0b\x32\x16.wandb_internal.RecordH\x00\x12\x34\n\x12record_communicate\x18\x02 \x01(\x0b\x32\x16.wandb_internal.RecordH\x00\x12>\n\x0binform_init\x18\x03 \x01(\x0b\x32\'.wandb_internal.ServerInformInitRequestH\x00\x12\x42\n\rinform_finish\x18\x04 \x01(\x0b\x32).wandb_internal.ServerInformFinishRequestH\x00\x12\x42\n\rinform_attach\x18\x05 \x01(\x0b\x32).wandb_internal.ServerInformAttachRequestH\x00\x12\x42\n\rinform_detach\x18\x06 \x01(\x0b\x32).wandb_internal.ServerInformDetachRequestH\x00\x12\x46\n\x0finform_teardown\x18\x07 \x01(\x0b\x32+.wandb_internal.ServerInformTeardownRequestH\x00\x12@\n\x0cinform_start\x18\x08 \x01(\x0b\x32(.wandb_internal.ServerInformStartRequestH\x00\x12\x41\n\x0c\x61uthenticate\x18\t \x01(\x0b\x32).wandb_internal.ServerAuthenticateRequestH\x00\x42\x15\n\x13server_request_type\"\xfd\x04\n\x0eServerResponse\x12\x34\n\x12result_communicate\x18\x02 \x01(\x0b\x32\x16.wandb_internal.ResultH\x00\x12H\n\x14inform_init_response\x18\x03 \x01(\x0b\x32(.wandb_internal.ServerInformInitResponseH\x00\x12L\n\x16inform_finish_response\x18\x04 \x01(\x0b\x32*.wandb_internal.ServerInformFinishResponseH\x00\x12L\n\x16inform_attach_response\x18\x05 \x01(\x0b\x32*.wandb_internal.ServerInformAttachResponseH\x00\x12L\n\x16inform_detach_response\x18\x06 \x01(\x0b\x32*.wandb_internal.ServerInformDetachResponseH\x00\x12P\n\x18inform_teardown_response\x18\x07 \x01(\x0b\x32,.wandb_internal.ServerInformTeardownResponseH\x00\x12J\n\x15inform_start_response\x18\x08 \x01(\x0b\x32).wandb_internal.ServerInformStartResponseH\x00\x12K\n\x15\x61uthenticate_response\x18\t \x01(\x0b\x32*.wandb_internal.ServerAuthenticateResponseH\x00\x42\x16\n\x14server_response_typeB\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3')
20
+
21
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
22
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_server_pb2', globals())
23
+ if _descriptor._USE_C_DESCRIPTORS == False:
24
+
25
+ DESCRIPTOR._options = None
26
+ DESCRIPTOR._serialized_options = b'Z\031core/pkg/service_go_proto'
27
+ _SERVERAUTHENTICATEREQUEST._serialized_start=148
28
+ _SERVERAUTHENTICATEREQUEST._serialized_end=255
29
+ _SERVERAUTHENTICATERESPONSE._serialized_start=257
30
+ _SERVERAUTHENTICATERESPONSE._serialized_end=376
31
+ _SERVERSHUTDOWNREQUEST._serialized_start=378
32
+ _SERVERSHUTDOWNREQUEST._serialized_end=446
33
+ _SERVERSHUTDOWNRESPONSE._serialized_start=448
34
+ _SERVERSHUTDOWNRESPONSE._serialized_end=472
35
+ _SERVERSTATUSREQUEST._serialized_start=474
36
+ _SERVERSTATUSREQUEST._serialized_end=540
37
+ _SERVERSTATUSRESPONSE._serialized_start=542
38
+ _SERVERSTATUSRESPONSE._serialized_end=564
39
+ _SERVERINFORMINITREQUEST._serialized_start=566
40
+ _SERVERINFORMINITREQUEST._serialized_end=680
41
+ _SERVERINFORMINITRESPONSE._serialized_start=682
42
+ _SERVERINFORMINITRESPONSE._serialized_end=708
43
+ _SERVERINFORMSTARTREQUEST._serialized_start=710
44
+ _SERVERINFORMSTARTREQUEST._serialized_end=825
45
+ _SERVERINFORMSTARTRESPONSE._serialized_start=827
46
+ _SERVERINFORMSTARTRESPONSE._serialized_end=854
47
+ _SERVERINFORMFINISHREQUEST._serialized_start=856
48
+ _SERVERINFORMFINISHREQUEST._serialized_end=928
49
+ _SERVERINFORMFINISHRESPONSE._serialized_start=930
50
+ _SERVERINFORMFINISHRESPONSE._serialized_end=958
51
+ _SERVERINFORMATTACHREQUEST._serialized_start=960
52
+ _SERVERINFORMATTACHREQUEST._serialized_end=1032
53
+ _SERVERINFORMATTACHRESPONSE._serialized_start=1034
54
+ _SERVERINFORMATTACHRESPONSE._serialized_end=1151
55
+ _SERVERINFORMDETACHREQUEST._serialized_start=1153
56
+ _SERVERINFORMDETACHREQUEST._serialized_end=1225
57
+ _SERVERINFORMDETACHRESPONSE._serialized_start=1227
58
+ _SERVERINFORMDETACHRESPONSE._serialized_end=1255
59
+ _SERVERINFORMTEARDOWNREQUEST._serialized_start=1257
60
+ _SERVERINFORMTEARDOWNREQUEST._serialized_end=1350
61
+ _SERVERINFORMTEARDOWNRESPONSE._serialized_start=1352
62
+ _SERVERINFORMTEARDOWNRESPONSE._serialized_end=1382
63
+ _SERVERREQUEST._serialized_start=1385
64
+ _SERVERREQUEST._serialized_end=2000
65
+ _SERVERRESPONSE._serialized_start=2003
66
+ _SERVERRESPONSE._serialized_end=2640
67
+ # @@protoc_insertion_point(module_scope)
openflamingo/lib/python3.10/site-packages/wandb/proto/v4/wandb_settings_pb2.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # source: wandb/proto/wandb_settings.proto
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf.internal import builder as _builder
6
+ from google.protobuf import descriptor as _descriptor
7
+ from google.protobuf import descriptor_pool as _descriptor_pool
8
+ from google.protobuf import symbol_database as _symbol_database
9
+ # @@protoc_insertion_point(imports)
10
+
11
+ _sym_db = _symbol_database.Default()
12
+
13
+
14
+ from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
15
+
16
+
17
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n wandb/proto/wandb_settings.proto\x12\x0ewandb_internal\x1a\x1egoogle/protobuf/wrappers.proto\" \n\x0fListStringValue\x12\r\n\x05value\x18\x01 \x03(\t\"\x1d\n\x0cListIntValue\x12\r\n\x05value\x18\x01 \x03(\x05\"\x8a\x01\n\x17MapStringKeyStringValue\x12\x41\n\x05value\x18\x01 \x03(\x0b\x32\x32.wandb_internal.MapStringKeyStringValue.ValueEntry\x1a,\n\nValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcb\x01\n#MapStringKeyMapStringKeyStringValue\x12M\n\x05value\x18\x01 \x03(\x0b\x32>.wandb_internal.MapStringKeyMapStringKeyStringValue.ValueEntry\x1aU\n\nValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue:\x02\x38\x01\"\x9a\x01\n\x12OpenMetricsFilters\x12\x33\n\x08sequence\x18\x01 \x01(\x0b\x32\x1f.wandb_internal.ListStringValueH\x00\x12\x46\n\x07mapping\x18\x02 \x01(\x0b\x32\x33.wandb_internal.MapStringKeyMapStringKeyStringValueH\x00\x42\x07\n\x05value\"7\n\tRunMoment\x12\x0b\n\x03run\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x0e\n\x06metric\x18\x03 \x01(\t\"\x96J\n\x08Settings\x12-\n\x07\x61pi_key\x18\x37 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\x13identity_token_file\x18\xaa\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x10\x63redentials_file\x18\xab\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x39\n\x14insecure_disable_ssl\x18\xb9\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x08_offline\x18\x1e \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06x_sync\x18\x1f \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\tsync_file\x18\x86\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x07_shared\x18\xa2\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x06run_id\x18k \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07run_url\x18q \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07project\x18\x61 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x65ntity\x18\x45 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cx_start_time\x18) \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08root_dir\x18i \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07log_dir\x18U \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0clog_internal\x18V \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tfiles_dir\x18\x46 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0bx_files_dir\x18\xb4\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0cignore_globs\x18N \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12.\n\x08\x62\x61se_url\x18\x39 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12=\n\x17x_file_stream_max_bytes\x18\xac\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x46\n\x1fx_file_stream_transmit_interval\x18\xaf\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x45\n\x14x_extra_http_headers\x18\x0e \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12=\n\x17x_file_stream_retry_max\x18\x93\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12K\n$x_file_stream_retry_wait_min_seconds\x18\x94\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12K\n$x_file_stream_retry_wait_max_seconds\x18\x95\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x43\n\x1dx_file_stream_timeout_seconds\x18\x0f \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x42\n\x1cx_file_stream_max_line_bytes\x18\xb2\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12?\n\x19x_file_transfer_retry_max\x18\x96\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12M\n&x_file_transfer_retry_wait_min_seconds\x18\x97\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12M\n&x_file_transfer_retry_wait_max_seconds\x18\x98\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x46\n\x1fx_file_transfer_timeout_seconds\x18\x99\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x39\n\x13x_graphql_retry_max\x18\x9a\x01 
\x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12G\n x_graphql_retry_wait_min_seconds\x18\x9b\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12G\n x_graphql_retry_wait_max_seconds\x18\x9c\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12@\n\x19x_graphql_timeout_seconds\x18\x9d\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x31\n\nhttp_proxy\x18\xa8\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0bhttps_proxy\x18\xa9\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\tx_proxies\x18\xc8\x01 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12-\n\x07program\x18_ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0fprogram_relpath\x18` \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x10_code_path_local\x18\xa3\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x0fprogram_abspath\x18\x9f\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x05_args\x18\x01 \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12)\n\x03_os\x18 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06\x64ocker\x18\x43 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cx_executable\x18\r \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07_python\x18\" \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\tcolab_url\x18\xa0\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04host\x18M \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x08username\x18\x8d\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x05\x65mail\x18\x44 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x06resume\x18\x66 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x0bresume_from\x18\xa7\x01 \x01(\x0b\x32\x19.wandb_internal.RunMoment\x12-\n\tfork_from\x18\xa4\x01 \x01(\x0b\x32\x19.wandb_internal.RunMoment\x12\x38\n\x14\x64isable_job_creation\x18\x41 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\tsweep_url\x18\x83\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x16x_disable_update_check\x18\xa5\x01 
\x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0ex_disable_meta\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\tsave_code\x18s \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x0b\x64isable_git\x18? \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12;\n\x16x_disable_machine_info\x18\x9e\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0fx_disable_stats\x18\n \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x13x_stats_buffer_size\x18\xa1\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12@\n\x19x_stats_sampling_interval\x18\xae\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x30\n\x0bx_stats_pid\x18* \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12<\n\x12x_stats_disk_paths\x18\x92\x01 \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12H\n\"x_stats_neuron_monitor_config_path\x18. \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12O\n\x1ex_stats_open_metrics_endpoints\x18/ \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12H\n\x1cx_stats_open_metrics_filters\x18\x30 \x01(\x0b\x32\".wandb_internal.OpenMetricsFilters\x12S\n!x_stats_open_metrics_http_headers\x18\xb8\x01 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12=\n\x16x_stats_gpu_device_ids\x18\xba\x01 \x01(\x0b\x32\x1c.wandb_internal.ListIntValue\x12.\n\x07x_label\x18\xb5\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12=\n\x18x_require_legacy_service\x18\xad\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12;\n\x16x_show_operation_stats\x18\xb0\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0ex_primary_node\x18\xb6\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12:\n\x15x_update_finish_state\x18\xb7\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12<\n\x17\x61llow_offline_artifacts\x18\xb1\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\x07\x63onsole\x18< \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x11\x63onsole_multipart\x18\xa6\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x10sync_tensorboard\x18\xb3\x01 
\x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x0b_aws_lambda\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0fx_cli_only_mode\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06_colab\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x11x_disable_service\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12:\n\x16x_disable_setproctitle\x18\t \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x10x_disable_viewer\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x15x_flow_control_custom\x18\x10 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12;\n\x17x_flow_control_disabled\x18\x11 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12>\n\x18x_internal_check_process\x18\x12 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x08_ipython\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x08_jupyter\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x0ex_jupyter_root\x18\x16 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x07_kaggle\x18\x17 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12=\n\x18x_live_policy_rate_limit\x18\x18 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12<\n\x17x_live_policy_wait_time\x18\x19 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x30\n\x0bx_log_level\x18\x1a \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x35\n\x10x_network_buffer\x18\x1b \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12)\n\x05_noop\x18\x1c \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\t_notebook\x18\x1d \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\t_platform\x18! 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\x12x_runqueue_item_id\x18# \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x13x_save_requirements\x18% \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x39\n\x13x_service_transport\x18& \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x0ex_service_wait\x18\' \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x35\n\x0f_start_datetime\x18( \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\r_tmp_code_dir\x18\x31 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x08_windows\x18\x34 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x34\n\x10\x61llow_val_change\x18\x35 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\tanonymous\x18\x36 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12P\n\x1f\x61zure_account_url_to_access_key\x18\x38 \x01(\x0b\x32\'.wandb_internal.MapStringKeyStringValue\x12.\n\x08\x63ode_dir\x18: \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0c\x63onfig_paths\x18; \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12\x30\n\ndeployment\x18= \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0c\x64isable_code\x18> \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x31\n\rdisable_hints\x18@ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12,\n\x08\x64isabled\x18\x42 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12)\n\x05\x66orce\x18G \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\ngit_commit\x18H \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\ngit_remote\x18I \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x0egit_remote_url\x18J \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08git_root\x18K \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x11heartbeat_seconds\x18L \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x32\n\x0cinit_timeout\x18O \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x08is_local\x18P \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x30\n\njob_source\x18Q 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rlabel_disable\x18R \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06launch\x18S \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x38\n\x12launch_config_path\x18T \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\x14log_symlink_internal\x18W \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10log_symlink_user\x18X \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08log_user\x18Y \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rlogin_timeout\x18Z \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12*\n\x04mode\x18\\ \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rnotebook_name\x18] \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0bproject_url\x18\x62 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x05quiet\x18\x63 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06reinit\x18\x64 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12+\n\x07relogin\x18\x65 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0cresume_fname\x18g \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12+\n\x07resumed\x18h \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\trun_group\x18j \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0crun_job_type\x18l \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08run_mode\x18m \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08run_name\x18n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\trun_notes\x18o \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x08run_tags\x18p \x01(\x0b\x32\x1f.wandb_internal.ListStringValue\x12\x35\n\x11sagemaker_disable\x18r \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x35\n\x0fsettings_system\x18t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x38\n\x12settings_workspace\x18u \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x0bshow_colors\x18v \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12.\n\nshow_emoji\x18w \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x0bshow_errors\x18x 
\x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12-\n\tshow_info\x18y \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x31\n\rshow_warnings\x18z \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12*\n\x06silent\x18{ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x32\n\x0cstart_method\x18| \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x06strict\x18} \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x33\n\x0esummary_errors\x18~ \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x34\n\x0fsummary_timeout\x18\x7f \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x36\n\x10summary_warnings\x18\x80\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12/\n\x08sweep_id\x18\x81\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x37\n\x10sweep_param_path\x18\x82\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12,\n\x07symlink\x18\x84\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x08sync_dir\x18\x85\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\x13sync_symlink_latest\x18\x87\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12J\n%table_raise_on_max_row_limit_exceeded\x18\x8a\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12/\n\x08timespec\x18\x8b\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x07tmp_dir\x18\x8c\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\twandb_dir\x18\x8e\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0ex_jupyter_name\x18\x8f\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x0ex_jupyter_path\x18\x90\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\x08job_name\x18\x91\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValueJ\x04\x08\x03\x10\x04J\x04\x08\x06\x10\x07J\x04\x08\x0c\x10\rJ\x04\x08\x13\x10\x14J\x04\x08$\x10%J\x04\x08+\x10,J\x04\x08,\x10-J\x04\x08-\x10.J\x04\x08\x32\x10\x33J\x04\x08\x33\x10\x34J\x04\x08[\x10\\J\x04\x08^\x10_J\x06\x08\x88\x01\x10\x89\x01J\x06\x08\x89\x01\x10\x8a\x01\x42\x1bZ\x19\x63ore/pkg/service_go_protob\x06proto3')
18
+
19
+ _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
20
+ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'wandb.proto.wandb_settings_pb2', globals())
21
+ if _descriptor._USE_C_DESCRIPTORS == False:
22
+
23
+ DESCRIPTOR._options = None
24
+ DESCRIPTOR._serialized_options = b'Z\031core/pkg/service_go_proto'
25
+ _MAPSTRINGKEYSTRINGVALUE_VALUEENTRY._options = None
26
+ _MAPSTRINGKEYSTRINGVALUE_VALUEENTRY._serialized_options = b'8\001'
27
+ _MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY._options = None
28
+ _MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY._serialized_options = b'8\001'
29
+ _LISTSTRINGVALUE._serialized_start=84
30
+ _LISTSTRINGVALUE._serialized_end=116
31
+ _LISTINTVALUE._serialized_start=118
32
+ _LISTINTVALUE._serialized_end=147
33
+ _MAPSTRINGKEYSTRINGVALUE._serialized_start=150
34
+ _MAPSTRINGKEYSTRINGVALUE._serialized_end=288
35
+ _MAPSTRINGKEYSTRINGVALUE_VALUEENTRY._serialized_start=244
36
+ _MAPSTRINGKEYSTRINGVALUE_VALUEENTRY._serialized_end=288
37
+ _MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE._serialized_start=291
38
+ _MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE._serialized_end=494
39
+ _MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY._serialized_start=409
40
+ _MAPSTRINGKEYMAPSTRINGKEYSTRINGVALUE_VALUEENTRY._serialized_end=494
41
+ _OPENMETRICSFILTERS._serialized_start=497
42
+ _OPENMETRICSFILTERS._serialized_end=651
43
+ _RUNMOMENT._serialized_start=653
44
+ _RUNMOMENT._serialized_end=708
45
+ _SETTINGS._serialized_start=711
46
+ _SETTINGS._serialized_end=10205
47
+ # @@protoc_insertion_point(module_scope)