ZTWHHH commited on
Commit
136a0d7
·
verified ·
1 Parent(s): eb7e967

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_tf437/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt +1 -0
  2. evalkit_tf437/lib/python3.10/site-packages/google_crc32c/__pycache__/__config__.cpython-310.pyc +0 -0
  3. evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Minimize.xNbeii-5.js.gz +3 -0
  4. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h +112 -0
  5. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h +1690 -0
  6. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h +608 -0
  7. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp +0 -0
  8. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h +224 -0
  9. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h +0 -0
  10. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h +145 -0
  11. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h +65 -0
  12. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.h +510 -0
  13. evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_types.h +108 -0
  14. evalkit_tf437/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version.h +109 -0
  15. evalkit_tf437/lib/python3.10/site-packages/nvidia/nccl/lib/__init__.py +0 -0
  16. evalkit_tf437/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc +0 -0
  17. evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvToolsExtCudaRt.h +140 -0
  18. evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvtx3/nvToolsExt.h +1499 -0
  19. evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvtx3/nvToolsExtCuda.h +170 -0
  20. evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvtx3/nvToolsExtOpenCL.h +220 -0
  21. evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/lib/libnvToolsExt.so.1 +0 -0
  22. evalkit_tf437/lib/python3.10/site-packages/portalocker-2.10.1.dist-info/INSTALLER +1 -0
  23. evalkit_tf437/lib/python3.10/site-packages/portalocker-2.10.1.dist-info/METADATA +255 -0
  24. evalkit_tf437/lib/python3.10/site-packages/pygments/__pycache__/lexer.cpython-310.pyc +0 -0
  25. evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/_cl_builtins.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/_lasso_builtins.cpython-310.pyc +0 -0
  27. evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/amdgpu.cpython-310.pyc +0 -0
  28. evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/cplint.cpython-310.pyc +0 -0
  29. evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/jslt.cpython-310.pyc +0 -0
  30. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__init__.py +61 -0
  31. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/_mapping.cpython-310.pyc +0 -0
  32. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/colorful.cpython-310.pyc +0 -0
  33. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/default.cpython-310.pyc +0 -0
  34. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/dracula.cpython-310.pyc +0 -0
  35. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/emacs.cpython-310.pyc +0 -0
  36. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/friendly_grayscale.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/igor.cpython-310.pyc +0 -0
  38. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/inkpot.cpython-310.pyc +0 -0
  39. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/lilypond.cpython-310.pyc +0 -0
  40. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/lovelace.cpython-310.pyc +0 -0
  41. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/manni.cpython-310.pyc +0 -0
  42. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/onedark.cpython-310.pyc +0 -0
  43. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/pastie.cpython-310.pyc +0 -0
  44. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/rainbow_dash.cpython-310.pyc +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/solarized.cpython-310.pyc +0 -0
  46. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/trac.cpython-310.pyc +0 -0
  47. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/vs.cpython-310.pyc +0 -0
  48. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/_mapping.py +54 -0
  49. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/algol.py +65 -0
  50. evalkit_tf437/lib/python3.10/site-packages/pygments/styles/algol_nu.py +65 -0
evalkit_tf437/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ markupsafe
evalkit_tf437/lib/python3.10/site-packages/google_crc32c/__pycache__/__config__.cpython-310.pyc ADDED
Binary file (661 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Minimize.xNbeii-5.js.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b375318fef0a427c1f3da81c564c1edd22faf77bd43882b972aff8f39c85f879
3
+ size 766
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions
6
+ * are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of NVIDIA CORPORATION nor the names of its
13
+ * contributors may be used to endorse or promote products derived
14
+ * from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ */
28
+
29
+ #ifndef __cuda_stdint_h__
30
+ #define __cuda_stdint_h__
31
+
32
+ // Compiler-specific treatment for C99's stdint.h
33
+ //
34
+ // By default, this header will use the standard headers (so it
35
+ // is your responsibility to make sure they are available), except
36
+ // on MSVC before Visual Studio 2010, when they were not provided.
37
+ // To support old MSVC, a few of the commonly-used definitions are
38
+ // provided here. If more definitions are needed, add them here,
39
+ // or replace these definitions with a complete implementation,
40
+ // such as the ones available from Google, Boost, or MSVC10. You
41
+ // can prevent the definition of any of these types (in order to
42
+ // use your own) by #defining CU_STDINT_TYPES_ALREADY_DEFINED.
43
+
44
+ #if !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
45
+
46
+ // In VS including stdint.h forces the C++ runtime dep - provide an opt-out
47
+ // (CU_STDINT_VS_FORCE_NO_STDINT_H) for users that care (notably static
48
+ // cudart).
49
+ #if defined(_MSC_VER) && ((_MSC_VER < 1600) || defined(CU_STDINT_VS_FORCE_NO_STDINT_H))
50
+
51
+ // These definitions can be used with MSVC 8 and 9,
52
+ // which don't ship with stdint.h:
53
+
54
+ typedef unsigned char uint8_t;
55
+
56
+ typedef short int16_t;
57
+ typedef unsigned short uint16_t;
58
+
59
+ // To keep it consistent with all MSVC build. define those types
60
+ // in the exact same way they are defined with the MSVC headers
61
+ #if defined(_MSC_VER)
62
+ typedef signed char int8_t;
63
+
64
+ typedef int int32_t;
65
+ typedef unsigned int uint32_t;
66
+
67
+ typedef long long int64_t;
68
+ typedef unsigned long long uint64_t;
69
+ #else
70
+ typedef char int8_t;
71
+
72
+ typedef long int32_t;
73
+ typedef unsigned long uint32_t;
74
+
75
+ typedef __int64 int64_t;
76
+ typedef unsigned __int64 uint64_t;
77
+ #endif
78
+
79
+ #elif defined(__DJGPP__)
80
+
81
+ // These definitions can be used when compiling
82
+ // C code with DJGPP, which only provides stdint.h
83
+ // when compiling C++ code with TR1 enabled.
84
+
85
+ typedef char int8_t;
86
+ typedef unsigned char uint8_t;
87
+
88
+ typedef short int16_t;
89
+ typedef unsigned short uint16_t;
90
+
91
+ typedef long int32_t;
92
+ typedef unsigned long uint32_t;
93
+
94
+ typedef long long int64_t;
95
+ typedef unsigned long long uint64_t;
96
+
97
+ #else
98
+
99
+ // Use standard headers, as specified by C99 and C++ TR1.
100
+ // Known to be provided by:
101
+ // - gcc/glibc, supported by all versions of glibc
102
+ // - djgpp, supported since 2001
103
+ // - MSVC, supported by Visual Studio 2010 and later
104
+
105
+ #include <stdint.h>
106
+
107
+ #endif
108
+
109
+ #endif // !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
110
+
111
+
112
+ #endif // file guard
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h ADDED
@@ -0,0 +1,1690 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _COOPERATIVE_GROUPS_H_
51
+ #define _COOPERATIVE_GROUPS_H_
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ #include "cooperative_groups/details/info.h"
56
+ #include "cooperative_groups/details/driver_abi.h"
57
+ #include "cooperative_groups/details/helpers.h"
58
+ #include "cooperative_groups/details/memory.h"
59
+
60
+ #if defined(_CG_HAS_STL_ATOMICS)
61
+ #include <cuda/atomic>
62
+ #define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope;
63
+ #else
64
+ #define _CG_THREAD_SCOPE(scope)
65
+ #endif
66
+
67
+ _CG_BEGIN_NAMESPACE
68
+
69
+ namespace details {
70
+ _CG_CONST_DECL unsigned int coalesced_group_id = 1;
71
+ _CG_CONST_DECL unsigned int multi_grid_group_id = 2;
72
+ _CG_CONST_DECL unsigned int grid_group_id = 3;
73
+ _CG_CONST_DECL unsigned int thread_block_id = 4;
74
+ _CG_CONST_DECL unsigned int multi_tile_group_id = 5;
75
+ _CG_CONST_DECL unsigned int cluster_group_id = 6;
76
+ }
77
+
78
+ /**
79
+ * class thread_group;
80
+ *
81
+ * Generic thread group type, into which all groups are convertible.
82
+ * It acts as a container for all storage necessary for the derived groups,
83
+ * and will dispatch the API calls to the correct derived group. This means
84
+ * that all derived groups must implement the same interface as thread_group.
85
+ */
86
+ class thread_group
87
+ {
88
+ protected:
89
+ struct group_data {
90
+ unsigned int _unused : 1;
91
+ unsigned int type : 7, : 0;
92
+ };
93
+
94
+ struct gg_data {
95
+ details::grid_workspace *gridWs;
96
+ };
97
+
98
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
99
+ struct mg_data {
100
+ unsigned long long _unused : 1;
101
+ unsigned long long type : 7;
102
+ unsigned long long handle : 56;
103
+ const details::multi_grid::multi_grid_functions *functions;
104
+ };
105
+ #endif
106
+
107
+ struct tg_data {
108
+ unsigned int is_tiled : 1;
109
+ unsigned int type : 7;
110
+ unsigned int size : 24;
111
+ // packed to 4b
112
+ unsigned int metaGroupSize : 16;
113
+ unsigned int metaGroupRank : 16;
114
+ // packed to 8b
115
+ unsigned int mask;
116
+ // packed to 12b
117
+ unsigned int _res;
118
+ };
119
+
120
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
121
+ friend class thread_block;
122
+
123
+ union __align__(8) {
124
+ group_data group;
125
+ tg_data coalesced;
126
+ gg_data grid;
127
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
128
+ mg_data multi_grid;
129
+ #endif
130
+ } _data;
131
+
132
+ _CG_QUALIFIER thread_group operator=(const thread_group& src);
133
+
134
+ _CG_QUALIFIER thread_group(unsigned int type) {
135
+ _data.group.type = type;
136
+ _data.group._unused = false;
137
+ }
138
+
139
+ #ifdef _CG_CPP11_FEATURES
140
+ static_assert(sizeof(tg_data) <= 16, "Failed size check");
141
+ static_assert(sizeof(gg_data) <= 16, "Failed size check");
142
+ # ifdef _CG_ABI_EXPERIMENTAL
143
+ static_assert(sizeof(mg_data) <= 16, "Failed size check");
144
+ # endif
145
+ #endif
146
+
147
+ public:
148
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
149
+
150
+ _CG_QUALIFIER unsigned long long size() const;
151
+ _CG_QUALIFIER unsigned long long num_threads() const;
152
+ _CG_QUALIFIER unsigned long long thread_rank() const;
153
+ _CG_QUALIFIER void sync() const;
154
+ _CG_QUALIFIER unsigned int get_type() const {
155
+ return _data.group.type;
156
+ }
157
+
158
+ };
159
+
160
+ template <unsigned int TyId>
161
+ struct thread_group_base : public thread_group {
162
+ _CG_QUALIFIER thread_group_base() : thread_group(TyId) {}
163
+ _CG_STATIC_CONST_DECL unsigned int id = TyId;
164
+ };
165
+
166
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
167
+
168
+ /**
169
+ * class multi_grid_group;
170
+ *
171
+ * Threads within this this group are guaranteed to be co-resident on the
172
+ * same system, on multiple devices within the same launched kernels.
173
+ * To use this group, the kernel must have been launched with
174
+ * cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent),
175
+ * and the device must support it (queryable device attribute).
176
+ *
177
+ * Constructed via this_multi_grid();
178
+ */
179
+
180
+
181
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
182
+ class multi_grid_group;
183
+
184
+ // Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls
185
+ template <typename = void>
186
+ __device__ _CG_DEPRECATED multi_grid_group this_multi_grid();
187
+
188
+ class multi_grid_group : public thread_group_base<details::multi_grid_group_id>
189
+ {
190
+ private:
191
+ template <typename = void>
192
+ _CG_QUALIFIER multi_grid_group() {
193
+ _data.multi_grid.functions = details::multi_grid::load_grid_intrinsics();
194
+ _data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle();
195
+ }
196
+
197
+ friend multi_grid_group this_multi_grid<void>();
198
+
199
+ public:
200
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
201
+
202
+ _CG_QUALIFIER bool is_valid() const {
203
+ return (_data.multi_grid.handle != 0);
204
+ }
205
+
206
+ _CG_QUALIFIER void sync() const {
207
+ if (!is_valid()) {
208
+ _CG_ABORT();
209
+ }
210
+ _data.multi_grid.functions->sync(_data.multi_grid.handle);
211
+ }
212
+
213
+ _CG_QUALIFIER unsigned long long num_threads() const {
214
+ _CG_ASSERT(is_valid());
215
+ return _data.multi_grid.functions->size(_data.multi_grid.handle);
216
+ }
217
+
218
+ _CG_QUALIFIER unsigned long long size() const {
219
+ return num_threads();
220
+ }
221
+
222
+ _CG_QUALIFIER unsigned long long thread_rank() const {
223
+ _CG_ASSERT(is_valid());
224
+ return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle);
225
+ }
226
+
227
+ _CG_QUALIFIER unsigned int grid_rank() const {
228
+ _CG_ASSERT(is_valid());
229
+ return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle));
230
+ }
231
+
232
+ _CG_QUALIFIER unsigned int num_grids() const {
233
+ _CG_ASSERT(is_valid());
234
+ return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle));
235
+ }
236
+ };
237
+ # else
238
+ class multi_grid_group
239
+ {
240
+ private:
241
+ unsigned long long _handle;
242
+ unsigned int _size;
243
+ unsigned int _rank;
244
+
245
+ friend _CG_QUALIFIER multi_grid_group this_multi_grid();
246
+
247
+ _CG_QUALIFIER multi_grid_group() {
248
+ _handle = details::multi_grid::get_intrinsic_handle();
249
+ _size = details::multi_grid::size(_handle);
250
+ _rank = details::multi_grid::thread_rank(_handle);
251
+ }
252
+
253
+ public:
254
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
255
+
256
+ _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const {
257
+ return (_handle != 0);
258
+ }
259
+
260
+ _CG_QUALIFIER _CG_DEPRECATED void sync() const {
261
+ if (!is_valid()) {
262
+ _CG_ABORT();
263
+ }
264
+ details::multi_grid::sync(_handle);
265
+ }
266
+
267
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const {
268
+ _CG_ASSERT(is_valid());
269
+ return _size;
270
+ }
271
+
272
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const {
273
+ return num_threads();
274
+ }
275
+
276
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const {
277
+ _CG_ASSERT(is_valid());
278
+ return _rank;
279
+ }
280
+
281
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const {
282
+ _CG_ASSERT(is_valid());
283
+ return (details::multi_grid::grid_rank(_handle));
284
+ }
285
+
286
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const {
287
+ _CG_ASSERT(is_valid());
288
+ return (details::multi_grid::num_grids(_handle));
289
+ }
290
+ };
291
+ # endif
292
+
293
+ /**
294
+ * multi_grid_group this_multi_grid()
295
+ *
296
+ * Constructs a multi_grid_group
297
+ */
298
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
299
+ template <typename>
300
+ __device__
301
+ #else
302
+ _CG_QUALIFIER
303
+ # endif
304
+ _CG_DEPRECATED
305
+ multi_grid_group this_multi_grid()
306
+ {
307
+ return multi_grid_group();
308
+ }
309
+ #endif
310
+
311
+ /**
312
+ * class grid_group;
313
+ *
314
+ * Threads within this this group are guaranteed to be co-resident on the
315
+ * same device within the same launched kernel. To use this group, the kernel
316
+ * must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent),
317
+ * and the device must support it (queryable device attribute).
318
+ *
319
+ * Constructed via this_grid();
320
+ */
321
+ class grid_group : public thread_group_base<details::grid_group_id>
322
+ {
323
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id;
324
+ friend _CG_QUALIFIER grid_group this_grid();
325
+
326
+ private:
327
+ _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) {
328
+ _data.grid.gridWs = gridWs;
329
+ }
330
+
331
+ public:
332
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
333
+
334
+ _CG_QUALIFIER bool is_valid() const {
335
+ return (_data.grid.gridWs != NULL);
336
+ }
337
+
338
+ _CG_QUALIFIER void sync() const {
339
+ if (!is_valid()) {
340
+ _CG_ABORT();
341
+ }
342
+ details::grid::sync(&_data.grid.gridWs->barrier);
343
+ }
344
+
345
+ _CG_STATIC_QUALIFIER unsigned long long size() {
346
+ return details::grid::size();
347
+ }
348
+
349
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank() {
350
+ return details::grid::thread_rank();
351
+ }
352
+
353
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
354
+ return details::grid::grid_dim();
355
+ }
356
+
357
+ _CG_STATIC_QUALIFIER unsigned long long num_threads() {
358
+ return details::grid::num_threads();
359
+ }
360
+
361
+ _CG_STATIC_QUALIFIER dim3 dim_blocks() {
362
+ return details::grid::dim_blocks();
363
+ }
364
+
365
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks() {
366
+ return details::grid::num_blocks();
367
+ }
368
+
369
+ _CG_STATIC_QUALIFIER dim3 block_index() {
370
+ return details::grid::block_index();
371
+ }
372
+
373
+ _CG_STATIC_QUALIFIER unsigned long long block_rank() {
374
+ return details::grid::block_rank();
375
+ }
376
+
377
+ # if defined(_CG_HAS_CLUSTER_GROUP)
378
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
379
+ return details::grid::dim_clusters();
380
+ }
381
+
382
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
383
+ return details::grid::num_clusters();
384
+ }
385
+
386
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
387
+ return details::grid::cluster_index();
388
+ }
389
+
390
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
391
+ return details::grid::cluster_rank();
392
+ }
393
+ # endif
394
+ };
395
+
396
+ _CG_QUALIFIER grid_group this_grid() {
397
+ // Load a workspace from the driver
398
+ grid_group gg(details::get_grid_workspace());
399
+ #ifdef _CG_DEBUG
400
+ // *all* threads must be available to synchronize
401
+ gg.sync();
402
+ #endif // _CG_DEBUG
403
+ return gg;
404
+ }
405
+
406
+ #if defined(_CG_HAS_CLUSTER_GROUP)
407
+ /**
408
+ * class cluster_group
409
+ *
410
+ * Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly
411
+ * divided along all dimensions to form groups of blocks, each group of which is
412
+ * a block cluster. Clustered grids are subject to various restrictions and
413
+ * limitations. Primarily, a cluster consists of at most 8 blocks by default
414
+ * (although the user is allowed to opt-in to non-standard sizes,) and clustered
415
+ * grids are subject to additional occupancy limitations due to per-cluster
416
+ * hardware resource consumption. In exchange, a block cluster is guaranteed to
417
+ * be a cooperative group, with access to all cooperative group capabilities, as
418
+ * well as cluster specific capabilities and accelerations. A cluster_group
419
+ * represents a block cluster.
420
+ *
421
+ * Constructed via this_cluster_group();
422
+ */
423
+ class cluster_group : public thread_group_base<details::cluster_group_id>
424
+ {
425
+ // Friends
426
+ friend _CG_QUALIFIER cluster_group this_cluster();
427
+
428
+ // Disable constructor
429
+ _CG_QUALIFIER cluster_group()
430
+ {
431
+ }
432
+
433
+ public:
434
+ //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster)
435
+
436
+ using arrival_token = struct {};
437
+
438
+ // Functionality exposed by the group
439
+ _CG_STATIC_QUALIFIER void sync()
440
+ {
441
+ return details::cluster::sync();
442
+ }
443
+
444
+ _CG_STATIC_QUALIFIER arrival_token barrier_arrive()
445
+ {
446
+ details::cluster::barrier_arrive();
447
+ return arrival_token();
448
+ }
449
+
450
+ _CG_STATIC_QUALIFIER void barrier_wait()
451
+ {
452
+ return details::cluster::barrier_wait();
453
+ }
454
+
455
+ _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&)
456
+ {
457
+ return details::cluster::barrier_wait();
458
+ }
459
+
460
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
461
+ {
462
+ return details::cluster::query_shared_rank(addr);
463
+ }
464
+
465
+ template <typename T>
466
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
467
+ {
468
+ return details::cluster::map_shared_rank(addr, rank);
469
+ }
470
+
471
+ _CG_STATIC_QUALIFIER dim3 block_index()
472
+ {
473
+ return details::cluster::block_index();
474
+ }
475
+
476
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
477
+ {
478
+ return details::cluster::block_rank();
479
+ }
480
+
481
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
482
+ {
483
+ return details::cluster::thread_rank();
484
+ }
485
+
486
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
487
+ {
488
+ return details::cluster::dim_blocks();
489
+ }
490
+
491
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
492
+ {
493
+ return details::cluster::num_blocks();
494
+ }
495
+
496
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
497
+ {
498
+ return details::cluster::dim_threads();
499
+ }
500
+
501
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
502
+ {
503
+ return details::cluster::num_threads();
504
+ }
505
+
506
+ // Legacy aliases
507
+ _CG_STATIC_QUALIFIER unsigned int size()
508
+ {
509
+ return num_threads();
510
+ }
511
+ };
512
+
513
+ /*
514
+ * cluster_group this_cluster()
515
+ *
516
+ * Constructs a cluster_group
517
+ */
518
+ _CG_QUALIFIER cluster_group this_cluster()
519
+ {
520
+ cluster_group cg;
521
+ #ifdef _CG_DEBUG
522
+ cg.sync();
523
+ #endif
524
+ return cg;
525
+ }
526
+ #endif
527
+
528
+ #if defined(_CG_CPP11_FEATURES)
529
+ class thread_block;
530
+ template <unsigned int MaxBlockSize>
531
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
532
+ #endif
533
+
534
+ /**
535
+ * class thread_block
536
+ *
537
+ * Every GPU kernel is executed by a grid of thread blocks, and threads within
538
+ * each block are guaranteed to reside on the same streaming multiprocessor.
539
+ * A thread_block represents a thread block whose dimensions are not known until runtime.
540
+ *
541
+ * Constructed via this_thread_block();
542
+ */
543
+ class thread_block : public thread_group_base<details::thread_block_id>
544
+ {
545
+ // Friends
546
+ friend _CG_QUALIFIER thread_block this_thread_block();
547
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
548
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz);
549
+
550
+ #if defined(_CG_CPP11_FEATURES)
551
+ template <unsigned int MaxBlockSize>
552
+ friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
553
+ template <unsigned int Size>
554
+ friend class __static_size_multi_warp_tile_base;
555
+
556
+ details::multi_warp_scratch* const tile_memory;
557
+
558
+ template <unsigned int MaxBlockSize>
559
+ _CG_QUALIFIER thread_block(block_tile_memory<MaxBlockSize>& scratch) :
560
+ tile_memory(details::get_scratch_ptr(&scratch)) {
561
+ #ifdef _CG_DEBUG
562
+ if (num_threads() > MaxBlockSize) {
563
+ details::abort();
564
+ }
565
+ #endif
566
+ #if !defined(_CG_HAS_RESERVED_SHARED)
567
+ tile_memory->init_barriers(thread_rank());
568
+ sync();
569
+ #endif
570
+ }
571
+ #endif
572
+
573
+ // Disable constructor
574
+ _CG_QUALIFIER thread_block()
575
+ #if defined(_CG_CPP11_FEATURES)
576
+ : tile_memory(details::get_scratch_ptr(NULL))
577
+ #endif
578
+ { }
579
+
580
+ // Internal Use
581
+ _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const {
582
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
583
+
584
+ // Invalid, immediately fail
585
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
586
+ details::abort();
587
+ return (thread_block());
588
+ }
589
+
590
+ unsigned int mask;
591
+ unsigned int base_offset = thread_rank() & (~(tilesz - 1));
592
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
593
+
594
+ mask = (unsigned int)(-1) >> (32 - masklength);
595
+ mask <<= (details::laneid() & ~(tilesz - 1));
596
+ thread_group tile = thread_group(details::coalesced_group_id);
597
+ tile._data.coalesced.mask = mask;
598
+ tile._data.coalesced.size = __popc(mask);
599
+ tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz;
600
+ tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz;
601
+ tile._data.coalesced.is_tiled = true;
602
+ return (tile);
603
+ }
604
+
605
+ public:
606
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id;
607
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
608
+
609
+ _CG_STATIC_QUALIFIER void sync() {
610
+ details::cta::sync();
611
+ }
612
+
613
+ _CG_STATIC_QUALIFIER unsigned int size() {
614
+ return details::cta::size();
615
+ }
616
+
617
+ _CG_STATIC_QUALIFIER unsigned int thread_rank() {
618
+ return details::cta::thread_rank();
619
+ }
620
+
621
+ // Additional functionality exposed by the group
622
+ _CG_STATIC_QUALIFIER dim3 group_index() {
623
+ return details::cta::group_index();
624
+ }
625
+
626
+ _CG_STATIC_QUALIFIER dim3 thread_index() {
627
+ return details::cta::thread_index();
628
+ }
629
+
630
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
631
+ return details::cta::block_dim();
632
+ }
633
+
634
+ _CG_STATIC_QUALIFIER dim3 dim_threads() {
635
+ return details::cta::dim_threads();
636
+ }
637
+
638
+ _CG_STATIC_QUALIFIER unsigned int num_threads() {
639
+ return details::cta::num_threads();
640
+ }
641
+
642
+ };
643
+
644
+ /**
645
+ * thread_block this_thread_block()
646
+ *
647
+ * Constructs a thread_block group
648
+ */
649
+ _CG_QUALIFIER thread_block this_thread_block()
650
+ {
651
+ return (thread_block());
652
+ }
653
+
654
+ #if defined(_CG_CPP11_FEATURES)
655
+ template <unsigned int MaxBlockSize>
656
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch) {
657
+ return (thread_block(scratch));
658
+ }
659
+ #endif
660
+
661
+ /**
662
+ * class coalesced_group
663
+ *
664
+ * A group representing the current set of converged threads in a warp.
665
+ * The size of the group is not guaranteed and it may return a group of
666
+ * only one thread (itself).
667
+ *
668
+ * This group exposes warp-synchronous builtins.
669
+ * Constructed via coalesced_threads();
670
+ */
671
+ class coalesced_group : public thread_group_base<details::coalesced_group_id>
672
+ {
673
+ private:
674
+ friend _CG_QUALIFIER coalesced_group coalesced_threads();
675
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
676
+ friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz);
677
+ friend class details::_coalesced_group_data_access;
678
+
679
+ _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const {
680
+ unsigned int member_pack = 0;
681
+ unsigned int member_rank = 0;
682
+ for (int bit_idx = 0; bit_idx < 32; bit_idx++) {
683
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
684
+ if (lane_bit) {
685
+ if (laneMask & lane_bit)
686
+ member_pack |= 1 << member_rank;
687
+ member_rank++;
688
+ }
689
+ }
690
+ return (member_pack);
691
+ }
692
+
693
+ // Internal Use
694
+ _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const {
695
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
696
+
697
+ // Invalid, immediately fail
698
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
699
+ details::abort();
700
+ return (coalesced_group(0));
701
+ }
702
+ if (size() <= tilesz) {
703
+ return (*this);
704
+ }
705
+
706
+ if ((_data.coalesced.is_tiled == true) && pow2_tilesz) {
707
+ unsigned int base_offset = (thread_rank() & (~(tilesz - 1)));
708
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
709
+ unsigned int mask = (unsigned int)(-1) >> (32 - masklength);
710
+
711
+ mask <<= (details::laneid() & ~(tilesz - 1));
712
+ coalesced_group coalesced_tile = coalesced_group(mask);
713
+ coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz;
714
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
715
+ coalesced_tile._data.coalesced.is_tiled = true;
716
+ return (coalesced_tile);
717
+ }
718
+ else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) {
719
+ unsigned int mask = 0;
720
+ unsigned int member_rank = 0;
721
+ int seen_lanes = (thread_rank() / tilesz) * tilesz;
722
+ for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) {
723
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
724
+ if (lane_bit) {
725
+ if (seen_lanes <= 0 && member_rank < tilesz) {
726
+ mask |= lane_bit;
727
+ member_rank++;
728
+ }
729
+ seen_lanes--;
730
+ }
731
+ }
732
+ coalesced_group coalesced_tile = coalesced_group(mask);
733
+ // Override parent with the size of this group
734
+ coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz;
735
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
736
+ return coalesced_tile;
737
+ }
738
+ else {
739
+ // None in _CG_VERSION 1000
740
+ details::abort();
741
+ }
742
+
743
+ return (coalesced_group(0));
744
+ }
745
+
746
+ protected:
747
+ _CG_QUALIFIER coalesced_group(unsigned int mask) {
748
+ _data.coalesced.mask = mask;
749
+ _data.coalesced.size = __popc(mask);
750
+ _data.coalesced.metaGroupRank = 0;
751
+ _data.coalesced.metaGroupSize = 1;
752
+ _data.coalesced.is_tiled = false;
753
+ }
754
+
755
+ _CG_QUALIFIER unsigned int get_mask() const {
756
+ return (_data.coalesced.mask);
757
+ }
758
+
759
+ public:
760
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
761
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
762
+
763
+ _CG_QUALIFIER unsigned int num_threads() const {
764
+ return _data.coalesced.size;
765
+ }
766
+
767
+ _CG_QUALIFIER unsigned int size() const {
768
+ return num_threads();
769
+ }
770
+
771
+ _CG_QUALIFIER unsigned int thread_rank() const {
772
+ return (__popc(_data.coalesced.mask & details::lanemask32_lt()));
773
+ }
774
+
775
+ // Rank of this group in the upper level of the hierarchy
776
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
777
+ return _data.coalesced.metaGroupRank;
778
+ }
779
+
780
+ // Total num partitions created out of all CTAs when the group was created
781
+ _CG_QUALIFIER unsigned int meta_group_size() const {
782
+ return _data.coalesced.metaGroupSize;
783
+ }
784
+
785
+ _CG_QUALIFIER void sync() const {
786
+ __syncwarp(_data.coalesced.mask);
787
+ }
788
+
789
+ #ifdef _CG_CPP11_FEATURES
790
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
791
+ _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
792
+ unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 :
793
+ (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1));
794
+
795
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
796
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
797
+ }
798
+
799
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
800
+ _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
801
+ if (size() == 32) {
802
+ return details::tile::shuffle_dispatch<TyElem>::shfl_down(
803
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
804
+ }
805
+
806
+ unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
807
+
808
+ if (lane >= 32)
809
+ lane = details::laneid();
810
+
811
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
812
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
813
+ }
814
+
815
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
816
+ _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const {
817
+ if (size() == 32) {
818
+ return details::tile::shuffle_dispatch<TyElem>::shfl_up(
819
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
820
+ }
821
+
822
+ unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
823
+ if (lane >= 32)
824
+ lane = details::laneid();
825
+
826
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
827
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
828
+ }
829
+ #else
830
+ template <typename TyIntegral>
831
+ _CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const {
832
+ details::assert_if_not_arithmetic<TyIntegral>();
833
+ unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 :
834
+ (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1));
835
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
836
+ }
837
+
838
+ template <typename TyIntegral>
839
+ _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const {
840
+ details::assert_if_not_arithmetic<TyIntegral>();
841
+ if (size() == 32) {
842
+ return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32));
843
+ }
844
+ unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
845
+ if (lane >= 32) lane = details::laneid();
846
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
847
+ }
848
+
849
+ template <typename TyIntegral>
850
+ _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const {
851
+ details::assert_if_not_arithmetic<TyIntegral>();
852
+ if (size() == 32) {
853
+ return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32));
854
+ }
855
+ unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
856
+ if (lane >= 32) lane = details::laneid();
857
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
858
+ }
859
+ #endif
860
+
861
+ _CG_QUALIFIER int any(int predicate) const {
862
+ return (__ballot_sync(_data.coalesced.mask, predicate) != 0);
863
+ }
864
+ _CG_QUALIFIER int all(int predicate) const {
865
+ return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask);
866
+ }
867
+ _CG_QUALIFIER unsigned int ballot(int predicate) const {
868
+ if (size() == 32) {
869
+ return (__ballot_sync(0xFFFFFFFF, predicate));
870
+ }
871
+ unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate);
872
+ return (_packLanes(lane_ballot));
873
+ }
874
+
875
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
876
+
877
+ template <typename TyIntegral>
878
+ _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
879
+ details::assert_if_not_arithmetic<TyIntegral>();
880
+ if (size() == 32) {
881
+ return (__match_any_sync(0xFFFFFFFF, val));
882
+ }
883
+ unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val);
884
+ return (_packLanes(lane_match));
885
+ }
886
+
887
+ template <typename TyIntegral>
888
+ _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
889
+ details::assert_if_not_arithmetic<TyIntegral>();
890
+ if (size() == 32) {
891
+ return (__match_all_sync(0xFFFFFFFF, val, &pred));
892
+ }
893
+ unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred);
894
+ return (_packLanes(lane_match));
895
+ }
896
+
897
+ #endif /* !_CG_HAS_MATCH_COLLECTIVE */
898
+
899
+ };
900
+
901
+ _CG_QUALIFIER coalesced_group coalesced_threads()
902
+ {
903
+ return (coalesced_group(__activemask()));
904
+ }
905
+
906
+ namespace details {
907
+ template <unsigned int Size> struct verify_thread_block_tile_size;
908
+ template <> struct verify_thread_block_tile_size<32> { typedef void OK; };
909
+ template <> struct verify_thread_block_tile_size<16> { typedef void OK; };
910
+ template <> struct verify_thread_block_tile_size<8> { typedef void OK; };
911
+ template <> struct verify_thread_block_tile_size<4> { typedef void OK; };
912
+ template <> struct verify_thread_block_tile_size<2> { typedef void OK; };
913
+ template <> struct verify_thread_block_tile_size<1> { typedef void OK; };
914
+
915
+ #ifdef _CG_CPP11_FEATURES
916
+ template <unsigned int Size>
917
+ using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant<bool, (Size & (Size - 1)) == 0>;
918
+
919
+ template <unsigned int Size>
920
+ using _is_single_warp = _CG_STL_NAMESPACE::integral_constant<bool, Size <= 32>;
921
+ template <unsigned int Size>
922
+ using _is_multi_warp =
923
+ _CG_STL_NAMESPACE::integral_constant<bool, (Size > 32) && (Size <= 1024)>;
924
+
925
+ template <unsigned int Size>
926
+ using _is_valid_single_warp_tile =
927
+ _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_single_warp<Size>::value>;
928
+ template <unsigned int Size>
929
+ using _is_valid_multi_warp_tile =
930
+ _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_multi_warp<Size>::value>;
931
+ #else
932
+ template <unsigned int Size>
933
+ struct _is_multi_warp {
934
+ static const bool value = false;
935
+ };
936
+ #endif
937
+ }
938
+
939
+ template <unsigned int Size>
940
+ class __static_size_tile_base
941
+ {
942
+ protected:
943
+ _CG_STATIC_CONST_DECL unsigned int numThreads = Size;
944
+
945
+ public:
946
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
947
+
948
+ // Rank of thread within tile
949
+ _CG_STATIC_QUALIFIER unsigned int thread_rank() {
950
+ return (details::cta::thread_rank() & (numThreads - 1));
951
+ }
952
+
953
+ // Number of threads within tile
954
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() {
955
+ return numThreads;
956
+ }
957
+
958
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() {
959
+ return num_threads();
960
+ }
961
+ };
962
+
963
+ template <unsigned int Size>
964
+ class __static_size_thread_block_tile_base : public __static_size_tile_base<Size>
965
+ {
966
+ friend class details::_coalesced_group_data_access;
967
+ typedef details::tile::tile_helpers<Size> th;
968
+
969
+ #ifdef _CG_CPP11_FEATURES
970
+ static_assert(details::_is_valid_single_warp_tile<Size>::value, "Size must be one of 1/2/4/8/16/32");
971
+ #else
972
+ typedef typename details::verify_thread_block_tile_size<Size>::OK valid;
973
+ #endif
974
+ using __static_size_tile_base<Size>::numThreads;
975
+ _CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF;
976
+
977
+ protected:
978
+ _CG_STATIC_QUALIFIER unsigned int build_mask() {
979
+ unsigned int mask = fullMask;
980
+ if (numThreads != 32) {
981
+ // [0,31] representing the current active thread in the warp
982
+ unsigned int laneId = details::laneid();
983
+ // shift mask according to the partition it belongs to
984
+ mask = th::tileMask << (laneId & ~(th::laneMask));
985
+ }
986
+ return (mask);
987
+ }
988
+
989
+ public:
990
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
991
+
992
+ _CG_STATIC_QUALIFIER void sync() {
993
+ __syncwarp(build_mask());
994
+ }
995
+
996
+ #ifdef _CG_CPP11_FEATURES
997
+ // PTX supported collectives
998
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
999
+ _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
1000
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
1001
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), srcRank, numThreads);
1002
+ }
1003
+
1004
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1005
+ _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
1006
+ return details::tile::shuffle_dispatch<TyElem>::shfl_down(
1007
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
1008
+ }
1009
+
1010
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1011
+ _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const {
1012
+ return details::tile::shuffle_dispatch<TyElem>::shfl_up(
1013
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
1014
+ }
1015
+
1016
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1017
+ _CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const {
1018
+ return details::tile::shuffle_dispatch<TyElem>::shfl_xor(
1019
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), laneMask, numThreads);
1020
+ }
1021
+ #else
1022
+ template <typename TyIntegral>
1023
+ _CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const {
1024
+ details::assert_if_not_arithmetic<TyIntegral>();
1025
+ return (__shfl_sync(build_mask(), var, srcRank, numThreads));
1026
+ }
1027
+
1028
+ template <typename TyIntegral>
1029
+ _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const {
1030
+ details::assert_if_not_arithmetic<TyIntegral>();
1031
+ return (__shfl_down_sync(build_mask(), var, delta, numThreads));
1032
+ }
1033
+
1034
+ template <typename TyIntegral>
1035
+ _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const {
1036
+ details::assert_if_not_arithmetic<TyIntegral>();
1037
+ return (__shfl_up_sync(build_mask(), var, delta, numThreads));
1038
+ }
1039
+
1040
+ template <typename TyIntegral>
1041
+ _CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const {
1042
+ details::assert_if_not_arithmetic<TyIntegral>();
1043
+ return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads));
1044
+ }
1045
+ #endif //_CG_CPP11_FEATURES
1046
+
1047
+ _CG_QUALIFIER int any(int predicate) const {
1048
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1049
+ return (lane_ballot != 0);
1050
+ }
1051
+ _CG_QUALIFIER int all(int predicate) const {
1052
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1053
+ return (lane_ballot == build_mask());
1054
+ }
1055
+ _CG_QUALIFIER unsigned int ballot(int predicate) const {
1056
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1057
+ return (lane_ballot >> (details::laneid() & (~(th::laneMask))));
1058
+ }
1059
+
1060
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
1061
+ template <typename TyIntegral>
1062
+ _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
1063
+ details::assert_if_not_arithmetic<TyIntegral>();
1064
+ unsigned int lane_match = __match_any_sync(build_mask(), val);
1065
+ return (lane_match >> (details::laneid() & (~(th::laneMask))));
1066
+ }
1067
+
1068
+ template <typename TyIntegral>
1069
+ _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
1070
+ details::assert_if_not_arithmetic<TyIntegral>();
1071
+ unsigned int lane_match = __match_all_sync(build_mask(), val, &pred);
1072
+ return (lane_match >> (details::laneid() & (~(th::laneMask))));
1073
+ }
1074
+ #endif
1075
+
1076
+ };
1077
+
1078
+ template <unsigned int Size, typename ParentT>
1079
+ class __static_parent_thread_block_tile_base
1080
+ {
1081
+ public:
1082
+ // Rank of this group in the upper level of the hierarchy
1083
+ _CG_STATIC_QUALIFIER unsigned int meta_group_rank() {
1084
+ return ParentT::thread_rank() / Size;
1085
+ }
1086
+
1087
+ // Total num partitions created out of all CTAs when the group was created
1088
+ _CG_STATIC_QUALIFIER unsigned int meta_group_size() {
1089
+ return (ParentT::size() + Size - 1) / Size;
1090
+ }
1091
+ };
1092
+
1093
+ /**
1094
+ * class thread_block_tile<unsigned int Size, ParentT = void>
1095
+ *
1096
+ * Statically-sized group type, representing one tile of a thread block.
1097
+ * The only specializations currently supported are those with native
1098
+ * hardware support (1/2/4/8/16/32)
1099
+ *
1100
+ * This group exposes warp-synchronous builtins.
1101
+ * Can only be constructed via tiled_partition<Size>(ParentT&)
1102
+ */
1103
+
1104
+ template <unsigned int Size, typename ParentT = void>
1105
+ class __single_warp_thread_block_tile :
1106
+ public __static_size_thread_block_tile_base<Size>,
1107
+ public __static_parent_thread_block_tile_base<Size, ParentT>
1108
+ {
1109
+ typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
1110
+ friend class details::_coalesced_group_data_access;
1111
+
1112
+ protected:
1113
+ _CG_QUALIFIER __single_warp_thread_block_tile() { };
1114
+ _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { };
1115
+
1116
+ _CG_STATIC_QUALIFIER unsigned int get_mask() {
1117
+ return __static_size_thread_block_tile_base<Size>::build_mask();
1118
+ }
1119
+ };
1120
+
1121
+ template <unsigned int Size>
1122
+ class __single_warp_thread_block_tile<Size, void> :
1123
+ public __static_size_thread_block_tile_base<Size>,
1124
+ public thread_group_base<details::coalesced_group_id>
1125
+ {
1126
+ _CG_STATIC_CONST_DECL unsigned int numThreads = Size;
1127
+
1128
+ template <unsigned int, typename ParentT> friend class __single_warp_thread_block_tile;
1129
+ friend class details::_coalesced_group_data_access;
1130
+
1131
+ typedef __static_size_thread_block_tile_base<numThreads> staticSizeBaseT;
1132
+
1133
+ protected:
1134
+ _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank, unsigned int meta_group_size) {
1135
+ _data.coalesced.mask = staticSizeBaseT::build_mask();
1136
+ _data.coalesced.size = numThreads;
1137
+ _data.coalesced.metaGroupRank = meta_group_rank;
1138
+ _data.coalesced.metaGroupSize = meta_group_size;
1139
+ _data.coalesced.is_tiled = true;
1140
+ }
1141
+
1142
+ _CG_QUALIFIER unsigned int get_mask() const {
1143
+ return (_data.coalesced.mask);
1144
+ }
1145
+
1146
+ public:
1147
+ using staticSizeBaseT::sync;
1148
+ using staticSizeBaseT::size;
1149
+ using staticSizeBaseT::num_threads;
1150
+ using staticSizeBaseT::thread_rank;
1151
+
1152
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
1153
+ return _data.coalesced.metaGroupRank;
1154
+ }
1155
+
1156
+ _CG_QUALIFIER unsigned int meta_group_size() const {
1157
+ return _data.coalesced.metaGroupSize;
1158
+ }
1159
+ };
1160
+
1161
+ /**
1162
+ * Outer level API calls
1163
+ * void sync(GroupT) - see <group_type>.sync()
1164
+ * void thread_rank(GroupT) - see <group_type>.thread_rank()
1165
+ * void group_size(GroupT) - see <group_type>.size()
1166
+ */
1167
+ template <class GroupT>
1168
+ _CG_QUALIFIER void sync(GroupT const &g)
1169
+ {
1170
+ g.sync();
1171
+ }
1172
+
1173
+ // TODO: Use a static dispatch to determine appropriate return type
1174
+ // C++03 is stuck with unsigned long long for now
1175
+ #ifdef _CG_CPP11_FEATURES
1176
+ template <class GroupT>
1177
+ _CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) {
1178
+ return g.thread_rank();
1179
+ }
1180
+
1181
+
1182
+ template <class GroupT>
1183
+ _CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) {
1184
+ return g.num_threads();
1185
+ }
1186
+ #else
1187
+ template <class GroupT>
1188
+ _CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) {
1189
+ return static_cast<unsigned long long>(g.thread_rank());
1190
+ }
1191
+
1192
+
1193
+ template <class GroupT>
1194
+ _CG_QUALIFIER unsigned long long group_size(GroupT const &g) {
1195
+ return static_cast<unsigned long long>(g.num_threads());
1196
+ }
1197
+ #endif
1198
+
1199
+
1200
+ /**
1201
+ * tiled_partition
1202
+ *
1203
+ * The tiled_partition(parent, tilesz) method is a collective operation that
1204
+ * partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
1205
+ *
1206
+ * A total of ((size(parent)+tilesz-1)/tilesz) subgroups will
1207
+ * be created where threads having identical k = (thread_rank(parent)/tilesz)
1208
+ * will be members of the same subgroup.
1209
+ *
1210
+ * The implementation may cause the calling thread to wait until all the members
1211
+ * of the parent group have invoked the operation before resuming execution.
1212
+ *
1213
+ * Functionality is limited to power-of-two sized subgroup instances of at most
1214
+ * 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be
1215
+ * tiled_partition() in _CG_VERSION 1000.
1216
+ */
1217
+ _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz)
1218
+ {
1219
+ if (parent.get_type() == details::coalesced_group_id) {
1220
+ const coalesced_group *_cg = static_cast<const coalesced_group*>(&parent);
1221
+ return _cg->_get_tiled_threads(tilesz);
1222
+ }
1223
+ else {
1224
+ const thread_block *_tb = static_cast<const thread_block*>(&parent);
1225
+ return _tb->_get_tiled_threads(tilesz);
1226
+ }
1227
+ }
1228
+
1229
+ // Thread block type overload: returns a basic thread_group for now (may be specialized later)
1230
+ _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz)
1231
+ {
1232
+ return (parent._get_tiled_threads(tilesz));
1233
+ }
1234
+
1235
+ // Coalesced group type overload: retains its ability to stay coalesced
1236
+ _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz)
1237
+ {
1238
+ return (parent._get_tiled_threads(tilesz));
1239
+ }
1240
+
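As a hedged illustration of the runtime-sized overloads just defined (tile size a power of two, at most 32 threads), not part of the header:

    #include <cooperative_groups.h>
    namespace cg = cooperative_groups;

    __global__ void dynamic_tiles(int *out)
    {
        cg::thread_block block = cg::this_thread_block();
        cg::thread_group tile = cg::tiled_partition(block, 8);   // 8-thread tiles
        tile.sync();                                             // each tile syncs independently
        out[block.thread_rank()] = static_cast<int>(tile.thread_rank());
    }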
1241
+ namespace details {
1242
+ template <unsigned int Size, typename ParentT>
1243
+ class internal_thread_block_tile : public __single_warp_thread_block_tile<Size, ParentT> {};
1244
+
1245
+ template <unsigned int Size, typename ParentT>
1246
+ _CG_QUALIFIER internal_thread_block_tile<Size, ParentT> tiled_partition_internal() {
1247
+ return internal_thread_block_tile<Size, ParentT>();
1248
+ }
1249
+
1250
+ template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
1251
+ _CG_QUALIFIER TyVal multi_warp_collectives_helper(
1252
+ const GroupT& group,
1253
+ WarpLambda warp_lambda,
1254
+ InterWarpLambda inter_warp_lambda) {
1255
+ return group.template collectives_scheme<TyVal>(warp_lambda, inter_warp_lambda);
1256
+ }
1257
+
1258
+ template <typename T, typename GroupT>
1259
+ _CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) {
1260
+ return group.template get_scratch_location<T>(warp_id);
1261
+ }
1262
+
1263
+ template <typename GroupT>
1264
+ _CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) {
1265
+ return group.get_sync_location();
1266
+ }
1267
+
1268
+ }
1269
+ /**
1270
+ * tiled_partition<tilesz>
1271
+ *
1272
+ * The tiled_partition<tilesz>(parent) method is a collective operation that
1273
+ * partitions the parent group into a one-dimensional, row-major tiling of subgroups.
1274
+ *
1275
+ * A total of (size(parent)/tilesz) subgroups will be created,
1276
+ * therefore the parent group size must be evenly divisible by the tilesz.
1277
+ * The allowed parent groups are thread_block or thread_block_tile<size>.
1278
+ *
1279
+ * The implementation may cause the calling thread to wait until all the members
1280
+ * of the parent group have invoked the operation before resuming execution.
1281
+ *
1282
+ * Functionality is limited to native hardware sizes, 1/2/4/8/16/32.
1283
+ * The size(parent) must be greater than the template Size parameter,
1284
+ * otherwise the results are undefined.
1285
+ */
1286
+
1287
+ #if defined(_CG_CPP11_FEATURES)
1288
+ template <unsigned int Size>
1289
+ class __static_size_multi_warp_tile_base : public __static_size_tile_base<Size>
1290
+ {
1291
+ static_assert(details::_is_valid_multi_warp_tile<Size>::value, "Size must be one of 64/128/256/512");
1292
+
1293
+ template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
1294
+ friend __device__ TyVal details::multi_warp_collectives_helper(
1295
+ const GroupT& group,
1296
+ WarpLambda warp_lambda,
1297
+ InterWarpLambda inter_warp_lambda);
1298
+ template <typename T, typename GroupT>
1299
+ friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id);
1300
+ template <typename GroupT>
1301
+ friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group);
1302
+ template <unsigned int OtherSize>
1303
+ friend class __static_size_multi_warp_tile_base;
1304
+ using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
1305
+ using ThisType = __static_size_multi_warp_tile_base<Size>;
1306
+ _CG_STATIC_CONST_DECL int numWarps = Size / 32;
1307
+
1308
+ protected:
1309
+ details::multi_warp_scratch* const tile_memory;
1310
+
1311
+ template <typename GroupT>
1312
+ _CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) {
1313
+ #if defined(_CG_HAS_RESERVED_SHARED)
1314
+ details::sync_warps_reset(get_sync_location(), details::cta::thread_rank());
1315
+ g.sync();
1316
+ #endif
1317
+ }
1318
+
1319
+
1320
+ private:
1321
+ _CG_QUALIFIER details::barrier_t* get_sync_location() const {
1322
+ // Different group sizes use different barriers, all groups of a given size share one barrier.
1323
+ unsigned int sync_id = details::log2(Size / 64);
1324
+ return &tile_memory->barriers[sync_id];
1325
+ }
1326
+
1327
+ template <typename T>
1328
+ _CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const {
1329
+ unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id;
1330
+ return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
1331
+ }
1332
+
1333
+ template <typename T>
1334
+ _CG_QUALIFIER T* get_scratch_location() const {
1335
+ unsigned int scratch_id = details::cta::thread_rank() / 32;
1336
+ return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
1337
+ }
1338
+
1339
+ template <typename TyVal>
1340
+ _CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const {
1341
+ unsigned int src_warp = src / 32;
1342
+ auto warp = details::tiled_partition_internal<32, ThisType>();
1343
+ details::barrier_t* sync_location = get_sync_location();
1344
+
1345
+ // Get warp slot of the source threads warp.
1346
+ TyVal* warp_scratch_location = get_scratch_location<TyVal>(src_warp);
1347
+
1348
+ if (warp.meta_group_rank() == src_warp) {
1349
+ warp.sync();
1350
+ // Put shuffled value into my warp slot and let my warp arrive at the barrier.
1351
+ if (thread_rank() == src) {
1352
+ *warp_scratch_location = val;
1353
+ }
1354
+ details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps);
1355
+ TyVal result = *warp_scratch_location;
1356
+ details::sync_warps_wait(sync_location, details::cta::thread_rank());
1357
+ return result;
1358
+ }
1359
+ else {
1360
+ // Wait for the source warp to arrive on the barrier.
1361
+ details::sync_warps_wait_for_specific_warp(sync_location,
1362
+ (details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp));
1363
+ TyVal result = *warp_scratch_location;
1364
+ details::sync_warps(sync_location, details::cta::thread_rank(), numWarps);
1365
+ return result;
1366
+ }
1367
+ }
1368
+
1369
+ template <typename TyVal, typename WarpLambda, typename InterWarpLambda>
1370
+ _CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const {
1371
+ static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
1372
+ "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes");
1373
+ auto warp = details::tiled_partition_internal<32, ThisType>();
1374
+ details::barrier_t* sync_location = get_sync_location();
1375
+ TyVal* warp_scratch_location = get_scratch_location<TyVal>();
1376
+
1377
+ warp_lambda(warp, warp_scratch_location);
1378
+
1379
+ if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) {
1380
+ auto subwarp = details::tiled_partition_internal<numWarps, decltype(warp)>();
1381
+ if (subwarp.meta_group_rank() == 0) {
1382
+ TyVal* thread_scratch_location = get_scratch_location<TyVal>(subwarp.thread_rank());
1383
+ inter_warp_lambda(subwarp, thread_scratch_location);
1384
+ }
1385
+ warp.sync();
1386
+ details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps);
1387
+ }
1388
+ TyVal result = *warp_scratch_location;
1389
+ return result;
1390
+ }
1391
+
1392
+ public:
1393
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id;
1394
+
1395
+ using __static_size_tile_base<Size>::thread_rank;
1396
+
1397
+ template <typename TyVal>
1398
+ _CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const {
1399
+ static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
1400
+ "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes");
1401
+ return shfl_impl(val, src);
1402
+ }
1403
+
1404
+ _CG_QUALIFIER void sync() const {
1405
+ details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps);
1406
+ }
1407
+
1408
+ _CG_QUALIFIER int any(int predicate) const {
1409
+ auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
1410
+ *warp_scratch_location = __any_sync(0xFFFFFFFF, predicate);
1411
+ };
1412
+ auto inter_warp_lambda =
1413
+ [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
1414
+ *thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
1415
+ };
1416
+ return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
1417
+ }
1418
+
1419
+ _CG_QUALIFIER int all(int predicate) const {
1420
+ auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
1421
+ *warp_scratch_location = __all_sync(0xFFFFFFFF, predicate);
1422
+ };
1423
+ auto inter_warp_lambda =
1424
+ [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
1425
+ *thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
1426
+ };
1427
+ return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
1428
+ }
1429
+ };
1430
+
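A hedged sketch of the collectives exposed by the multi-warp tile base above (any/all/shfl, staged through shared scratch memory). Whether tiles larger than a warp are available directly, or only behind additional toolkit-specific opt-ins, depends on the CUDA version; this assumes a toolkit where tiled_partition<64> on a thread_block is supported:

    #include <cooperative_groups.h>
    namespace cg = cooperative_groups;

    __global__ void multi_warp_vote(const int *flags, int *any_set)
    {
        cg::thread_block block = cg::this_thread_block();
        auto big = cg::tiled_partition<64>(block);   // two warps per tile
        int pred = flags[block.thread_rank()] != 0;
        int vote = big.any(pred);                    // warp vote, then inter-warp combine via scratch
        if (big.thread_rank() == 0)
            any_set[big.meta_group_rank()] = vote;
    }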
1431
+
1432
+ template <unsigned int Size, typename ParentT = void>
1433
+ class __multi_warp_thread_block_tile :
1434
+ public __static_size_multi_warp_tile_base<Size>,
1435
+ public __static_parent_thread_block_tile_base<Size, ParentT>
1436
+ {
1437
+ typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
1438
+ typedef __static_size_multi_warp_tile_base<Size> staticTileBaseT;
1439
+ protected:
1440
+ _CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) :
1441
+ __static_size_multi_warp_tile_base<Size>(g) {}
1442
+ };
1443
+
1444
+ template <unsigned int Size>
1445
+ class __multi_warp_thread_block_tile<Size, void> : public __static_size_multi_warp_tile_base<Size>
1446
+ {
1447
+ const unsigned int metaGroupRank;
1448
+ const unsigned int metaGroupSize;
1449
+
1450
+ protected:
1451
+ template <unsigned int OtherSize, typename ParentT>
1452
+ _CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile<OtherSize, ParentT>& g) :
1453
+ __static_size_multi_warp_tile_base<Size>(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {}
1454
+
1455
+ public:
1456
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
1457
+ return metaGroupRank;
1458
+ }
1459
+
1460
+ _CG_QUALIFIER unsigned int meta_group_size() const {
1461
+ return metaGroupSize;
1462
+ }
1463
+ };
1464
+ #endif
1465
+
1466
+ template <unsigned int Size, typename ParentT = void>
1467
+ class thread_block_tile;
1468
+
1469
+ namespace details {
1470
+ template <unsigned int Size, typename ParentT, bool IsMultiWarp>
1471
+ class thread_block_tile_impl;
1472
+
1473
+ template <unsigned int Size, typename ParentT>
1474
+ class thread_block_tile_impl<Size, ParentT, false>: public __single_warp_thread_block_tile<Size, ParentT>
1475
+ {
1476
+ protected:
1477
+ template <unsigned int OtherSize, typename OtherParentT, bool OtherIsMultiWarp>
1478
+ _CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl<OtherSize, OtherParentT, OtherIsMultiWarp>& g) :
1479
+ __single_warp_thread_block_tile<Size, ParentT>(g.meta_group_rank(), g.meta_group_size()) {}
1480
+
1481
+ _CG_QUALIFIER thread_block_tile_impl(const thread_block& g) :
1482
+ __single_warp_thread_block_tile<Size, ParentT>() {}
1483
+ };
1484
+
1485
+ #if defined(_CG_CPP11_FEATURES)
1486
+ template <unsigned int Size, typename ParentT>
1487
+ class thread_block_tile_impl<Size, ParentT, true> : public __multi_warp_thread_block_tile<Size, ParentT>
1488
+ {
1489
+ protected:
1490
+ template <typename GroupT>
1491
+ _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) :
1492
+ __multi_warp_thread_block_tile<Size, ParentT>(g) {}
1493
+ };
1494
+ #else
1495
+ template <unsigned int Size, typename ParentT>
1496
+ class thread_block_tile_impl<Size, ParentT, true>
1497
+ {
1498
+ protected:
1499
+ template <typename GroupT>
1500
+ _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {}
1501
+ };
1502
+ #endif
1503
+ }
1504
+
1505
+ template <unsigned int Size, typename ParentT>
1506
+ class thread_block_tile : public details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>
1507
+ {
1508
+ friend _CG_QUALIFIER thread_block_tile<1, void> this_thread();
1509
+
1510
+ protected:
1511
+ _CG_QUALIFIER thread_block_tile(const ParentT& g) :
1512
+ details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>(g) {}
1513
+
1514
+ public:
1515
+ _CG_QUALIFIER operator thread_block_tile<Size, void>() const {
1516
+ return thread_block_tile<Size, void>(*this);
1517
+ }
1518
+ };
1519
+
1520
+ template <unsigned int Size>
1521
+ class thread_block_tile<Size, void> : public details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>
1522
+ {
1523
+ template <unsigned int, typename ParentT>
1524
+ friend class thread_block_tile;
1525
+
1526
+ protected:
1527
+ template <unsigned int OtherSize, typename OtherParentT>
1528
+ _CG_QUALIFIER thread_block_tile(const thread_block_tile<OtherSize, OtherParentT>& g) :
1529
+ details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
1530
+
1531
+ public:
1532
+ template <typename ParentT>
1533
+ _CG_QUALIFIER thread_block_tile(const thread_block_tile<Size, ParentT>& g) :
1534
+ details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
1535
+ };
1536
+
1537
+ namespace details {
1538
+ template <unsigned int Size, typename ParentT>
1539
+ struct tiled_partition_impl;
1540
+
1541
+ template <unsigned int Size>
1542
+ struct tiled_partition_impl<Size, thread_block> : public thread_block_tile<Size, thread_block> {
1543
+ _CG_QUALIFIER tiled_partition_impl(const thread_block& g) :
1544
+ thread_block_tile<Size, thread_block>(g) {}
1545
+ };
1546
+
1547
+ // ParentT = static thread_block_tile<ParentSize, GrandParent> specialization
1548
+ template <unsigned int Size, unsigned int ParentSize, typename GrandParent>
1549
+ struct tiled_partition_impl<Size, thread_block_tile<ParentSize, GrandParent> > :
1550
+ public thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> > {
1551
+ #ifdef _CG_CPP11_FEATURES
1552
+ static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size");
1553
+ #endif
1554
+ _CG_QUALIFIER tiled_partition_impl(const thread_block_tile<ParentSize, GrandParent>& g) :
1555
+ thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> >(g) {}
1556
+ };
1557
+
1558
+ }
1559
+
1560
+ template <unsigned int Size, typename ParentT>
1561
+ _CG_QUALIFIER thread_block_tile<Size, ParentT> tiled_partition(const ParentT& g)
1562
+ {
1563
+ return details::tiled_partition_impl<Size, ParentT>(g);
1564
+ }
1565
+
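A short, hedged example of the statically sized overload defined above, using the meta-group queries to address per-tile data (not part of the header):

    #include <cooperative_groups.h>
    namespace cg = cooperative_groups;

    __global__ void warp_tiles(float *data)
    {
        cg::thread_block block = cg::this_thread_block();
        cg::thread_block_tile<16> tile = cg::tiled_partition<16>(block);
        // meta_group_rank()/meta_group_size() locate this tile among its siblings in the block.
        unsigned int base = tile.meta_group_rank() * tile.num_threads();
        data[base + tile.thread_rank()] *= 2.0f;
    }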
1566
+ /**
1567
+ * thread_group this_thread()
1568
+ *
1569
+ * Constructs a generic thread_group containing only the calling thread
1570
+ */
1571
+ _CG_QUALIFIER thread_block_tile<1, void> this_thread()
1572
+ {
1573
+ // Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its
1574
+ // meta group rank and size set to 0 and 1 respectively.
1575
+ return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block());
1576
+ }
1577
+
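For completeness, a hedged sketch of this_thread(): every thread is rank 0 of its own one-thread tile, with meta-group rank 0 and size 1:

    #include <cooperative_groups.h>
    namespace cg = cooperative_groups;

    __global__ void single_thread_demo(unsigned int *out)
    {
        auto self = cg::this_thread();   // thread_block_tile<1>
        out[cg::this_thread_block().thread_rank()] = self.thread_rank();  // always 0
    }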
1578
+ /**
1579
+ * <group_type>.sync()
1580
+ *
1581
+ * Executes a barrier across the group
1582
+ *
1583
+ * Implements both a compiler fence and an architectural fence to prevent
1584
+ * memory reordering around the barrier.
1585
+ */
1586
+ _CG_QUALIFIER void thread_group::sync() const
1587
+ {
1588
+ switch (_data.group.type) {
1589
+ case details::coalesced_group_id:
1590
+ cooperative_groups::sync(*static_cast<const coalesced_group*>(this));
1591
+ break;
1592
+ case details::thread_block_id:
1593
+ cooperative_groups::sync(*static_cast<const thread_block*>(this));
1594
+ break;
1595
+ case details::grid_group_id:
1596
+ cooperative_groups::sync(*static_cast<const grid_group*>(this));
1597
+ break;
1598
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1599
+ case details::multi_grid_group_id:
1600
+ cooperative_groups::sync(*static_cast<const multi_grid_group*>(this));
1601
+ break;
1602
+ #endif
1603
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1604
+ case details::cluster_group_id:
1605
+ cooperative_groups::sync(*static_cast<const cluster_group*>(this));
1606
+ break;
1607
+ #endif
1608
+ default:
1609
+ break;
1610
+ }
1611
+ }
1612
+
1613
+ /**
1614
+ * <group_type>.size()
1615
+ *
1616
+ * Returns the total number of threads in the group.
1617
+ */
1618
+ _CG_QUALIFIER unsigned long long thread_group::size() const
1619
+ {
1620
+ unsigned long long size = 0;
1621
+ switch (_data.group.type) {
1622
+ case details::coalesced_group_id:
1623
+ size = cooperative_groups::group_size(*static_cast<const coalesced_group*>(this));
1624
+ break;
1625
+ case details::thread_block_id:
1626
+ size = cooperative_groups::group_size(*static_cast<const thread_block*>(this));
1627
+ break;
1628
+ case details::grid_group_id:
1629
+ size = cooperative_groups::group_size(*static_cast<const grid_group*>(this));
1630
+ break;
1631
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1632
+ case details::multi_grid_group_id:
1633
+ size = cooperative_groups::group_size(*static_cast<const multi_grid_group*>(this));
1634
+ break;
1635
+ #endif
1636
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1637
+ case details::cluster_group_id:
1638
+ size = cooperative_groups::group_size(*static_cast<const cluster_group*>(this));
1639
+ break;
1640
+ #endif
1641
+ default:
1642
+ break;
1643
+ }
1644
+ return size;
1645
+ }
1646
+
1647
+ /**
1648
+ * <group_type>.thread_rank()
1649
+ *
1650
+ * Returns the linearized rank of the calling thread along the interval [0, size()).
1651
+ */
1652
+ _CG_QUALIFIER unsigned long long thread_group::thread_rank() const
1653
+ {
1654
+ unsigned long long rank = 0;
1655
+ switch (_data.group.type) {
1656
+ case details::coalesced_group_id:
1657
+ rank = cooperative_groups::thread_rank(*static_cast<const coalesced_group*>(this));
1658
+ break;
1659
+ case details::thread_block_id:
1660
+ rank = cooperative_groups::thread_rank(*static_cast<const thread_block*>(this));
1661
+ break;
1662
+ case details::grid_group_id:
1663
+ rank = cooperative_groups::thread_rank(*static_cast<const grid_group*>(this));
1664
+ break;
1665
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1666
+ case details::multi_grid_group_id:
1667
+ rank = cooperative_groups::thread_rank(*static_cast<const multi_grid_group*>(this));
1668
+ break;
1669
+ #endif
1670
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1671
+ case details::cluster_group_id:
1672
+ rank = cooperative_groups::thread_rank(*static_cast<const cluster_group*>(this));
1673
+ break;
1674
+ #endif
1675
+ default:
1676
+ break;
1677
+ }
1678
+ return rank;
1679
+ }
1680
+
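A hedged sketch of how the runtime dispatch above is typically used: a helper written against the generic thread_group works for any group kind the switch handles (coalesced, block, grid, and cluster where available):

    #include <cooperative_groups.h>
    namespace cg = cooperative_groups;

    __device__ unsigned long long rank_after_sync(const cg::thread_group &g)
    {
        g.sync();                 // dispatches on the stored group type at run time
        return g.thread_rank();   // likewise rank (and size) dispatch at run time
    }

    __global__ void generic_dispatch(unsigned long long *out)
    {
        cg::thread_block block = cg::this_thread_block();
        out[block.thread_rank()] = rank_after_sync(block);  // a thread_block binds as a thread_group
    }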
1681
+ _CG_END_NAMESPACE
1682
+
1683
+ #include <cooperative_groups/details/partitioning.h>
1684
+ #if (!defined(_MSC_VER) || defined(_WIN64))
1685
+ # include <cooperative_groups/details/invoke.h>
1686
+ #endif
1687
+
1688
+ # endif /* ! (__cplusplus, __CUDACC__) */
1689
+
1690
+ #endif /* !_COOPERATIVE_GROUPS_H_ */
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h ADDED
@@ -0,0 +1,608 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAGL_H
51
+ #define CUDAGL_H
52
+
53
+ #include <cuda.h>
54
+ #include <GL/gl.h>
55
+
56
+ #if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
57
+ #define __CUDA_DEPRECATED
58
+ #elif defined(_MSC_VER)
59
+ #define __CUDA_DEPRECATED __declspec(deprecated)
60
+ #elif defined(__GNUC__)
61
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
62
+ #else
63
+ #define __CUDA_DEPRECATED
64
+ #endif
65
+
66
+ #ifdef CUDA_FORCE_API_VERSION
67
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
68
+ #endif
69
+
70
+ #if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
71
+ #define __CUDA_API_PER_THREAD_DEFAULT_STREAM
72
+ #define __CUDA_API_PTDS(api) api ## _ptds
73
+ #define __CUDA_API_PTSZ(api) api ## _ptsz
74
+ #else
75
+ #define __CUDA_API_PTDS(api) api
76
+ #define __CUDA_API_PTSZ(api) api
77
+ #endif
78
+
79
+ #define cuGLCtxCreate cuGLCtxCreate_v2
80
+ #define cuGLMapBufferObject __CUDA_API_PTDS(cuGLMapBufferObject_v2)
81
+ #define cuGLMapBufferObjectAsync __CUDA_API_PTSZ(cuGLMapBufferObjectAsync_v2)
82
+ #define cuGLGetDevices cuGLGetDevices_v2
83
+
84
+ #ifdef __cplusplus
85
+ extern "C" {
86
+ #endif
87
+
88
+ /**
89
+ * \file cudaGL.h
90
+ * \brief Header file for the OpenGL interoperability functions of the
91
+ * low-level CUDA driver application programming interface.
92
+ */
93
+
94
+ /**
95
+ * \defgroup CUDA_GL OpenGL Interoperability
96
+ * \ingroup CUDA_DRIVER
97
+ *
98
+ * ___MANBRIEF___ OpenGL interoperability functions of the low-level CUDA
99
+ * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
100
+ *
101
+ * This section describes the OpenGL interoperability functions of the
102
+ * low-level CUDA driver application programming interface. Note that mapping
103
+ * of OpenGL resources is performed with the graphics API agnostic, resource
104
+ * mapping interface described in \ref CUDA_GRAPHICS "Graphics Interoperability".
105
+ *
106
+ * @{
107
+ */
108
+
109
+ #if defined(_WIN32)
110
+ #if !defined(WGL_NV_gpu_affinity)
111
+ typedef void* HGPUNV;
112
+ #endif
113
+ #endif /* _WIN32 */
114
+
115
+ /**
116
+ * \brief Registers an OpenGL buffer object
117
+ *
118
+ * Registers the buffer object specified by \p buffer for access by
119
+ * CUDA. A handle to the registered object is returned as \p
120
+ * pCudaResource. The register flags \p Flags specify the intended usage,
121
+ * as follows:
122
+ *
123
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this
124
+ * resource will be used. It is therefore assumed that this resource will be
125
+ * read from and written to by CUDA. This is the default value.
126
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA
127
+ * will not write to this resource.
128
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that
129
+ * CUDA will not read from this resource and will write over the
130
+ * entire contents of the resource, so none of the data previously
131
+ * stored in the resource will be preserved.
132
+ *
133
+ * \param pCudaResource - Pointer to the returned object handle
134
+ * \param buffer - name of buffer object to be registered
135
+ * \param Flags - Register flags
136
+ *
137
+ * \return
138
+ * ::CUDA_SUCCESS,
139
+ * ::CUDA_ERROR_INVALID_HANDLE,
140
+ * ::CUDA_ERROR_ALREADY_MAPPED,
141
+ * ::CUDA_ERROR_INVALID_CONTEXT,
142
+ * ::CUDA_ERROR_OPERATING_SYSTEM
143
+ * \notefnerr
144
+ *
145
+ * \sa
146
+ * ::cuGraphicsUnregisterResource,
147
+ * ::cuGraphicsMapResources,
148
+ * ::cuGraphicsResourceGetMappedPointer,
149
+ * ::cudaGraphicsGLRegisterBuffer
150
+ */
151
+ CUresult CUDAAPI cuGraphicsGLRegisterBuffer(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags);
152
+
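A hedged host-side sketch of the typical register-then-map sequence for a buffer object (the helper name and error handling are illustrative only; the map/unmap entry points are the graphics-interop API referenced in the \sa list):

    #include <cuda.h>
    #include <cudaGL.h>

    /* Register a GL buffer object once, then map it to obtain a device pointer. */
    CUresult map_gl_buffer_for_cuda(GLuint vbo, CUgraphicsResource *res,
                                    CUdeviceptr *dptr, size_t *bytes)
    {
        CUresult st = cuGraphicsGLRegisterBuffer(res, vbo, CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD);
        if (st != CUDA_SUCCESS) return st;
        st = cuGraphicsMapResources(1, res, 0);            /* map on the default stream */
        if (st != CUDA_SUCCESS) return st;
        return cuGraphicsResourceGetMappedPointer(dptr, bytes, *res);
    }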
153
+ /**
154
+ * \brief Register an OpenGL texture or renderbuffer object
155
+ *
156
+ * Registers the texture or renderbuffer object specified by \p image for access by CUDA.
157
+ * A handle to the registered object is returned as \p pCudaResource.
158
+ *
159
+ * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D,
160
+ * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY,
161
+ * or ::GL_RENDERBUFFER.
162
+ *
163
+ * The register flags \p Flags specify the intended usage, as follows:
164
+ *
165
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this
166
+ * resource will be used. It is therefore assumed that this resource will be
167
+ * read from and written to by CUDA. This is the default value.
168
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA
169
+ * will not write to this resource.
170
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that
171
+ * CUDA will not read from this resource and will write over the
172
+ * entire contents of the resource, so none of the data previously
173
+ * stored in the resource will be preserved.
174
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: Specifies that CUDA will
175
+ * bind this resource to a surface reference.
176
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: Specifies that CUDA will perform
177
+ * texture gather operations on this resource.
178
+ *
179
+ * The following image formats are supported. For brevity's sake, the list is abbreviated.
180
+ * For ex., {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats
181
+ * {GL_R8, GL_R16, GL_RG8, GL_RG16} :
182
+ * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
183
+ * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
184
+ * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X
185
+ * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}
186
+ *
187
+ * The following image classes are currently disallowed:
188
+ * - Textures with borders
189
+ * - Multisampled renderbuffers
190
+ *
191
+ * \param pCudaResource - Pointer to the returned object handle
192
+ * \param image - name of texture or renderbuffer object to be registered
193
+ * \param target - Identifies the type of object specified by \p image
194
+ * \param Flags - Register flags
195
+ *
196
+ * \return
197
+ * ::CUDA_SUCCESS,
198
+ * ::CUDA_ERROR_INVALID_HANDLE,
199
+ * ::CUDA_ERROR_ALREADY_MAPPED,
200
+ * ::CUDA_ERROR_INVALID_CONTEXT,
201
+ * ::CUDA_ERROR_OPERATING_SYSTEM
202
+ * \notefnerr
203
+ *
204
+ * \sa
205
+ * ::cuGraphicsUnregisterResource,
206
+ * ::cuGraphicsMapResources,
207
+ * ::cuGraphicsSubResourceGetMappedArray,
208
+ * ::cudaGraphicsGLRegisterImage
209
+ */
210
+ CUresult CUDAAPI cuGraphicsGLRegisterImage(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags);
211
+
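Similarly, a hedged sketch for images: register a 2D texture as read-only and fetch the CUDA array behind mip level 0 (the helper name is illustrative):

    #include <cuda.h>
    #include <cudaGL.h>

    CUresult map_gl_texture_level0(GLuint tex, CUgraphicsResource *res, CUarray *array)
    {
        CUresult st = cuGraphicsGLRegisterImage(res, tex, GL_TEXTURE_2D,
                                                CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY);
        if (st != CUDA_SUCCESS) return st;
        st = cuGraphicsMapResources(1, res, 0);
        if (st != CUDA_SUCCESS) return st;
        return cuGraphicsSubResourceGetMappedArray(array, *res, 0 /* array index */, 0 /* mip level */);
    }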
212
+ #ifdef _WIN32
213
+ /**
214
+ * \brief Gets the CUDA device associated with hGpu
215
+ *
216
+ * Returns in \p *pDevice the CUDA device associated with a \p hGpu, if
217
+ * applicable.
218
+ *
219
+ * \param pDevice - Device associated with hGpu
220
+ * \param hGpu - Handle to a GPU, as queried via ::WGL_NV_gpu_affinity()
221
+ *
222
+ * \return
223
+ * ::CUDA_SUCCESS,
224
+ * ::CUDA_ERROR_DEINITIALIZED,
225
+ * ::CUDA_ERROR_NOT_INITIALIZED,
226
+ * ::CUDA_ERROR_INVALID_CONTEXT,
227
+ * ::CUDA_ERROR_INVALID_VALUE
228
+ * \notefnerr
229
+ *
230
+ * \sa ::cuGLMapBufferObject,
231
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
232
+ * ::cuGLUnregisterBufferObject, ::cuGLUnmapBufferObjectAsync,
233
+ * ::cuGLSetBufferObjectMapFlags,
234
+ * ::cudaWGLGetDevice
235
+ */
236
+ CUresult CUDAAPI cuWGLGetDevice(CUdevice *pDevice, HGPUNV hGpu);
237
+ #endif /* _WIN32 */
238
+
239
+ /**
240
+ * CUDA devices corresponding to an OpenGL device
241
+ */
242
+ typedef enum CUGLDeviceList_enum {
243
+ CU_GL_DEVICE_LIST_ALL = 0x01, /**< The CUDA devices for all GPUs used by the current OpenGL context */
244
+ CU_GL_DEVICE_LIST_CURRENT_FRAME = 0x02, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */
245
+ CU_GL_DEVICE_LIST_NEXT_FRAME = 0x03, /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */
246
+ } CUGLDeviceList;
247
+
248
+ /**
249
+ * \brief Gets the CUDA devices associated with the current OpenGL context
250
+ *
251
+ * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
252
+ * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices
253
+ * at most cudaDeviceCount of the CUDA-compatible devices corresponding to
254
+ * the current OpenGL context. If any of the GPUs being used by the current OpenGL
255
+ * context are not CUDA capable then the call will return CUDA_ERROR_NO_DEVICE.
256
+ *
257
+ * The \p deviceList argument may be any of the following:
258
+ * - ::CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context.
259
+ * - ::CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to
260
+ * render the current frame (in SLI).
261
+ * - ::CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to
262
+ * render the next frame (in SLI). Note that this is a prediction, it can't be guaranteed that
263
+ * this is correct in all cases.
264
+ *
265
+ * \param pCudaDeviceCount - Returned number of CUDA devices.
266
+ * \param pCudaDevices - Returned CUDA devices.
267
+ * \param cudaDeviceCount - The size of the output device array pCudaDevices.
268
+ * \param deviceList - The set of devices to return.
269
+ *
270
+ * \return
271
+ * ::CUDA_SUCCESS,
272
+ * ::CUDA_ERROR_NO_DEVICE,
273
+ * ::CUDA_ERROR_INVALID_VALUE,
274
+ * ::CUDA_ERROR_INVALID_CONTEXT,
275
+ * ::CUDA_ERROR_INVALID_GRAPHICS_CONTEXT,
276
+ * ::CUDA_ERROR_OPERATING_SYSTEM
277
+ *
278
+ * \notefnerr
279
+ *
280
+ * \sa
281
+ * ::cuWGLGetDevice,
282
+ * ::cudaGLGetDevices
283
+ */
284
+ CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
285
+
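A small, hedged sketch of querying the devices behind the current OpenGL context (an OpenGL context must already be current on the calling thread and the driver API initialized):

    #include <cuda.h>
    #include <cudaGL.h>
    #include <stdio.h>

    void print_gl_cuda_devices(void)
    {
        CUdevice devices[8];
        unsigned int count = 0;
        if (cuGLGetDevices(&count, devices, 8, CU_GL_DEVICE_LIST_ALL) == CUDA_SUCCESS) {
            for (unsigned int i = 0; i < count; ++i)
                printf("OpenGL context uses CUDA device %d\n", (int)devices[i]);
        }
    }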
286
+ /**
287
+ * \defgroup CUDA_GL_DEPRECATED OpenGL Interoperability [DEPRECATED]
288
+ *
289
+ * ___MANBRIEF___ deprecated OpenGL interoperability functions of the low-level
290
+ * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
291
+ *
292
+ * This section describes deprecated OpenGL interoperability functionality.
293
+ *
294
+ * @{
295
+ */
296
+
297
+ /** Flags to map or unmap a resource */
298
+ typedef enum CUGLmap_flags_enum {
299
+ CU_GL_MAP_RESOURCE_FLAGS_NONE = 0x00,
300
+ CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01,
301
+ CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02,
302
+ } CUGLmap_flags;
303
+
304
+ /**
305
+ * \brief Create a CUDA context for interoperability with OpenGL
306
+ *
307
+ * \deprecated This function is deprecated as of Cuda 5.0.
308
+ *
309
+ * This function is deprecated and should no longer be used. It is
310
+ * no longer necessary to associate a CUDA context with an OpenGL
311
+ * context in order to achieve maximum interoperability performance.
312
+ *
313
+ * \param pCtx - Returned CUDA context
314
+ * \param Flags - Options for CUDA context creation
315
+ * \param device - Device on which to create the context
316
+ *
317
+ * \return
318
+ * ::CUDA_SUCCESS,
319
+ * ::CUDA_ERROR_DEINITIALIZED,
320
+ * ::CUDA_ERROR_NOT_INITIALIZED,
321
+ * ::CUDA_ERROR_INVALID_CONTEXT,
322
+ * ::CUDA_ERROR_INVALID_VALUE,
323
+ * ::CUDA_ERROR_OUT_OF_MEMORY
324
+ * \notefnerr
325
+ *
326
+ * \sa ::cuCtxCreate, ::cuGLInit, ::cuGLMapBufferObject,
327
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
328
+ * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
329
+ * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
330
+ * ::cuWGLGetDevice
331
+ */
332
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device );
333
+
334
+ /**
335
+ * \brief Initializes OpenGL interoperability
336
+ *
337
+ * \deprecated This function is deprecated as of Cuda 3.0.
338
+ *
339
+ * Initializes OpenGL interoperability. This function is deprecated
340
+ * and calling it is no longer required. It may fail if the needed
341
+ * OpenGL driver facilities are not available.
342
+ *
343
+ * \return
344
+ * ::CUDA_SUCCESS,
345
+ * ::CUDA_ERROR_DEINITIALIZED,
346
+ * ::CUDA_ERROR_NOT_INITIALIZED,
347
+ * ::CUDA_ERROR_INVALID_CONTEXT,
348
+ * ::CUDA_ERROR_UNKNOWN
349
+ * \notefnerr
350
+ *
351
+ * \sa ::cuGLMapBufferObject,
352
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
353
+ * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
354
+ * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
355
+ * ::cuWGLGetDevice
356
+ */
357
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLInit(void);
358
+
359
+ /**
360
+ * \brief Registers an OpenGL buffer object
361
+ *
362
+ * \deprecated This function is deprecated as of Cuda 3.0.
363
+ *
364
+ * Registers the buffer object specified by \p buffer for access by
365
+ * CUDA. This function must be called before CUDA can map the buffer
366
+ * object. There must be a valid OpenGL context bound to the current
367
+ * thread when this function is called, and the buffer name is
368
+ * resolved by that context.
369
+ *
370
+ * \param buffer - The name of the buffer object to register.
371
+ *
372
+ * \return
373
+ * ::CUDA_SUCCESS,
374
+ * ::CUDA_ERROR_DEINITIALIZED,
375
+ * ::CUDA_ERROR_NOT_INITIALIZED,
376
+ * ::CUDA_ERROR_INVALID_CONTEXT,
377
+ * ::CUDA_ERROR_ALREADY_MAPPED
378
+ * \notefnerr
379
+ *
380
+ * \sa ::cuGraphicsGLRegisterBuffer
381
+ */
382
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLRegisterBufferObject(GLuint buffer);
383
+
384
+ /**
385
+ * \brief Maps an OpenGL buffer object
386
+ *
387
+ * \deprecated This function is deprecated as of Cuda 3.0.
388
+ *
389
+ * Maps the buffer object specified by \p buffer into the address space of the
390
+ * current CUDA context and returns in \p *dptr and \p *size the base pointer
391
+ * and size of the resulting mapping.
392
+ *
393
+ * There must be a valid OpenGL context bound to the current thread
394
+ * when this function is called. This must be the same context, or a
395
+ * member of the same shareGroup, as the context that was bound when
396
+ * the buffer was registered.
397
+ *
398
+ * All streams in the current CUDA context are synchronized with the
399
+ * current GL context.
400
+ *
401
+ * \param dptr - Returned mapped base pointer
402
+ * \param size - Returned size of mapping
403
+ * \param buffer - The name of the buffer object to map
404
+ *
405
+ * \return
406
+ * ::CUDA_SUCCESS,
407
+ * ::CUDA_ERROR_DEINITIALIZED,
408
+ * ::CUDA_ERROR_NOT_INITIALIZED,
409
+ * ::CUDA_ERROR_INVALID_CONTEXT,
410
+ * ::CUDA_ERROR_INVALID_VALUE,
411
+ * ::CUDA_ERROR_MAP_FAILED
412
+ * \notefnerr
413
+ *
414
+ * \sa ::cuGraphicsMapResources
415
+ */
416
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr *dptr, size_t *size, GLuint buffer);
417
+
418
+ /**
419
+ * \brief Unmaps an OpenGL buffer object
420
+ *
421
+ * \deprecated This function is deprecated as of Cuda 3.0.
422
+ *
423
+ * Unmaps the buffer object specified by \p buffer for access by CUDA.
424
+ *
425
+ * There must be a valid OpenGL context bound to the current thread
426
+ * when this function is called. This must be the same context, or a
427
+ * member of the same shareGroup, as the context that was bound when
428
+ * the buffer was registered.
429
+ *
430
+ * All streams in the current CUDA context are synchronized with the
431
+ * current GL context.
432
+ *
433
+ * \param buffer - Buffer object to unmap
434
+ *
435
+ * \return
436
+ * ::CUDA_SUCCESS,
437
+ * ::CUDA_ERROR_DEINITIALIZED,
438
+ * ::CUDA_ERROR_NOT_INITIALIZED,
439
+ * ::CUDA_ERROR_INVALID_CONTEXT,
440
+ * ::CUDA_ERROR_INVALID_VALUE
441
+ * \notefnerr
442
+ *
443
+ * \sa ::cuGraphicsUnmapResources
444
+ */
445
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObject(GLuint buffer);
446
+
447
+ /**
448
+ * \brief Unregister an OpenGL buffer object
449
+ *
450
+ * \deprecated This function is deprecated as of Cuda 3.0.
451
+ *
452
+ * Unregisters the buffer object specified by \p buffer. This
453
+ * releases any resources associated with the registered buffer.
454
+ * After this call, the buffer may no longer be mapped for access by
455
+ * CUDA.
456
+ *
457
+ * There must be a valid OpenGL context bound to the current thread
458
+ * when this function is called. This must be the same context, or a
459
+ * member of the same shareGroup, as the context that was bound when
460
+ * the buffer was registered.
461
+ *
462
+ * \param buffer - Name of the buffer object to unregister
463
+ *
464
+ * \return
465
+ * ::CUDA_SUCCESS,
466
+ * ::CUDA_ERROR_DEINITIALIZED,
467
+ * ::CUDA_ERROR_NOT_INITIALIZED,
468
+ * ::CUDA_ERROR_INVALID_CONTEXT,
469
+ * ::CUDA_ERROR_INVALID_VALUE
470
+ * \notefnerr
471
+ *
472
+ * \sa ::cuGraphicsUnregisterResource
473
+ */
474
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnregisterBufferObject(GLuint buffer);
475
+
476
+ /**
477
+ * \brief Set the map flags for an OpenGL buffer object
478
+ *
479
+ * \deprecated This function is deprecated as of Cuda 3.0.
480
+ *
481
+ * Sets the map flags for the buffer object specified by \p buffer.
482
+ *
483
+ * Changes to \p Flags will take effect the next time \p buffer is mapped.
484
+ * The \p Flags argument may be any of the following:
485
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
486
+ * resource will be used. It is therefore assumed that this resource will be
487
+ * read from and written to by CUDA kernels. This is the default value.
488
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA kernels which
489
+ * access this resource will not write to this resource.
490
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA kernels
491
+ * which access this resource will not read from this resource and will
492
+ * write over the entire contents of the resource, so none of the data
493
+ * previously stored in the resource will be preserved.
494
+ *
495
+ * If \p buffer has not been registered for use with CUDA, then
496
+ * ::CUDA_ERROR_INVALID_HANDLE is returned. If \p buffer is presently
497
+ * mapped for access by CUDA, then ::CUDA_ERROR_ALREADY_MAPPED is returned.
498
+ *
499
+ * There must be a valid OpenGL context bound to the current thread
500
+ * when this function is called. This must be the same context, or a
501
+ * member of the same shareGroup, as the context that was bound when
502
+ * the buffer was registered.
503
+ *
504
+ * \param buffer - Buffer object to unmap
505
+ * \param Flags - Map flags
506
+ *
507
+ * \return
508
+ * ::CUDA_SUCCESS,
509
+ * ::CUDA_ERROR_NOT_INITIALIZED,
510
+ * ::CUDA_ERROR_INVALID_HANDLE,
511
+ * ::CUDA_ERROR_ALREADY_MAPPED,
512
+ * ::CUDA_ERROR_INVALID_CONTEXT,
513
+ * \notefnerr
514
+ *
515
+ * \sa ::cuGraphicsResourceSetMapFlags
516
+ */
517
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLSetBufferObjectMapFlags(GLuint buffer, unsigned int Flags);
518
+
519
+ /**
520
+ * \brief Maps an OpenGL buffer object
521
+ *
522
+ * \deprecated This function is deprecated as of Cuda 3.0.
523
+ *
524
+ * Maps the buffer object specified by \p buffer into the address space of the
525
+ * current CUDA context and returns in \p *dptr and \p *size the base pointer
526
+ * and size of the resulting mapping.
527
+ *
528
+ * There must be a valid OpenGL context bound to the current thread
529
+ * when this function is called. This must be the same context, or a
530
+ * member of the same shareGroup, as the context that was bound when
531
+ * the buffer was registered.
532
+ *
533
+ * Stream \p hStream in the current CUDA context is synchronized with
534
+ * the current GL context.
535
+ *
536
+ * \param dptr - Returned mapped base pointer
537
+ * \param size - Returned size of mapping
538
+ * \param buffer - The name of the buffer object to map
539
+ * \param hStream - Stream to synchronize
540
+ *
541
+ * \return
542
+ * ::CUDA_SUCCESS,
543
+ * ::CUDA_ERROR_DEINITIALIZED,
544
+ * ::CUDA_ERROR_NOT_INITIALIZED,
545
+ * ::CUDA_ERROR_INVALID_CONTEXT,
546
+ * ::CUDA_ERROR_INVALID_VALUE,
547
+ * ::CUDA_ERROR_MAP_FAILED
548
+ * \notefnerr
549
+ *
550
+ * \sa ::cuGraphicsMapResources
551
+ */
552
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream);
553
+
554
+ /**
555
+ * \brief Unmaps an OpenGL buffer object
556
+ *
557
+ * \deprecated This function is deprecated as of Cuda 3.0.
558
+ *
559
+ * Unmaps the buffer object specified by \p buffer for access by CUDA.
560
+ *
561
+ * There must be a valid OpenGL context bound to the current thread
562
+ * when this function is called. This must be the same context, or a
563
+ * member of the same shareGroup, as the context that was bound when
564
+ * the buffer was registered.
565
+ *
566
+ * Stream \p hStream in the current CUDA context is synchronized with
567
+ * the current GL context.
568
+ *
569
+ * \param buffer - Name of the buffer object to unmap
570
+ * \param hStream - Stream to synchronize
571
+ *
572
+ * \return
573
+ * ::CUDA_SUCCESS,
574
+ * ::CUDA_ERROR_DEINITIALIZED,
575
+ * ::CUDA_ERROR_NOT_INITIALIZED,
576
+ * ::CUDA_ERROR_INVALID_CONTEXT,
577
+ * ::CUDA_ERROR_INVALID_VALUE
578
+ * \notefnerr
579
+ *
580
+ * \sa ::cuGraphicsUnmapResources
581
+ */
582
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObjectAsync(GLuint buffer, CUstream hStream);
583
+
584
+ /** @} */ /* END CUDA_GL_DEPRECATED */
585
+ /** @} */ /* END CUDA_GL */
586
+
587
+
588
+ #if defined(__CUDA_API_VERSION_INTERNAL)
589
+ #undef cuGLCtxCreate
590
+ #undef cuGLMapBufferObject
591
+ #undef cuGLMapBufferObjectAsync
592
+ #undef cuGLGetDevices
593
+
594
+ CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
595
+ CUresult CUDAAPI cuGLMapBufferObject_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer);
596
+ CUresult CUDAAPI cuGLMapBufferObjectAsync_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream);
597
+ CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device );
598
+ CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer);
599
+ CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream);
600
+ #endif /* __CUDA_API_VERSION_INTERNAL */
601
+
602
+ #ifdef __cplusplus
603
+ };
604
+ #endif
605
+
606
+ #undef __CUDA_DEPRECATED
607
+
608
+ #endif
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h ADDED
@@ -0,0 +1,224 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_H_
51
+ # define _CUDA_PIPELINE_H_
52
+
53
+ # include "cuda_pipeline_primitives.h"
54
+
55
+ # if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
56
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
57
+ -std=c++11 compiler option.
58
+ # endif
59
+
60
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
61
+ # include "cuda_awbarrier.h"
62
+ # endif
63
+
64
+ // Integration with libcu++'s cuda::barrier<cuda::thread_scope_block>.
65
+
66
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
67
+ # if defined(_LIBCUDACXX_CUDA_ABI_VERSION)
68
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION
69
+ # else
70
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4
71
+ # endif
72
+
73
+ # define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y
74
+ # define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y)
75
+ # define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION)
76
+
77
+ namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE {
78
+ struct __block_scope_barrier_base;
79
+ }}
80
+
81
+ # endif
82
+
83
+ _CUDA_PIPELINE_BEGIN_NAMESPACE
84
+
85
+ template<size_t N, typename T>
86
+ _CUDA_PIPELINE_QUALIFIER
87
+ auto segment(T* ptr) -> T(*)[N];
88
+
89
+ class pipeline {
90
+ public:
91
+ pipeline(const pipeline&) = delete;
92
+ pipeline(pipeline&&) = delete;
93
+ pipeline& operator=(const pipeline&) = delete;
94
+ pipeline& operator=(pipeline&&) = delete;
95
+
96
+ _CUDA_PIPELINE_QUALIFIER pipeline();
97
+ _CUDA_PIPELINE_QUALIFIER size_t commit();
98
+ _CUDA_PIPELINE_QUALIFIER void commit_and_wait();
99
+ _CUDA_PIPELINE_QUALIFIER void wait(size_t batch);
100
+ template<unsigned N>
101
+ _CUDA_PIPELINE_QUALIFIER void wait_prior();
102
+
103
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
104
+ _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier);
105
+ _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier);
106
+ # endif
107
+
108
+ private:
109
+ size_t current_batch;
110
+ };
111
+
112
+ template<class T>
113
+ _CUDA_PIPELINE_QUALIFIER
114
+ void memcpy_async(T& dst, const T& src, pipeline& pipe);
115
+
116
+ template<class T, size_t DstN, size_t SrcN>
117
+ _CUDA_PIPELINE_QUALIFIER
118
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe);
119
+
120
+ template<size_t N, typename T>
121
+ _CUDA_PIPELINE_QUALIFIER
122
+ auto segment(T* ptr) -> T(*)[N]
123
+ {
124
+ return (T(*)[N])ptr;
125
+ }
126
+
127
+ _CUDA_PIPELINE_QUALIFIER
128
+ pipeline::pipeline()
129
+ : current_batch(0)
130
+ {
131
+ }
132
+
133
+ _CUDA_PIPELINE_QUALIFIER
134
+ size_t pipeline::commit()
135
+ {
136
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
137
+ return this->current_batch++;
138
+ }
139
+
140
+ _CUDA_PIPELINE_QUALIFIER
141
+ void pipeline::commit_and_wait()
142
+ {
143
+ (void)pipeline::commit();
144
+ pipeline::wait_prior<0>();
145
+ }
146
+
147
+ _CUDA_PIPELINE_QUALIFIER
148
+ void pipeline::wait(size_t batch)
149
+ {
150
+ const size_t prior = this->current_batch > batch ? this->current_batch - batch : 0;
151
+
152
+ switch (prior) {
153
+ case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break;
154
+ case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break;
155
+ case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break;
156
+ case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break;
157
+ case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break;
158
+ case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break;
159
+ case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break;
160
+ case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break;
161
+ default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break;
162
+ }
163
+ }
164
+
165
+ template<unsigned N>
166
+ _CUDA_PIPELINE_QUALIFIER
167
+ void pipeline::wait_prior()
168
+ {
169
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<N>();
170
+ }
171
+
172
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
173
+ _CUDA_PIPELINE_QUALIFIER
174
+ void pipeline::arrive_on(awbarrier& barrier)
175
+ {
176
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier);
177
+ }
178
+
179
+ _CUDA_PIPELINE_QUALIFIER
180
+ void pipeline::arrive_on(cuda::__block_scope_barrier_base & barrier)
181
+ {
182
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast<uint64_t *>(&barrier));
183
+ }
184
+ # endif
185
+
186
+ template<class T>
187
+ _CUDA_PIPELINE_QUALIFIER
188
+ void memcpy_async(T& dst, const T& src, pipeline& pipe)
189
+ {
190
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&src) & (alignof(T) - 1)));
191
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&dst) & (alignof(T) - 1)));
192
+
193
+ if (__is_trivially_copyable(T)) {
194
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed<sizeof(T), alignof(T)>(
195
+ reinterpret_cast<void*>(&dst), reinterpret_cast<const void*>(&src));
196
+ } else {
197
+ dst = src;
198
+ }
199
+ }
200
+
201
+ template<class T, size_t DstN, size_t SrcN>
202
+ _CUDA_PIPELINE_QUALIFIER
203
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe)
204
+ {
205
+ constexpr size_t dst_size = sizeof(*dst);
206
+ constexpr size_t src_size = sizeof(*src);
207
+ static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size.");
208
+ static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size.");
209
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (dst_size - 1)));
210
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (dst_size - 1)));
211
+
212
+ if (__is_trivially_copyable(T)) {
213
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict<sizeof(*dst), sizeof(*src)>(
214
+ reinterpret_cast<void*>(*dst), reinterpret_cast<const void*>(*src));
215
+ } else {
216
+ for (size_t i = 0; i < DstN; ++i) {
217
+ (*dst)[i] = (i < SrcN) ? (*src)[i] : T();
218
+ }
219
+ }
220
+ }
221
+
222
+ _CUDA_PIPELINE_END_NAMESPACE
223
+
224
+ #endif /* !_CUDA_PIPELINE_H_ */
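A hedged device-side sketch of the pipeline/memcpy_async interface declared in this header (compute capability 7.0+ and 128 threads per block are assumed; the enclosing namespace is whatever _CUDA_PIPELINE_BEGIN_NAMESPACE expands to, nvcuda::experimental in the toolkits this header has shipped with):

    #include <cuda_pipeline.h>

    __global__ void staged_copy(const float4 *src, float4 *dst)
    {
        __shared__ float4 stage[128];
        nvcuda::experimental::pipeline pipe;

        unsigned int i = threadIdx.x;                         // one float4 per thread
        nvcuda::experimental::memcpy_async(stage[i], src[blockIdx.x * 128 + i], pipe);
        pipe.commit_and_wait();                               // commit the batch and wait for it
        __syncthreads();

        dst[blockIdx.x * 128 + i] = stage[i];
    }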
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h ADDED
@@ -0,0 +1,145 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__DRIVER_FUNCTIONS_H__)
51
+ #define __DRIVER_FUNCTIONS_H__
52
+
53
+ #include "builtin_types.h"
54
+ #include "crt/host_defines.h"
55
+ #include "driver_types.h"
56
+
57
+ /**
58
+ * \addtogroup CUDART_MEMORY
59
+ *
60
+ * @{
61
+ */
62
+
63
+ /**
64
+ * \brief Returns a cudaPitchedPtr based on input parameters
65
+ *
66
+ * Returns a ::cudaPitchedPtr based on the specified input parameters \p d,
67
+ * \p p, \p xsz, and \p ysz.
68
+ *
69
+ * \param d - Pointer to allocated memory
70
+ * \param p - Pitch of allocated memory in bytes
71
+ * \param xsz - Logical width of allocation in elements
72
+ * \param ysz - Logical height of allocation in elements
73
+ *
74
+ * \return
75
+ * ::cudaPitchedPtr specified by \p d, \p p, \p xsz, and \p ysz
76
+ *
77
+ * \sa make_cudaExtent, make_cudaPos
78
+ */
79
+ static __inline__ __host__ struct cudaPitchedPtr make_cudaPitchedPtr(void *d, size_t p, size_t xsz, size_t ysz)
80
+ {
81
+ struct cudaPitchedPtr s;
82
+
83
+ s.ptr = d;
84
+ s.pitch = p;
85
+ s.xsize = xsz;
86
+ s.ysize = ysz;
87
+
88
+ return s;
89
+ }
90
+
91
+ /**
92
+ * \brief Returns a cudaPos based on input parameters
93
+ *
94
+ * Returns a ::cudaPos based on the specified input parameters \p x,
95
+ * \p y, and \p z.
96
+ *
97
+ * \param x - X position
98
+ * \param y - Y position
99
+ * \param z - Z position
100
+ *
101
+ * \return
102
+ * ::cudaPos specified by \p x, \p y, and \p z
103
+ *
104
+ * \sa make_cudaExtent, make_cudaPitchedPtr
105
+ */
106
+ static __inline__ __host__ struct cudaPos make_cudaPos(size_t x, size_t y, size_t z)
107
+ {
108
+ struct cudaPos p;
109
+
110
+ p.x = x;
111
+ p.y = y;
112
+ p.z = z;
113
+
114
+ return p;
115
+ }
116
+
117
+ /**
118
+ * \brief Returns a cudaExtent based on input parameters
119
+ *
120
+ * Returns a ::cudaExtent based on the specified input parameters \p w,
121
+ * \p h, and \p d.
122
+ *
123
+ * \param w - Width in elements when referring to array memory, in bytes when referring to linear memory
124
+ * \param h - Height in elements
125
+ * \param d - Depth in elements
126
+ *
127
+ * \return
128
+ * ::cudaExtent specified by \p w, \p h, and \p d
129
+ *
130
+ * \sa make_cudaPitchedPtr, make_cudaPos
131
+ */
132
+ static __inline__ __host__ struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d)
133
+ {
134
+ struct cudaExtent e;
135
+
136
+ e.width = w;
137
+ e.height = h;
138
+ e.depth = d;
139
+
140
+ return e;
141
+ }
142
+
143
+ /** @} */ /* END CUDART_MEMORY */
144
+
145
+ #endif /* !__DRIVER_FUNCTIONS_H__ */
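
The three `make_*` helpers above simply populate the corresponding structs. The host-side sketch below (illustrative sizes, not part of the header) shows the usual pairing with `cudaMalloc3D`; note that for linear memory the extent width is given in bytes.

#include <cuda_runtime.h>
#include <cstdio>

int main(void)
{
    // 64 floats per row (width in bytes for linear memory), 32 rows, 8 slices.
    cudaExtent extent = make_cudaExtent(64 * sizeof(float), 32, 8);

    cudaPitchedPtr dev = make_cudaPitchedPtr(NULL, 0, 0, 0);   // zero-initialized holder
    if (cudaMalloc3D(&dev, extent) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc3D failed\n");
        return 1;
    }

    cudaPos origin = make_cudaPos(0, 0, 0);   // element offset, e.g. for cudaMemcpy3DParms
    (void)origin;

    printf("row pitch: %zu bytes\n", dev.pitch);
    cudaFree(dev.ptr);
    return 0;
}
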
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h ADDED
@@ -0,0 +1,65 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/host_config.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
65
+ #endif
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.h ADDED
@@ -0,0 +1,510 @@
1
+ /*
2
+ * Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_32_INTRINSICS_H__)
51
+ #define __SM_32_INTRINSICS_H__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_32_INTRINSICS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_32_INTRINSICS_DECL__ static __device__ __inline__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
72
+ #define __DEF_IF_HOST ;
73
+ #else /* defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) */
74
+ #define __DEF_IF_HOST { }
75
+ #endif /* defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) */
76
+
77
+
78
+ /*******************************************************************************
79
+ * *
80
+ * Below are declarations of SM-3.5 intrinsics which are included as *
81
+ * source (instead of being built in to the compiler) *
82
+ * *
83
+ *******************************************************************************/
84
+ /******************************************************************************
85
+ * __ldg *
86
+ ******************************************************************************/
87
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) __DEF_IF_HOST
88
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) __DEF_IF_HOST
89
+
90
+ __SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) __DEF_IF_HOST
91
+ __SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) __DEF_IF_HOST
92
+ __SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) __DEF_IF_HOST
93
+ __SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) __DEF_IF_HOST
94
+ __SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) __DEF_IF_HOST
95
+ __SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) __DEF_IF_HOST
96
+ __SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) __DEF_IF_HOST
97
+ __SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) __DEF_IF_HOST
98
+ __SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) __DEF_IF_HOST
99
+ __SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) __DEF_IF_HOST
100
+ __SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) __DEF_IF_HOST
101
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) __DEF_IF_HOST
102
+
103
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) __DEF_IF_HOST
104
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) __DEF_IF_HOST
105
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) __DEF_IF_HOST
106
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) __DEF_IF_HOST
107
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) __DEF_IF_HOST
108
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) __DEF_IF_HOST
109
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) __DEF_IF_HOST
110
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) __DEF_IF_HOST
111
+ __SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) __DEF_IF_HOST
112
+ __SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) __DEF_IF_HOST
113
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) __DEF_IF_HOST
114
+
115
+ __SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) __DEF_IF_HOST
116
+ __SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) __DEF_IF_HOST
117
+ __SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) __DEF_IF_HOST
118
+ __SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) __DEF_IF_HOST
119
+ __SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) __DEF_IF_HOST
120
+ /******************************************************************************
121
+ * __ldcg *
122
+ ******************************************************************************/
123
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) __DEF_IF_HOST
124
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) __DEF_IF_HOST
125
+
126
+ __SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) __DEF_IF_HOST
127
+ __SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) __DEF_IF_HOST
128
+ __SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) __DEF_IF_HOST
129
+ __SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) __DEF_IF_HOST
130
+ __SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) __DEF_IF_HOST
131
+ __SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) __DEF_IF_HOST
132
+ __SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) __DEF_IF_HOST
133
+ __SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) __DEF_IF_HOST
134
+ __SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) __DEF_IF_HOST
135
+ __SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) __DEF_IF_HOST
136
+ __SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) __DEF_IF_HOST
137
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) __DEF_IF_HOST
138
+
139
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) __DEF_IF_HOST
140
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) __DEF_IF_HOST
141
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) __DEF_IF_HOST
142
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) __DEF_IF_HOST
143
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) __DEF_IF_HOST
144
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) __DEF_IF_HOST
145
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) __DEF_IF_HOST
146
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) __DEF_IF_HOST
147
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) __DEF_IF_HOST
148
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) __DEF_IF_HOST
149
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) __DEF_IF_HOST
150
+
151
+ __SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) __DEF_IF_HOST
152
+ __SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) __DEF_IF_HOST
153
+ __SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) __DEF_IF_HOST
154
+ __SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) __DEF_IF_HOST
155
+ __SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) __DEF_IF_HOST
156
+ /******************************************************************************
157
+ * __ldca *
158
+ ******************************************************************************/
159
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) __DEF_IF_HOST
160
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) __DEF_IF_HOST
161
+
162
+ __SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) __DEF_IF_HOST
163
+ __SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) __DEF_IF_HOST
164
+ __SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) __DEF_IF_HOST
165
+ __SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) __DEF_IF_HOST
166
+ __SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) __DEF_IF_HOST
167
+ __SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) __DEF_IF_HOST
168
+ __SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) __DEF_IF_HOST
169
+ __SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) __DEF_IF_HOST
170
+ __SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) __DEF_IF_HOST
171
+ __SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) __DEF_IF_HOST
172
+ __SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) __DEF_IF_HOST
173
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) __DEF_IF_HOST
174
+
175
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) __DEF_IF_HOST
176
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) __DEF_IF_HOST
177
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) __DEF_IF_HOST
178
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) __DEF_IF_HOST
179
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) __DEF_IF_HOST
180
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) __DEF_IF_HOST
181
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) __DEF_IF_HOST
182
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) __DEF_IF_HOST
183
+ __SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) __DEF_IF_HOST
184
+ __SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) __DEF_IF_HOST
185
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) __DEF_IF_HOST
186
+
187
+ __SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) __DEF_IF_HOST
188
+ __SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) __DEF_IF_HOST
189
+ __SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) __DEF_IF_HOST
190
+ __SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) __DEF_IF_HOST
191
+ __SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) __DEF_IF_HOST
192
+ /******************************************************************************
193
+ * __ldcs *
194
+ ******************************************************************************/
195
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) __DEF_IF_HOST
196
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) __DEF_IF_HOST
197
+
198
+ __SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) __DEF_IF_HOST
199
+ __SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) __DEF_IF_HOST
200
+ __SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) __DEF_IF_HOST
201
+ __SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) __DEF_IF_HOST
202
+ __SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) __DEF_IF_HOST
203
+ __SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) __DEF_IF_HOST
204
+ __SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) __DEF_IF_HOST
205
+ __SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) __DEF_IF_HOST
206
+ __SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) __DEF_IF_HOST
207
+ __SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) __DEF_IF_HOST
208
+ __SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) __DEF_IF_HOST
209
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) __DEF_IF_HOST
210
+
211
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) __DEF_IF_HOST
212
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) __DEF_IF_HOST
213
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) __DEF_IF_HOST
214
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) __DEF_IF_HOST
215
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) __DEF_IF_HOST
216
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) __DEF_IF_HOST
217
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) __DEF_IF_HOST
218
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) __DEF_IF_HOST
219
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) __DEF_IF_HOST
220
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) __DEF_IF_HOST
221
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) __DEF_IF_HOST
222
+
223
+ __SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) __DEF_IF_HOST
224
+ __SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) __DEF_IF_HOST
225
+ __SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) __DEF_IF_HOST
226
+ __SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) __DEF_IF_HOST
227
+ __SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) __DEF_IF_HOST
228
+ /******************************************************************************
229
+ * __ldlu *
230
+ ******************************************************************************/
231
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) __DEF_IF_HOST
232
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) __DEF_IF_HOST
233
+
234
+ __SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) __DEF_IF_HOST
235
+ __SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) __DEF_IF_HOST
236
+ __SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) __DEF_IF_HOST
237
+ __SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) __DEF_IF_HOST
238
+ __SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) __DEF_IF_HOST
239
+ __SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) __DEF_IF_HOST
240
+ __SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) __DEF_IF_HOST
241
+ __SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) __DEF_IF_HOST
242
+ __SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) __DEF_IF_HOST
243
+ __SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) __DEF_IF_HOST
244
+ __SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) __DEF_IF_HOST
245
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) __DEF_IF_HOST
246
+
247
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) __DEF_IF_HOST
248
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) __DEF_IF_HOST
249
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) __DEF_IF_HOST
250
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) __DEF_IF_HOST
251
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) __DEF_IF_HOST
252
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) __DEF_IF_HOST
253
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) __DEF_IF_HOST
254
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) __DEF_IF_HOST
255
+ __SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) __DEF_IF_HOST
256
+ __SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) __DEF_IF_HOST
257
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) __DEF_IF_HOST
258
+
259
+ __SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) __DEF_IF_HOST
260
+ __SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) __DEF_IF_HOST
261
+ __SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) __DEF_IF_HOST
262
+ __SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) __DEF_IF_HOST
263
+ __SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) __DEF_IF_HOST
264
+ /******************************************************************************
265
+ * __ldcv *
266
+ ******************************************************************************/
267
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) __DEF_IF_HOST
268
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) __DEF_IF_HOST
269
+
270
+ __SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) __DEF_IF_HOST
271
+ __SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) __DEF_IF_HOST
272
+ __SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) __DEF_IF_HOST
273
+ __SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) __DEF_IF_HOST
274
+ __SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) __DEF_IF_HOST
275
+ __SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) __DEF_IF_HOST
276
+ __SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) __DEF_IF_HOST
277
+ __SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) __DEF_IF_HOST
278
+ __SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) __DEF_IF_HOST
279
+ __SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) __DEF_IF_HOST
280
+ __SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) __DEF_IF_HOST
281
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) __DEF_IF_HOST
282
+
283
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) __DEF_IF_HOST
284
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) __DEF_IF_HOST
285
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) __DEF_IF_HOST
286
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) __DEF_IF_HOST
287
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) __DEF_IF_HOST
288
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) __DEF_IF_HOST
289
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) __DEF_IF_HOST
290
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) __DEF_IF_HOST
291
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) __DEF_IF_HOST
292
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) __DEF_IF_HOST
293
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) __DEF_IF_HOST
294
+
295
+ __SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) __DEF_IF_HOST
296
+ __SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) __DEF_IF_HOST
297
+ __SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) __DEF_IF_HOST
298
+ __SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) __DEF_IF_HOST
299
+ __SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) __DEF_IF_HOST
300
+ /******************************************************************************
301
+ * __stwb *
302
+ ******************************************************************************/
303
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) __DEF_IF_HOST
304
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
305
+
306
+ __SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) __DEF_IF_HOST
307
+ __SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) __DEF_IF_HOST
308
+ __SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) __DEF_IF_HOST
309
+ __SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) __DEF_IF_HOST
310
+ __SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) __DEF_IF_HOST
311
+ __SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) __DEF_IF_HOST
312
+ __SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) __DEF_IF_HOST
313
+ __SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) __DEF_IF_HOST
314
+ __SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) __DEF_IF_HOST
315
+ __SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) __DEF_IF_HOST
316
+ __SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) __DEF_IF_HOST
317
+ __SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
318
+
319
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
320
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
321
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
322
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
323
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
324
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
325
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
326
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
327
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) __DEF_IF_HOST
328
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) __DEF_IF_HOST
329
+ __SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
330
+
331
+ __SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) __DEF_IF_HOST
332
+ __SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) __DEF_IF_HOST
333
+ __SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) __DEF_IF_HOST
334
+ __SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) __DEF_IF_HOST
335
+ __SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) __DEF_IF_HOST
336
+ /******************************************************************************
337
+ * __stcg *
338
+ ******************************************************************************/
339
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) __DEF_IF_HOST
340
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
341
+
342
+ __SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) __DEF_IF_HOST
343
+ __SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) __DEF_IF_HOST
344
+ __SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) __DEF_IF_HOST
345
+ __SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) __DEF_IF_HOST
346
+ __SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) __DEF_IF_HOST
347
+ __SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) __DEF_IF_HOST
348
+ __SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) __DEF_IF_HOST
349
+ __SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) __DEF_IF_HOST
350
+ __SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) __DEF_IF_HOST
351
+ __SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) __DEF_IF_HOST
352
+ __SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) __DEF_IF_HOST
353
+ __SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
354
+
355
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
356
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
357
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
358
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
359
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
360
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
361
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
362
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
363
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) __DEF_IF_HOST
364
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) __DEF_IF_HOST
365
+ __SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
366
+
367
+ __SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) __DEF_IF_HOST
368
+ __SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) __DEF_IF_HOST
369
+ __SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) __DEF_IF_HOST
370
+ __SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) __DEF_IF_HOST
371
+ __SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) __DEF_IF_HOST
372
+ /******************************************************************************
373
+ * __stcs *
374
+ ******************************************************************************/
375
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) __DEF_IF_HOST
376
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
377
+
378
+ __SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) __DEF_IF_HOST
379
+ __SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) __DEF_IF_HOST
380
+ __SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) __DEF_IF_HOST
381
+ __SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) __DEF_IF_HOST
382
+ __SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) __DEF_IF_HOST
383
+ __SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) __DEF_IF_HOST
384
+ __SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) __DEF_IF_HOST
385
+ __SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) __DEF_IF_HOST
386
+ __SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) __DEF_IF_HOST
387
+ __SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) __DEF_IF_HOST
388
+ __SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) __DEF_IF_HOST
389
+ __SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
390
+
391
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
392
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
393
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
394
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
395
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
396
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
397
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
398
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
399
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) __DEF_IF_HOST
400
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) __DEF_IF_HOST
401
+ __SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
402
+
403
+ __SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) __DEF_IF_HOST
404
+ __SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) __DEF_IF_HOST
405
+ __SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) __DEF_IF_HOST
406
+ __SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) __DEF_IF_HOST
407
+ __SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) __DEF_IF_HOST
408
+ /******************************************************************************
409
+ * __stwt *
410
+ ******************************************************************************/
411
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) __DEF_IF_HOST
412
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
413
+
414
+ __SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) __DEF_IF_HOST
415
+ __SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) __DEF_IF_HOST
416
+ __SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) __DEF_IF_HOST
417
+ __SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) __DEF_IF_HOST
418
+ __SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) __DEF_IF_HOST
419
+ __SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) __DEF_IF_HOST
420
+ __SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) __DEF_IF_HOST
421
+ __SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) __DEF_IF_HOST
422
+ __SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) __DEF_IF_HOST
423
+ __SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) __DEF_IF_HOST
424
+ __SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) __DEF_IF_HOST
425
+ __SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
426
+
427
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
428
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
429
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
430
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
431
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
432
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
433
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
434
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
435
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) __DEF_IF_HOST
436
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) __DEF_IF_HOST
437
+ __SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
438
+
439
+ __SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) __DEF_IF_HOST
440
+ __SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) __DEF_IF_HOST
441
+ __SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) __DEF_IF_HOST
442
+ __SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) __DEF_IF_HOST
443
+ __SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) __DEF_IF_HOST
444
+
445
+
446
+ // SHF is the "funnel shift" operation - an accelerated left/right shift with carry
447
+ // operating on 64-bit quantities, which are concatenations of two 32-bit registers.
448
+
449
+ /**
450
+ * \ingroup CUDA_MATH_INTRINSIC_INT
451
+ * \brief Concatenate \p hi : \p lo, shift left by \p shift & 31 bits, return the most significant 32 bits.
452
+ *
453
+ * Shift the 64-bit value formed by concatenating argument \p lo and \p hi left by the amount specified by the argument \p shift.
454
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
455
+ * The source is shifted left by the wrapped value of \p shift (\p shift & 31).
456
+ * The most significant 32-bits of the result are returned.
457
+ *
458
+ * \return Returns the most significant 32 bits of the shifted 64-bit value.
459
+ */
460
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
461
+ /**
462
+ * \ingroup CUDA_MATH_INTRINSIC_INT
463
+ * \brief Concatenate \p hi : \p lo, shift left by min(\p shift, 32) bits, return the most significant 32 bits.
464
+ *
465
+ * Shift the 64-bit value formed by concatenating argument \p lo and \p hi left by the amount specified by the argument \p shift.
466
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
467
+ * The source is shifted left by the clamped value of \p shift (min(\p shift, 32)).
468
+ * The most significant 32-bits of the result are returned.
469
+ *
470
+ * \return Returns the most significant 32 bits of the shifted 64-bit value.
471
+ */
472
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
473
+
474
+ /**
475
+ * \ingroup CUDA_MATH_INTRINSIC_INT
476
+ * \brief Concatenate \p hi : \p lo, shift right by \p shift & 31 bits, return the least significant 32 bits.
477
+ *
478
+ * Shift the 64-bit value formed by concatenating argument \p lo and \p hi right by the amount specified by the argument \p shift.
479
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
480
+ * The source is shifted right by the wrapped value of \p shift (\p shift & 31).
481
+ * The least significant 32-bits of the result are returned.
482
+ *
483
+ * \return Returns the least significant 32 bits of the shifted 64-bit value.
484
+ */
485
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
486
+ /**
487
+ * \ingroup CUDA_MATH_INTRINSIC_INT
488
+ * \brief Concatenate \p hi : \p lo, shift right by min(\p shift, 32) bits, return the least significant 32 bits.
489
+ *
490
+ * Shift the 64-bit value formed by concatenating argument \p lo and \p hi right by the amount specified by the argument \p shift.
491
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
492
+ * The source is shifted right by the clamped value of \p shift (min(\p shift, 32)).
493
+ * The least significant 32-bits of the result are returned.
494
+ *
495
+ * \return Returns the least significant 32 bits of the shifted 64-bit value.
496
+ */
497
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
498
+
499
+
500
+ #endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
501
+
502
+ #endif /* __cplusplus && __CUDACC__ */
503
+
504
+ #undef __SM_32_INTRINSICS_DECL__
505
+
506
+ #if !defined(__CUDACC_RTC__) && (defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA))
507
+ #include "sm_32_intrinsics.hpp"
508
+ #endif /* !defined(__CUDACC_RTC__) && (defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)) */
509
+
510
+ #endif /* !__SM_32_INTRINSICS_H__ */
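
As a quick illustration of two of the declarations above (the kernel and its parameters are assumptions, not part of the header): `__ldg` loads through the read-only data cache, and `__funnelshift_l` with `hi == lo` degenerates into a 32-bit rotate-left, since the upper word of `(x:x) << (shift & 31)` is exactly `rotl(x, shift & 31)`.

// Illustrative device code, not part of the header.
__global__ void rotate_words(const unsigned int* __restrict__ in,
                             unsigned int* __restrict__ out,
                             unsigned int shift, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        unsigned int x = __ldg(&in[i]);            // read-only cached load
        out[i] = __funnelshift_l(x, x, shift);     // hi == lo  =>  rotate left by (shift & 31)
    }
}
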
evalkit_tf437/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_types.h ADDED
@@ -0,0 +1,108 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SURFACE_TYPES_H__)
51
+ #define __SURFACE_TYPES_H__
52
+
53
+ /*******************************************************************************
54
+ * *
55
+ * *
56
+ * *
57
+ *******************************************************************************/
58
+
59
+ #include "driver_types.h"
60
+
61
+ /**
62
+ * \addtogroup CUDART_TYPES
63
+ *
64
+ * @{
65
+ */
66
+
67
+ /*******************************************************************************
68
+ * *
69
+ * *
70
+ * *
71
+ *******************************************************************************/
72
+
73
+ #define cudaSurfaceType1D 0x01
74
+ #define cudaSurfaceType2D 0x02
75
+ #define cudaSurfaceType3D 0x03
76
+ #define cudaSurfaceTypeCubemap 0x0C
77
+ #define cudaSurfaceType1DLayered 0xF1
78
+ #define cudaSurfaceType2DLayered 0xF2
79
+ #define cudaSurfaceTypeCubemapLayered 0xFC
80
+
81
+ /**
82
+ * CUDA Surface boundary modes
83
+ */
84
+ enum __device_builtin__ cudaSurfaceBoundaryMode
85
+ {
86
+ cudaBoundaryModeZero = 0, /**< Zero boundary mode */
87
+ cudaBoundaryModeClamp = 1, /**< Clamp boundary mode */
88
+ cudaBoundaryModeTrap = 2 /**< Trap boundary mode */
89
+ };
90
+
91
+ /**
92
+ * CUDA Surface format modes
93
+ */
94
+ enum __device_builtin__ cudaSurfaceFormatMode
95
+ {
96
+ cudaFormatModeForced = 0, /**< Forced format mode */
97
+ cudaFormatModeAuto = 1 /**< Auto format mode */
98
+ };
99
+
100
+ /**
101
+ * An opaque value that represents a CUDA Surface object
102
+ */
103
+ typedef __device_builtin__ unsigned long long cudaSurfaceObject_t;
104
+
105
+ /** @} */
106
+ /** @} */ /* END CUDART_TYPES */
107
+
108
+ #endif /* !__SURFACE_TYPES_H__ */
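
The boundary modes and `cudaSurfaceObject_t` defined above are consumed by the surface access intrinsics rather than by this header itself. The sketch below is illustrative: the surface object is assumed to wrap a 2D float CUDA array created elsewhere with `cudaCreateSurfaceObject`, and the x coordinate of a surface access is expressed in bytes.

// Illustrative device code, not part of the header.
__global__ void clear_surface(cudaSurfaceObject_t surf, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        // x is a byte offset; out-of-range accesses are clamped to the edge
        // under cudaBoundaryModeClamp instead of trapping.
        surf2Dwrite(0.0f, surf, x * (int)sizeof(float), y, cudaBoundaryModeClamp);
    }
}
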
evalkit_tf437/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version.h ADDED
@@ -0,0 +1,109 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /**
51
+ * \file: The master cuDNN version file.
52
+ */
53
+
54
+ #ifndef CUDNN_VERSION_H_
55
+ #define CUDNN_VERSION_H_
56
+
57
+ #define CUDNN_MAJOR 8
58
+ #define CUDNN_MINOR 9
59
+ #define CUDNN_PATCHLEVEL 2
60
+
61
+ #define CUDNN_VERSION (CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL)
62
+
63
+ /* cannot use constexpr here since this is a C-only file */
64
+ /* Below is the max SM version this cuDNN library is aware of and supports natively */
65
+
66
+ #define CUDNN_MAX_SM_MAJOR_NUMBER 9
67
+ #define CUDNN_MAX_SM_MINOR_NUMBER 0
68
+ #define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10)
69
+
70
+ /* Here are constants for each of the SM Architectures we support to use in code where device version checks must be
71
+ * made */
72
+
73
+ /* MAXWELL SM 50 52 53 */
74
+ #define CUDNN_SM_50 500
75
+ #define CUDNN_SM_52 520
76
+ #define CUDNN_SM_53 530
77
+
78
+ /* PASCAL SM 60 61 62 */
79
+ #define CUDNN_SM_60 600
80
+ #define CUDNN_SM_61 610
81
+ #define CUDNN_SM_62 620
82
+
83
+ /* VOLTA SM 70 72 */
84
+ #define CUDNN_SM_70 700
85
+ #define CUDNN_SM_72 720
86
+
87
+ /* TURING SM 75 */
88
+ #define CUDNN_SM_75 750
89
+
90
+ /* AMPERE SM 80 86 87 */
91
+ #define CUDNN_SM_80 800
92
+ #define CUDNN_SM_86 860
93
+ #define CUDNN_SM_87 870
94
+
95
+ /* ADA LOVELACE SM 89 */
96
+ #define CUDNN_SM_89 890
97
+
98
+ /* HOPPER SM 90 */
99
+ #define CUDNN_SM_90 900
100
+
101
+ /* END MARKER for last known version.
102
+ * This can be replaced after support for 1000 is added
103
+ */
104
+ #define CUDNN_SM_9X_END 999
105
+
106
+ /* This is the minimum version we support; devices below this will return CUDNN_STATUS_ARCH_MISMATCH */
107
+ #define CUDNN_MIN_DEVICE_VERSION CUDNN_SM_50
108
+
109
+ #endif /* CUDNN_VERSION_H */
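
Since `CUDNN_VERSION` encodes major * 1000 + minor * 100 + patch (8902 for the 8.9.2 header above), a common pattern is to compare it against the value returned at run time by `cudnnGetVersion()`. The sketch below is illustrative host code, not part of the header.

#include <cudnn.h>
#include <cstdio>

int main(void)
{
    size_t running = cudnnGetVersion();   /* version of the loaded library, e.g. 8902 */
    printf("compiled against cuDNN %d, running cuDNN %zu\n", CUDNN_VERSION, running);
    /* Fail if the runtime library is older than the headers we built against. */
    return running >= (size_t)CUDNN_VERSION ? 0 : 1;
}
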
evalkit_tf437/lib/python3.10/site-packages/nvidia/nccl/lib/__init__.py ADDED
File without changes
evalkit_tf437/lib/python3.10/site-packages/nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvToolsExtCudaRt.h ADDED
@@ -0,0 +1,140 @@
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO USER:
5
+ *
6
+ * This source code is subject to NVIDIA ownership rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * This software and the information contained herein is PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
11
+ * of a form of NVIDIA software license agreement.
12
+ *
13
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
14
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
15
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
16
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
17
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
18
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
19
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
20
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
21
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
22
+ * OR PERFORMANCE OF THIS SOURCE CODE.
23
+ *
24
+ * U.S. Government End Users. This source code is a "commercial item" as
25
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
26
+ * "commercial computer software" and "commercial computer software
27
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
28
+ * and is provided to the U.S. Government only as a commercial end item.
29
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
30
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
31
+ * source code with only those rights set forth herein.
32
+ *
33
+ * Any use of this source code in individual and commercial software must
34
+ * include, in the user documentation and internal comments to the code,
35
+ * the above Disclaimer and U.S. Government End Users Notice.
36
+ */
37
+
38
+ #ifndef NVTOOLSEXT_CUDART_H_
39
+ #define NVTOOLSEXT_CUDART_H_
40
+
41
+ #include "cuda.h"
42
+ #include "driver_types.h"
43
+
44
+ #include "nvToolsExt.h"
45
+
46
+ #ifdef __cplusplus
47
+ extern "C" {
48
+ #endif /* __cplusplus */
49
+
50
+ /* ========================================================================= */
51
+ /** \name Functions for CUDA Resource Naming
52
+ */
53
+ /** \addtogroup RESOURCE_NAMING
54
+ * \section RESOURCE_NAMING_CUDART CUDA Runtime Resource Naming
55
+ *
56
+ * This section covers the API functions that allow the user to annotate CUDA resources
57
+ * with user-provided names.
58
+ *
59
+ * @{
60
+ */
61
+
62
+ /* ------------------------------------------------------------------------- */
63
+ /* \cond SHOW_HIDDEN
64
+ * \brief Used to build a non-colliding value for resource types, separated by class
65
+ * \version \NVTX_VERSION_2
66
+ */
67
+ #define NVTX_RESOURCE_CLASS_CUDART 5
68
+ /** \endcond */
69
+
70
+ /* ------------------------------------------------------------------------- */
71
+ /** \brief Resource types for CUDART
72
+ */
73
+ typedef enum nvtxResourceCUDARTType_t
74
+ {
75
+ NVTX_RESOURCE_TYPE_CUDART_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDART, 0), /* int device */
76
+ NVTX_RESOURCE_TYPE_CUDART_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDART, 1), /* cudaStream_t */
77
+ NVTX_RESOURCE_TYPE_CUDART_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDART, 2) /* cudaEvent_t */
78
+ } nvtxResourceCUDARTType_t;
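+ /* These enum values are intended for the identifierType field of nvtxResourceAttributes_t
+  * when creating resource objects for CUDA runtime handles via nvtxDomainResourceCreate
+  * (declared in nvToolsExt.h). */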
79
+
80
+
81
+ /* ------------------------------------------------------------------------- */
82
+ /** \brief Annotates a CUDA device.
83
+ *
84
+ * Allows the user to associate a CUDA device with a user-provided name.
85
+ *
86
+ * \param device - The id of the CUDA device to name.
87
+ * \param name - The name of the CUDA device.
88
+ *
89
+ * \version \NVTX_VERSION_1
90
+ * @{ */
91
+ NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceA(int device, const char* name);
92
+ NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceW(int device, const wchar_t* name);
93
+ /** @} */
94
+
95
+ /* ------------------------------------------------------------------------- */
96
+ /** \brief Annotates a CUDA stream.
97
+ *
98
+ * Allows the user to associate a CUDA stream with a user-provided name.
99
+ *
100
+ * \param stream - The handle of the CUDA stream to name.
101
+ * \param name - The name of the CUDA stream.
102
+ *
103
+ * \version \NVTX_VERSION_1
104
+ * @{ */
105
+ NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamA(cudaStream_t stream, const char* name);
106
+ NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamW(cudaStream_t stream, const wchar_t* name);
107
+ /** @} */
108
+
109
+ /* ------------------------------------------------------------------------- */
110
+ /** \brief Annotates a CUDA event.
111
+ *
112
+ * Allows the user to associate a CUDA event with a user-provided name.
113
+ *
114
+ * \param event - The handle of the CUDA event to name.
115
+ * \param name - The name of the CUDA event.
116
+ *
117
+ * \version \NVTX_VERSION_1
118
+ * @{ */
119
+ NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventA(cudaEvent_t event, const char* name);
120
+ NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventW(cudaEvent_t event, const wchar_t* name);
121
+ /** @} */
122
+
123
+ /** @} */ /* END RESOURCE_NAMING */
124
+
125
+ /* ========================================================================= */
126
+ #ifdef UNICODE
127
+ #define nvtxNameCudaDevice nvtxNameCudaDeviceW
128
+ #define nvtxNameCudaStream nvtxNameCudaStreamW
129
+ #define nvtxNameCudaEvent nvtxNameCudaEventW
130
+ #else
131
+ #define nvtxNameCudaDevice nvtxNameCudaDeviceA
132
+ #define nvtxNameCudaStream nvtxNameCudaStreamA
133
+ #define nvtxNameCudaEvent nvtxNameCudaEventA
134
+ #endif
135
+
136
+ #ifdef __cplusplus
137
+ }
138
+ #endif /* __cplusplus */
139
+
140
+ #endif /* NVTOOLSEXT_CUDART_H_ */
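A hypothetical usage sketch for the naming entry points declared above; the stream created and the names chosen are assumptions, while the nvtxNameCuda* calls are exactly the ones this header declares:

    #include <cuda_runtime.h>
    #include <nvToolsExtCudaRt.h>

    static void label_cuda_objects(void)
    {
        cudaStream_t copyStream = NULL;
        cudaStreamCreate(&copyStream);

        nvtxNameCudaDeviceA(0, "Primary GPU");           /* names CUDA device 0 */
        nvtxNameCudaStreamA(copyStream, "H2D copies");   /* names the stream handle */
    }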
evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvtx3/nvToolsExt.h ADDED
@@ -0,0 +1,1499 @@
1
+ /*
2
+ * Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO USER:
5
+ *
6
+ * This source code is subject to NVIDIA ownership rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * This software and the information contained herein is PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
11
+ * of a form of NVIDIA software license agreement.
12
+ *
13
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
14
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
15
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
16
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
17
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
18
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
19
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
20
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
21
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
22
+ * OR PERFORMANCE OF THIS SOURCE CODE.
23
+ *
24
+ * U.S. Government End Users. This source code is a "commercial item" as
25
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
26
+ * "commercial computer software" and "commercial computer software
27
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
28
+ * and is provided to the U.S. Government only as a commercial end item.
29
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
30
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
31
+ * source code with only those rights set forth herein.
32
+ *
33
+ * Any use of this source code in individual and commercial software must
34
+ * include, in the user documentation and internal comments to the code,
35
+ * the above Disclaimer and U.S. Government End Users Notice.
36
+ */
37
+
38
+ /** \file nvToolsExt.h
39
+ */
40
+
41
+ /* ========================================================================= */
42
+ /** \mainpage
43
+ * \tableofcontents
44
+ * \section INTRODUCTION Introduction
45
+ *
46
+ * The NVIDIA Tools Extension library is a set of functions that a
47
+ * developer can use to provide additional information to tools.
48
+ * The additional information is used by the tool to improve
49
+ * analysis and visualization of data.
50
+ *
51
+ * The library introduces close to zero overhead if no tool is
52
+ * attached to the application. The overhead when a tool is
53
+ * attached is specific to the tool.
54
+ *
55
+ * \section INITIALIZATION_SECTION Initialization
56
+ *
57
+ * Typically the tool's library that plugs into NVTX is indirectly
58
+ * loaded via environmental properties that are platform specific.
59
+ * For some platforms or special cases, the user may be required
60
+ * to explicitly initialize the library instead. This can also
61
+ * be helpful to control when the API loads a tool's library instead
62
+ * of what would typically be the first function call to emit info.
63
+ * For these rare cases, see \ref INITIALIZATION for additional information.
64
+ *
65
+ * \section MARKERS_AND_RANGES Markers and Ranges
66
+ *
67
+ * Markers and ranges are used to describe events at a specific time (markers)
68
+ * or over a time span (ranges) during the execution of the application
69
+ * respectively.
70
+ *
71
+ * \subsection MARKERS Markers
72
+ *
73
+ * Markers denote specific moments in time.
74
+ *
75
+ *
76
+ * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
77
+ * how to specify the domain.
78
+ *
79
+ * \subsection THREAD_RANGES Thread Ranges
80
+ *
81
+ * Thread ranges denote nested time ranges. Nesting is maintained per thread
82
+ * per domain and does not require any additional correlation mechanism. The
83
+ * duration of a thread range is defined by the corresponding pair of
84
+ * nvtxRangePush* to nvtxRangePop API calls.
85
+ *
86
+ * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
87
+ * how to specify the domain.
88
+ *
89
+ * \subsection PROCESS_RANGES Process Ranges
90
+ *
91
+ * Process ranges denote a time span that can expose arbitrary concurrency, as
92
+ * opposed to thread ranges that only support nesting. In addition the range
93
+ * start event can happen on a different thread than the end marker. For the
94
+ * correlation of a start/end pair a unique correlation ID is used that is
95
+ * returned from the start API call and needs to be passed into the end API
96
+ * call.
97
+ *
98
+ * \subsection EVENT_ATTRIBUTES Event Attributes
99
+ *
100
+ * \ref MARKERS_AND_RANGES can be annotated with various attributes to provide
101
+ * additional information for an event or to guide the tool's visualization of
102
+ * the data. Each of the attributes is optional and if left unused the
103
+ * attributes fall back to a default value. The attributes include:
104
+ * - color
105
+ * - category
106
+ *
107
+ * To specify any attribute other than the text message, the \ref
108
+ * EVENT_ATTRIBUTE_STRUCTURE "Event Attribute Structure" must be used.
109
+ *
110
+ * \section DOMAINS Domains
111
+ *
112
+ * Domains enable developers to scope annotations. By default all events and
113
+ * annotations are in the default domain. Additional domains can be registered.
114
+ * This allows developers to scope markers, ranges, and resources names to
115
+ * avoid conflicts.
116
+ *
117
+ * The function ::nvtxDomainCreateA or ::nvtxDomainCreateW is used to create
118
+ * a named domain.
119
+ *
120
+ * Each domain maintains its own
121
+ * - categories
122
+ * - thread range stacks
123
+ * - registered strings
124
+ *
125
+ * The function ::nvtxDomainDestroy marks the end of the domain. Destroying
126
+ * a domain unregisters and destroys all objects associated with it such as
127
+ * registered strings, resource objects, named categories, and started ranges.
128
+ *
129
+ * \section RESOURCE_NAMING Resource Naming
130
+ *
131
+ * This section covers calls that allow the user to annotate objects with user-provided
132
+ * names in order to allow for a better analysis of complex trace data. All of
133
+ * the functions take the handle or the ID of the object to name and the name.
134
+ * The functions can be called multiple times during the execution of an
135
+ * application; however, in that case it is implementation dependent which
136
+ * name will be reported by the tool.
137
+ *
138
+ * \subsection CATEGORY_NAMING Category Naming
139
+ *
140
+ * Some functions in this library support associating an integer category
141
+ * to enable filtering and sorting. The category naming functions allow
142
+ * the application to associate a user friendly name with the integer
143
+ * category. Support for domains has been added in NVTX_VERSION_2 to
144
+ * avoid collisions when domains are developed independently.
145
+ *
146
+ * \subsection RESOURCE_OBJECTS Resource Objects
147
+ *
148
+ * Resource objects are a generic mechanism for attaching data to an application
149
+ * resource. The identifier field makes the association to a pointer or handle,
150
+ * while the type field helps provide deeper understanding of the identifier as
151
+ * well as enabling differentiation in cases where handles generated by different
152
+ * APIs may collide. The resource object may also have an associated message to
153
+ * associate with the application resource, enabling further annotation of this
154
+ * object and how it is used.
155
+ *
156
+ * The resource object was introduced in NVTX_VERSION_2 to supersede existing naming
157
+ * functions and allow the application resource identified by those functions to be
158
+ * associated to a domain. The other naming functions are still supported for backward
159
+ * compatibility but will be associated only to the default domain.
160
+ *
161
+ * \subsection RESOURCE_NAMING_OS Resource Naming
162
+ *
163
+ * Some operating system resource creation APIs do not support providing a user-friendly
164
+ * name, such as some OS thread creation APIs. This API supports resource naming
165
+ * both through resource objects and functions following the pattern
166
+ * nvtxName[RESOURCE_TYPE][A|W](identifier, name). Resource objects introduced in NVTX_VERSION_2
167
+ * supersede the other functions with a more general method of assigning names to OS resources,
168
+ * along with associating them to domains too. The older nvtxName* functions are only associated
169
+ * with the default domain.
170
+ * \section EXTENSIONS Optional Extensions
171
+ * Optional extensions will either appear within the existing sections they extend or appear
172
+ * in the "Related Pages" when they introduce new concepts.
173
+ */
174
+
175
+ /**
176
+ * Tools Extension API version
177
+ */
178
+ #if defined(NVTX_VERSION) && NVTX_VERSION < 3
179
+ #error "Trying to #include NVTX version 3 in a source file where an older NVTX version has already been included. If you are not directly using NVTX (the NVIDIA Tools Extension library), you are getting this error because libraries you are using have included different versions of NVTX. Suggested solutions are: (1) reorder #includes so the newest NVTX version is included first, (2) avoid using the conflicting libraries in the same .c/.cpp file, or (3) update the library using the older NVTX version to use the newer version instead."
180
+ #endif
181
+
182
+ /* Header guard */
183
+ #if !defined(NVTX_VERSION)
184
+ #define NVTX_VERSION 3
185
+
186
+ #if defined(_MSC_VER)
187
+ #define NVTX_API __stdcall
188
+ #define NVTX_INLINE_STATIC __inline static
189
+ #else /*defined(__GNUC__)*/
190
+ #define NVTX_API
191
+ #define NVTX_INLINE_STATIC inline static
192
+ #endif /* Platform */
193
+
194
+ #if defined(NVTX_NO_IMPL)
195
+ /* When omitting implementation, avoid declaring functions inline */
196
+ /* without definitions, since this causes compiler warnings. */
197
+ #define NVTX_DECLSPEC
198
+ #elif defined(NVTX_EXPORT_API)
199
+ /* Allow overriding definition of NVTX_DECLSPEC when exporting API. */
200
+ /* Default is empty, meaning non-inline with external linkage. */
201
+ #if !defined(NVTX_DECLSPEC)
202
+ #define NVTX_DECLSPEC
203
+ #endif
204
+ #else
205
+ /* Normal NVTX usage defines the NVTX API inline with static */
206
+ /* (internal) linkage. */
207
+ #define NVTX_DECLSPEC NVTX_INLINE_STATIC
208
+ #endif
209
+
210
+ #include "nvtxDetail/nvtxLinkOnce.h"
211
+
212
+ #define NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION) NAME##_v##VERSION
213
+ #define NVTX_VERSIONED_IDENTIFIER_L2(NAME, VERSION) NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION)
214
+ #define NVTX_VERSIONED_IDENTIFIER(NAME) NVTX_VERSIONED_IDENTIFIER_L2(NAME, NVTX_VERSION)
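+ /* Example expansion: with NVTX_VERSION == 3, NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx)
+  * token-pastes to nvtxMarkEx_v3, so each implementation symbol carries its version. */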
215
+
216
+ /**
217
+ * The nvToolsExt library depends on stdint.h. If the build tool chain in use
218
+ * does not include stdint.h then define NVTX_STDINT_TYPES_ALREADY_DEFINED
219
+ * and define the following types:
220
+ * <ul>
221
+ * <li>uint8_t
222
+ * <li>int8_t
223
+ * <li>uint16_t
224
+ * <li>int16_t
225
+ * <li>uint32_t
226
+ * <li>int32_t
227
+ * <li>uint64_t
228
+ * <li>int64_t
229
+ * <li>uintptr_t
230
+ * <li>intptr_t
231
+ * </ul>
232
+ * #define NVTX_STDINT_TYPES_ALREADY_DEFINED if you are using your own header file.
233
+ */
234
+ #ifndef NVTX_STDINT_TYPES_ALREADY_DEFINED
235
+ #include <stdint.h>
236
+ #endif
237
+
238
+ #include <stddef.h>
239
+
240
+ #ifdef __cplusplus
241
+ extern "C" {
242
+ #endif /* __cplusplus */
243
+
244
+ /**
245
+ * Result Codes
246
+ */
247
+
248
+ #define NVTX_SUCCESS 0
249
+ #define NVTX_FAIL 1
250
+ #define NVTX_ERR_INIT_LOAD_PROPERTY 2
251
+ #define NVTX_ERR_INIT_ACCESS_LIBRARY 3
252
+ #define NVTX_ERR_INIT_LOAD_LIBRARY 4
253
+ #define NVTX_ERR_INIT_MISSING_LIBRARY_ENTRY_POINT 5
254
+ #define NVTX_ERR_INIT_FAILED_LIBRARY_ENTRY_POINT 6
255
+ #define NVTX_ERR_NO_INJECTION_LIBRARY_AVAILABLE 7
256
+
257
+ /**
258
+ * Size of the nvtxEventAttributes_t structure.
259
+ */
260
+ #define NVTX_EVENT_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxEventAttributes_t) ) )
261
+
262
+ #define NVTX_NO_PUSH_POP_TRACKING ((int)-2)
263
+
264
+ typedef uint64_t nvtxRangeId_t;
265
+
266
+ /* Forward declaration of opaque domain registration structure */
267
+ struct nvtxDomainRegistration_st;
268
+ typedef struct nvtxDomainRegistration_st nvtxDomainRegistration;
269
+
270
+ /* \brief Domain Handle Structure.
271
+ * \anchor DOMAIN_HANDLE_STRUCTURE
272
+ *
273
+ * This structure is opaque to the user and is used as a handle to reference
274
+ * a domain. This type is returned from tools when using the NVTX API to
275
+ * create a domain.
276
+ *
277
+ */
278
+ typedef nvtxDomainRegistration* nvtxDomainHandle_t;
279
+
280
+ /* Forward declaration of opaque string registration structure */
281
+ struct nvtxStringRegistration_st;
282
+ typedef struct nvtxStringRegistration_st nvtxStringRegistration;
283
+
284
+ /* \brief Registered String Handle Structure.
285
+ * \anchor REGISTERED_STRING_HANDLE_STRUCTURE
286
+ *
287
+ * This structure is opaque to the user and is used as a handle to reference
288
+ * a registered string. This type is returned from tools when using the NVTX
289
+ * API to create a registered string.
290
+ *
291
+ */
292
+ typedef nvtxStringRegistration* nvtxStringHandle_t;
293
+
294
+ /* ========================================================================= */
295
+ /** \defgroup GENERAL General
296
+ * @{
297
+ */
298
+
299
+ /** ---------------------------------------------------------------------------
300
+ * Color Types
301
+ * ------------------------------------------------------------------------- */
302
+ typedef enum nvtxColorType_t
303
+ {
304
+ NVTX_COLOR_UNKNOWN = 0, /**< Color attribute is unused. */
305
+ NVTX_COLOR_ARGB = 1 /**< An ARGB color is provided. */
306
+ } nvtxColorType_t;
307
+
308
+ /** ---------------------------------------------------------------------------
309
+ * Message Types
310
+ * ------------------------------------------------------------------------- */
311
+ typedef enum nvtxMessageType_t
312
+ {
313
+ NVTX_MESSAGE_UNKNOWN = 0, /**< Message payload is unused. */
314
+ NVTX_MESSAGE_TYPE_ASCII = 1, /**< A character sequence is used as payload. */
315
+ NVTX_MESSAGE_TYPE_UNICODE = 2, /**< A wide character sequence is used as payload. */
316
+ /* NVTX_VERSION_2 */
317
+ NVTX_MESSAGE_TYPE_REGISTERED = 3, /**< A unique string handle that was registered
318
+ with \ref nvtxDomainRegisterStringA() or
319
+ \ref nvtxDomainRegisterStringW(). */
320
+ } nvtxMessageType_t;
321
+
322
+ typedef union nvtxMessageValue_t
323
+ {
324
+ const char* ascii;
325
+ const wchar_t* unicode;
326
+ /* NVTX_VERSION_2 */
327
+ nvtxStringHandle_t registered;
328
+ } nvtxMessageValue_t;
329
+
330
+
331
+ /** @} */ /*END defgroup*/
332
+ /* ------------------------------------------------------------------------- */
333
+ /** \brief Force initialization (optional)
334
+ *
335
+ * Force NVTX library to initialize. The first call to any NVTX API function
336
+ * will automatically initialize the entire API. This can make the first call
337
+ * much slower than subsequent calls. In applications where the first call to
338
+ * NVTX may be in a performance-critical section, calling nvtxInitialize before
339
+ * any performance-critical sections will ensure NVTX initialization occurs at
340
+ * an acceptable time. Since nvtxInitialize takes no parameters and has no
341
+ * expected behavior besides initialization, it is convenient to add a call to
342
+ * nvtxInitialize in NVTX-instrumented applications that need to force earlier
343
+ * initialization without changing any other code. For example, if an app's
344
+ * first NVTX call is nvtxDomainCreate, and it is difficult to move that call
345
+ * earlier because the domain handle must be stored in an object only created
346
+ * at that point, adding a call to nvtxInitialize at the top of main() will
347
+ * ensure the later call to nvtxDomainCreate is as fast as possible.
348
+ *
349
+ * \version \NVTX_VERSION_3
350
+ *
351
+ * \param reserved - must be zero or NULL.
352
+ *
353
+ * @{ */
354
+ NVTX_DECLSPEC void NVTX_API nvtxInitialize(const void* reserved);
355
+ /** @} */
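+ /* Illustrative usage (an assumption, not part of the original header):
+  *     nvtxInitialize(NULL);            // at the top of main(), before hot paths
+  *     nvtxMarkA("initialized early");  // later NVTX calls now skip lazy initialization
+  */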
356
+
357
+
358
+ /** @} */ /*END defgroup*/
359
+
360
+ /* ========================================================================= */
361
+ /** \defgroup EVENT_ATTRIBUTES Event Attributes
362
+ * @{
363
+ */
364
+
365
+ /** ---------------------------------------------------------------------------
366
+ * Payload Types
367
+ * ------------------------------------------------------------------------- */
368
+ typedef enum nvtxPayloadType_t
369
+ {
370
+ NVTX_PAYLOAD_UNKNOWN = 0, /**< Payload is unused. */
371
+ NVTX_PAYLOAD_TYPE_UNSIGNED_INT64 = 1, /**< A 64 bit unsigned integer value is used as payload. */
372
+ NVTX_PAYLOAD_TYPE_INT64 = 2, /**< A 64 bit signed integer value is used as payload. */
373
+ NVTX_PAYLOAD_TYPE_DOUBLE = 3, /**< A 64 bit floating point value is used as payload. */
374
+ /* NVTX_VERSION_2 */
375
+ NVTX_PAYLOAD_TYPE_UNSIGNED_INT32 = 4, /**< A 32 bit unsigned integer value is used as payload. */
376
+ NVTX_PAYLOAD_TYPE_INT32 = 5, /**< A 32 bit signed integer value is used as payload. */
377
+ NVTX_PAYLOAD_TYPE_FLOAT = 6 /**< A 32 bit floating point value is used as payload. */
378
+ } nvtxPayloadType_t;
379
+
380
+ /** \brief Event Attribute Structure.
381
+ * \anchor EVENT_ATTRIBUTE_STRUCTURE
382
+ *
383
+ * This structure is used to describe the attributes of an event. The layout of
384
+ * the structure is defined by a specific version of the tools extension
385
+ * library and can change between different versions of the Tools Extension
386
+ * library.
387
+ *
388
+ * \par Initializing the Attributes
389
+ *
390
+ * The caller should always perform the following three tasks when using
391
+ * attributes:
392
+ * <ul>
393
+ * <li>Zero the structure
394
+ * <li>Set the version field
395
+ * <li>Set the size field
396
+ * </ul>
397
+ *
398
+ * Zeroing the structure sets all the event attributes types and values
399
+ * to the default value.
400
+ *
401
+ * The version and size field are used by the Tools Extension
402
+ * implementation to handle multiple versions of the attributes structure.
403
+ *
404
+ * It is recommended that the caller use one of the following two methods
405
+ * to initialize the event attributes structure:
406
+ *
407
+ * \par Method 1: Initializing nvtxEventAttributes for future compatibility
408
+ * \code
409
+ * nvtxEventAttributes_t eventAttrib = {0};
410
+ * eventAttrib.version = NVTX_VERSION;
411
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
412
+ * \endcode
413
+ *
414
+ * \par Method 2: Initializing nvtxEventAttributes for a specific version
415
+ * \code
416
+ * nvtxEventAttributes_t eventAttrib = {0};
417
+ * eventAttrib.version = 1;
418
+ * eventAttrib.size = (uint16_t)(sizeof(nvtxEventAttributes_v1));
419
+ * \endcode
420
+ *
421
+ * If the caller uses Method 1 it is critical that the entire binary
422
+ * layout of the structure be configured to 0 so that all fields
423
+ * are initialized to the default value.
424
+ *
425
+ * The caller should either use both NVTX_VERSION and
426
+ * NVTX_EVENT_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
427
+ * and a versioned type (Method 2). Using a mix of the two methods
428
+ * will likely cause either source level incompatibility or binary
429
+ * incompatibility in the future.
430
+ *
431
+ * \par Setting Attribute Types and Values
432
+ *
433
+ *
434
+ * \par Example:
435
+ * \code
436
+ * // Initialize
437
+ * nvtxEventAttributes_t eventAttrib = {0};
438
+ * eventAttrib.version = NVTX_VERSION;
439
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
440
+ *
441
+ * // Configure the Attributes
442
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
443
+ * eventAttrib.color = 0xFF880000;
444
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
445
+ * eventAttrib.message.ascii = "Example";
446
+ * \endcode
447
+ *
448
+ * In the example the caller does not have to set the value of
449
+ * \ref ::nvtxEventAttributes_v2::category or
450
+ * \ref ::nvtxEventAttributes_v2::payload as these fields were set to
451
+ * the default value by {0}.
452
+ * \sa
453
+ * ::nvtxDomainMarkEx
454
+ * ::nvtxDomainRangeStartEx
455
+ * ::nvtxDomainRangePushEx
456
+ */
457
+ typedef struct nvtxEventAttributes_v2
458
+ {
459
+ /**
460
+ * \brief Version flag of the structure.
461
+ *
462
+ * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
463
+ * supported in this header file. This can optionally be overridden to
464
+ * another version of the tools extension library.
465
+ */
466
+ uint16_t version;
467
+
468
+ /**
469
+ * \brief Size of the structure.
470
+ *
471
+ * Needs to be set to the size in bytes of the event attribute
472
+ * structure used to specify the event.
473
+ */
474
+ uint16_t size;
475
+
476
+ /**
477
+ * \brief ID of the category the event is assigned to.
478
+ *
479
+ * A category is a user-controlled ID that can be used to group
480
+ * events. The tool may use category IDs to improve filtering or
481
+ * enable grouping of events in the same category. The functions
482
+ * \ref ::nvtxNameCategoryA or \ref ::nvtxNameCategoryW can be used
483
+ * to name a category.
484
+ *
485
+ * Default Value is 0
486
+ */
487
+ uint32_t category;
488
+
489
+ /** \brief Color type specified in this attribute structure.
490
+ *
491
+ * Defines the color format of the attribute structure's \ref COLOR_FIELD
492
+ * "color" field.
493
+ *
494
+ * Default Value is NVTX_COLOR_UNKNOWN
495
+ */
496
+ int32_t colorType; /* nvtxColorType_t */
497
+
498
+ /** \brief Color assigned to this event. \anchor COLOR_FIELD
499
+ *
500
+ * The color that the tool should use to visualize the event.
501
+ */
502
+ uint32_t color;
503
+
504
+ /**
505
+ * \brief Payload type specified in this attribute structure.
506
+ *
507
+ * Defines the payload format of the attribute structure's \ref PAYLOAD_FIELD
508
+ * "payload" field.
509
+ *
510
+ * Default Value is NVTX_PAYLOAD_UNKNOWN
511
+ */
512
+ int32_t payloadType; /* nvtxPayloadType_t */
513
+
514
+ int32_t reserved0;
515
+
516
+ /**
517
+ * \brief Payload assigned to this event. \anchor PAYLOAD_FIELD
518
+ *
519
+ * A numerical value that can be used to annotate an event. The tool could
520
+ * use the payload data to reconstruct graphs and diagrams.
521
+ */
522
+ union payload_t
523
+ {
524
+ uint64_t ullValue;
525
+ int64_t llValue;
526
+ double dValue;
527
+ /* NVTX_VERSION_2 */
528
+ uint32_t uiValue;
529
+ int32_t iValue;
530
+ float fValue;
531
+ } payload;
532
+
533
+ /** \brief Message type specified in this attribute structure.
534
+ *
535
+ * Defines the message format of the attribute structure's \ref MESSAGE_FIELD
536
+ * "message" field.
537
+ *
538
+ * Default Value is NVTX_MESSAGE_UNKNOWN
539
+ */
540
+ int32_t messageType; /* nvtxMessageType_t */
541
+
542
+ /** \brief Message assigned to this attribute structure. \anchor MESSAGE_FIELD
543
+ *
544
+ * The text message that is attached to an event.
545
+ */
546
+ nvtxMessageValue_t message;
547
+
548
+ } nvtxEventAttributes_v2;
549
+
550
+ typedef struct nvtxEventAttributes_v2 nvtxEventAttributes_t;
551
+
552
+ /** @} */ /*END defgroup*/
553
+ /* ========================================================================= */
554
+ /** \defgroup MARKERS_AND_RANGES Markers and Ranges
555
+ *
556
+ * See \ref MARKERS_AND_RANGES for more details
557
+ *
558
+ * @{
559
+ */
560
+
561
+ /** \name Marker */
562
+
563
+ /* ------------------------------------------------------------------------- */
564
+ /** \brief Marks an instantaneous event in the application.
565
+ *
566
+ * A marker can contain a text message or specify additional information
567
+ * using the event attributes structure. These attributes include a text
568
+ * message, color, category, and a payload. Each of the attributes is optional
569
+ * and can only be sent out using the \ref nvtxDomainMarkEx function.
570
+ *
571
+ * nvtxDomainMarkEx(NULL, event) is equivalent to calling
572
+ * nvtxMarkEx(event).
573
+ *
574
+ * \param domain - The domain under which the marker is scoped.
575
+ * \param eventAttrib - The event attribute structure defining the marker's
576
+ * attribute types and attribute values.
577
+ *
578
+ * \sa
579
+ * ::nvtxMarkEx
580
+ *
581
+ * \version \NVTX_VERSION_2
582
+ * @{ */
583
+ NVTX_DECLSPEC void NVTX_API nvtxDomainMarkEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
584
+ /** @} */
585
+
586
+ /* ------------------------------------------------------------------------- */
587
+ /** \brief Marks an instantaneous event in the application.
588
+ *
589
+ * A marker can contain a text message or specify additional information
590
+ * using the event attributes structure. These attributes include a text
591
+ * message, color, category, and a payload. Each of the attributes is optional
592
+ * and can only be sent out using the \ref nvtxMarkEx function.
593
+ * If \ref nvtxMarkA or \ref nvtxMarkW are used to specify the marker
594
+ * or if an attribute is unspecified then a default value will be used.
595
+ *
596
+ * \param eventAttrib - The event attribute structure defining the marker's
597
+ * attribute types and attribute values.
598
+ *
599
+ * \par Example:
600
+ * \code
601
+ * // zero the structure
602
+ * nvtxEventAttributes_t eventAttrib = {0};
603
+ * // set the version and the size information
604
+ * eventAttrib.version = NVTX_VERSION;
605
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
606
+ * // configure the attributes. 0 is the default for all attributes.
607
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
608
+ * eventAttrib.color = 0xFF880000;
609
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
610
+ * eventAttrib.message.ascii = "Example nvtxMarkEx";
611
+ * nvtxMarkEx(&eventAttrib);
612
+ * \endcode
613
+ *
614
+ * \sa
615
+ * ::nvtxDomainMarkEx
616
+ *
617
+ * \version \NVTX_VERSION_1
618
+ * @{ */
619
+ NVTX_DECLSPEC void NVTX_API nvtxMarkEx(const nvtxEventAttributes_t* eventAttrib);
620
+ /** @} */
621
+
622
+ /* ------------------------------------------------------------------------- */
623
+ /** \brief Marks an instantaneous event in the application.
624
+ *
625
+ * A marker created using \ref nvtxMarkA or \ref nvtxMarkW contains only a
626
+ * text message.
627
+ *
628
+ * \param message - The message associated to this marker event.
629
+ *
630
+ * \par Example:
631
+ * \code
632
+ * nvtxMarkA("Example nvtxMarkA");
633
+ * nvtxMarkW(L"Example nvtxMarkW");
634
+ * \endcode
635
+ *
636
+ * \sa
637
+ * ::nvtxDomainMarkEx
638
+ * ::nvtxMarkEx
639
+ *
640
+ * \version \NVTX_VERSION_0
641
+ * @{ */
642
+ NVTX_DECLSPEC void NVTX_API nvtxMarkA(const char* message);
643
+ NVTX_DECLSPEC void NVTX_API nvtxMarkW(const wchar_t* message);
644
+ /** @} */
645
+
646
+
647
+ /** \name Process Ranges */
648
+
649
+ /* ------------------------------------------------------------------------- */
650
+ /** \brief Starts a process range in a domain.
651
+ *
652
+ * \param domain - The domain under which the range is scoped.
653
+ * \param eventAttrib - The event attribute structure defining the range's
654
+ * attribute types and attribute values.
655
+ *
656
+ * \return The unique ID used to correlate a pair of Start and End events.
657
+ *
658
+ * \remarks Ranges defined by Start/End can overlap.
659
+ *
660
+ * \par Example:
661
+ * \code
662
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
663
+ * nvtxEventAttributes_t eventAttrib = {0};
664
+ * eventAttrib.version = NVTX_VERSION;
665
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
666
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
667
+ * eventAttrib.message.ascii = "my range";
668
+ * nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(domain, &eventAttrib);
669
+ * // ...
670
+ * nvtxDomainRangeEnd(domain, rangeId);
671
+ * \endcode
672
+ *
673
+ * \sa
674
+ * ::nvtxDomainRangeEnd
675
+ *
676
+ * \version \NVTX_VERSION_2
677
+ * @{ */
678
+ NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxDomainRangeStartEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
679
+ /** @} */
680
+
681
+ /* ------------------------------------------------------------------------- */
682
+ /** \brief Starts a process range.
683
+ *
684
+ * \param eventAttrib - The event attribute structure defining the range's
685
+ * attribute types and attribute values.
686
+ *
687
+ * \return The unique ID used to correlate a pair of Start and End events.
688
+ *
689
+ * \remarks Ranges defined by Start/End can overlap.
690
+ *
691
+ * \par Example:
692
+ * \code
693
+ * nvtxEventAttributes_t eventAttrib = {0};
694
+ * eventAttrib.version = NVTX_VERSION;
695
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
696
+ * eventAttrib.category = 3;
697
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
698
+ * eventAttrib.color = 0xFF0088FF;
699
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
700
+ * eventAttrib.message.ascii = "Example Range";
701
+ * nvtxRangeId_t rangeId = nvtxRangeStartEx(&eventAttrib);
702
+ * // ...
703
+ * nvtxRangeEnd(rangeId);
704
+ * \endcode
705
+ *
706
+ * \sa
707
+ * ::nvtxRangeEnd
708
+ * ::nvtxDomainRangeStartEx
709
+ *
710
+ * \version \NVTX_VERSION_1
711
+ * @{ */
712
+ NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartEx(const nvtxEventAttributes_t* eventAttrib);
713
+ /** @} */
714
+
715
+ /* ------------------------------------------------------------------------- */
716
+ /** \brief Starts a process range.
717
+ *
718
+ * \param message - The event message associated to this range event.
719
+ *
720
+ * \return The unique ID used to correlate a pair of Start and End events.
721
+ *
722
+ * \remarks Ranges defined by Start/End can overlap.
723
+ *
724
+ * \par Example:
725
+ * \code
726
+ * nvtxRangeId_t r1 = nvtxRangeStartA("Range 1");
727
+ * nvtxRangeId_t r2 = nvtxRangeStartW(L"Range 2");
728
+ * nvtxRangeEnd(r1);
729
+ * nvtxRangeEnd(r2);
730
+ * \endcode
731
+ *
732
+ * \sa
733
+ * ::nvtxRangeEnd
734
+ * ::nvtxRangeStartEx
735
+ * ::nvtxDomainRangeStartEx
736
+ *
737
+ * \version \NVTX_VERSION_0
738
+ * @{ */
739
+ NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartA(const char* message);
740
+ NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartW(const wchar_t* message);
741
+ /** @} */
742
+
743
+ /* ------------------------------------------------------------------------- */
744
+ /** \brief Ends a process range.
745
+ *
746
+ * \param domain - The domain
747
+ * \param id - The correlation ID returned from a nvtxRangeStart call.
748
+ *
749
+ * \remarks This function is offered for completeness but is an alias for ::nvtxRangeEnd.
750
+ * It does not need a domain param since that is associated with the range ID at ::nvtxDomainRangeStartEx
751
+ *
752
+ * \par Example:
753
+ * \code
754
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
755
+ * nvtxEventAttributes_t eventAttrib = {0};
756
+ * eventAttrib.version = NVTX_VERSION;
757
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
758
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
759
+ * eventAttrib.message.ascii = "my range";
760
+ * nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(domain, &eventAttrib);
761
+ * // ...
762
+ * nvtxDomainRangeEnd(domain, rangeId);
763
+ * \endcode
764
+ *
765
+ * \sa
766
+ * ::nvtxDomainRangeStartEx
767
+ *
768
+ * \version \NVTX_VERSION_2
769
+ * @{ */
770
+ NVTX_DECLSPEC void NVTX_API nvtxDomainRangeEnd(nvtxDomainHandle_t domain, nvtxRangeId_t id);
771
+ /** @} */
772
+
773
+ /* ------------------------------------------------------------------------- */
774
+ /** \brief Ends a process range.
775
+ *
776
+ * \param id - The correlation ID returned from an nvtxRangeStart call.
777
+ *
778
+ * \sa
779
+ * ::nvtxDomainRangeStartEx
780
+ * ::nvtxRangeStartEx
781
+ * ::nvtxRangeStartA
782
+ * ::nvtxRangeStartW
783
+ *
784
+ * \version \NVTX_VERSION_0
785
+ * @{ */
786
+ NVTX_DECLSPEC void NVTX_API nvtxRangeEnd(nvtxRangeId_t id);
787
+ /** @} */
788
+
789
+ /** \name Thread Ranges */
790
+
791
+ /* ------------------------------------------------------------------------- */
792
+ /** \brief Starts a nested thread range.
793
+ *
794
+ * \param domain - The domain of scoping.
795
+ * \param eventAttrib - The event attribute structure defining the range's
796
+ * attribute types and attribute values.
797
+ *
798
+ * \return The 0 based level of range being started. This value is scoped to the domain.
799
+ * If an error occurs, a negative value is returned.
800
+ *
801
+ * \par Example:
802
+ * \code
803
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
804
+ * nvtxEventAttributes_t eventAttrib = {0};
805
+ * eventAttrib.version = NVTX_VERSION;
806
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
807
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
808
+ * eventAttrib.color = 0xFFFF0000;
809
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
810
+ * eventAttrib.message.ascii = "Level 0";
811
+ * nvtxDomainRangePushEx(domain, &eventAttrib);
812
+ *
813
+ * // Re-use eventAttrib
814
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE;
815
+ * eventAttrib.message.unicode = L"Level 1";
816
+ * nvtxDomainRangePushEx(domain, &eventAttrib);
817
+ *
818
+ * nvtxDomainRangePop(domain); //level 1
819
+ * nvtxDomainRangePop(domain); //level 0
820
+ * \endcode
821
+ *
822
+ * \sa
823
+ * ::nvtxDomainRangePop
824
+ *
825
+ * \version \NVTX_VERSION_2
826
+ * @{ */
827
+ NVTX_DECLSPEC int NVTX_API nvtxDomainRangePushEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
828
+ /** @} */
829
+
830
+ /* ------------------------------------------------------------------------- */
831
+ /** \brief Starts a nested thread range.
832
+ *
833
+ * \param eventAttrib - The event attribute structure defining the range's
834
+ * attribute types and attribute values.
835
+ *
836
+ * \return The 0 based level of range being started. This level is per domain.
837
+ * If an error occurs a negative value is returned.
838
+ *
839
+ * \par Example:
840
+ * \code
841
+ * nvtxEventAttributes_t eventAttrib = {0};
842
+ * eventAttrib.version = NVTX_VERSION;
843
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
844
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
845
+ * eventAttrib.color = 0xFFFF0000;
846
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
847
+ * eventAttrib.message.ascii = "Level 0";
848
+ * nvtxRangePushEx(&eventAttrib);
849
+ *
850
+ * // Re-use eventAttrib
851
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE;
852
+ * eventAttrib.message.unicode = L"Level 1";
853
+ * nvtxRangePushEx(&eventAttrib);
854
+ *
855
+ * nvtxRangePop();
856
+ * nvtxRangePop();
857
+ * \endcode
858
+ *
859
+ * \sa
860
+ * ::nvtxDomainRangePushEx
861
+ * ::nvtxRangePop
862
+ *
863
+ * \version \NVTX_VERSION_1
864
+ * @{ */
865
+ NVTX_DECLSPEC int NVTX_API nvtxRangePushEx(const nvtxEventAttributes_t* eventAttrib);
866
+ /** @} */
867
+
868
+ /* ------------------------------------------------------------------------- */
869
+ /** \brief Starts a nested thread range.
870
+ *
871
+ * \param message - The event message associated to this range event.
872
+ *
873
+ * \return The 0 based level of range being started. If an error occurs a
874
+ * negative value is returned.
875
+ *
876
+ * \par Example:
877
+ * \code
878
+ * nvtxRangePushA("Level 0");
879
+ * nvtxRangePushW(L"Level 1");
880
+ * nvtxRangePop();
881
+ * nvtxRangePop();
882
+ * \endcode
883
+ *
884
+ * \sa
885
+ * ::nvtxDomainRangePushEx
886
+ * ::nvtxRangePop
887
+ *
888
+ * \version \NVTX_VERSION_0
889
+ * @{ */
890
+ NVTX_DECLSPEC int NVTX_API nvtxRangePushA(const char* message);
891
+ NVTX_DECLSPEC int NVTX_API nvtxRangePushW(const wchar_t* message);
892
+ /** @} */
893
+
894
+
895
+ /* ------------------------------------------------------------------------- */
896
+ /** \brief Ends a nested thread range.
897
+ *
898
+ * \return The level of the range being ended. If an error occurs a negative
899
+ * value is returned on the current thread.
900
+ *
901
+ * \par Example:
902
+ * \code
903
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example library");
904
+ * nvtxDomainRangePushA(domain, "Level 0");
905
+ * nvtxDomainRangePushW(domain, L"Level 1");
906
+ * nvtxDomainRangePop(domain);
907
+ * nvtxDomainRangePop(domain);
908
+ * \endcode
909
+ *
910
+ * \sa
911
+ * ::nvtxRangePushEx
912
+ * ::nvtxRangePushA
913
+ * ::nvtxRangePushW
914
+ *
915
+ * \version \NVTX_VERSION_2
916
+ * @{ */
917
+ NVTX_DECLSPEC int NVTX_API nvtxDomainRangePop(nvtxDomainHandle_t domain);
918
+ /** @} */
919
+
920
+ /* ------------------------------------------------------------------------- */
921
+ /** \brief Ends a nested thread range.
922
+ *
923
+ * \return The level of the range being ended. If an error occurs a negative
924
+ * value is returned on the current thread.
925
+ *
926
+ * \par Example:
927
+ * \code
928
+ * nvtxRangePushA("Level 0");
929
+ * nvtxRangePushW(L"Level 1");
930
+ * nvtxRangePop();
931
+ * nvtxRangePop();
932
+ * \endcode
933
+ *
934
+ * \sa
935
+ * ::nvtxRangePushEx
936
+ * ::nvtxRangePushA
937
+ * ::nvtxRangePushW
938
+ *
939
+ * \version \NVTX_VERSION_0
940
+ * @{ */
941
+ NVTX_DECLSPEC int NVTX_API nvtxRangePop(void);
942
+ /** @} */
943
+
944
+
945
+ /** @} */ /*END defgroup*/
946
+ /* ========================================================================= */
947
+ /** \defgroup RESOURCE_NAMING Resource Naming
948
+ *
949
+ * See \ref RESOURCE_NAMING for more details
950
+ *
951
+ * @{
952
+ */
953
+
954
+
955
+ /* ------------------------------------------------------------------------- */
956
+ /** \name Functions for Generic Resource Naming*/
957
+ /* ------------------------------------------------------------------------- */
958
+
959
+ /* ------------------------------------------------------------------------- */
960
+ /** \cond SHOW_HIDDEN
961
+ * \brief Resource typing helpers.
962
+ *
963
+ * Classes are used to make it easy to create a series of resource types
964
+ * per API without collisions
965
+ */
966
+ #define NVTX_RESOURCE_MAKE_TYPE(CLASS, INDEX) ((((uint32_t)(NVTX_RESOURCE_CLASS_ ## CLASS))<<16)|((uint32_t)(INDEX)))
967
+ #define NVTX_RESOURCE_CLASS_GENERIC 1
968
+ /** \endcond */
969
+
970
+ /* ------------------------------------------------------------------------- */
971
+ /** \brief Generic resource type for when a resource class is not available.
972
+ *
973
+ * \sa
974
+ * ::nvtxDomainResourceCreate
975
+ *
976
+ * \version \NVTX_VERSION_2
977
+ */
978
+ typedef enum nvtxResourceGenericType_t
979
+ {
980
+ NVTX_RESOURCE_TYPE_UNKNOWN = 0,
981
+ NVTX_RESOURCE_TYPE_GENERIC_POINTER = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 1), /**< Generic pointer assumed to have no collisions with other pointers. */
982
+ NVTX_RESOURCE_TYPE_GENERIC_HANDLE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 2), /**< Generic handle assumed to have no collisions with other handles. */
983
+ NVTX_RESOURCE_TYPE_GENERIC_THREAD_NATIVE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 3), /**< OS native thread identifier. */
984
+ NVTX_RESOURCE_TYPE_GENERIC_THREAD_POSIX = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 4) /**< POSIX pthread identifier. */
985
+ } nvtxResourceGenericType_t;
986
+
987
+
988
+
989
+ /** \brief Resource Attribute Structure.
990
+ * \anchor RESOURCE_ATTRIBUTE_STRUCTURE
991
+ *
992
+ * This structure is used to describe the attributes of a resource. The layout of
993
+ * the structure is defined by a specific version of the tools extension
994
+ * library and can change between different versions of the Tools Extension
995
+ * library.
996
+ *
997
+ * \par Initializing the Attributes
998
+ *
999
+ * The caller should always perform the following three tasks when using
1000
+ * attributes:
1001
+ * <ul>
1002
+ * <li>Zero the structure
1003
+ * <li>Set the version field
1004
+ * <li>Set the size field
1005
+ * </ul>
1006
+ *
1007
+ * Zeroing the structure sets all the resource attributes types and values
1008
+ * to the default value.
1009
+ *
1010
+ * The version and size field are used by the Tools Extension
1011
+ * implementation to handle multiple versions of the attributes structure.
1012
+ *
1013
+ * It is recommended that the caller use one of the following two methods
1014
+ * to initialize the resource attributes structure:
1015
+ *
1016
+ * \par Method 1: Initializing nvtxResourceAttributes for future compatibility
1017
+ * \code
1018
+ * nvtxResourceAttributes_t attribs = {0};
1019
+ * attribs.version = NVTX_VERSION;
1020
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1021
+ * \endcode
1022
+ *
1023
+ * \par Method 2: Initializing nvtxResourceAttributes for a specific version
1024
+ * \code
1025
+ * nvtxResourceAttributes_v0 attribs = {0};
1026
+ * attribs.version = 2;
1027
+ * attribs.size = (uint16_t)(sizeof(nvtxResourceAttributes_v0));
1028
+ * \endcode
1029
+ *
1030
+ * If the caller uses Method 1 it is critical that the entire binary
1031
+ * layout of the structure be configured to 0 so that all fields
1032
+ * are initialized to the default value.
1033
+ *
1034
+ * The caller should either use both NVTX_VERSION and
1035
+ * NVTX_RESOURCE_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
1036
+ * and a versioned type (Method 2). Using a mix of the two methods
1037
+ * will likely cause either source level incompatibility or binary
1038
+ * incompatibility in the future.
1039
+ *
1040
+ * \par Setting Attribute Types and Values
1041
+ *
1042
+ *
1043
+ * \par Example:
1044
+ * \code
1045
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
1046
+ *
1047
+ * // Initialize
1048
+ * nvtxResourceAttributes_t attribs = {0};
1049
+ * attribs.version = NVTX_VERSION;
1050
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1051
+ *
1052
+ * // Configure the Attributes
1053
+ * attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
1054
+ * attribs.identifier.pValue = (const void*)pMutex;
1055
+ * attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
1056
+ * attribs.message.ascii = "Single thread access to database.";
1057
+ *
1058
+ * nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
1059
+ * \endcode
1060
+ *
1061
+ * \sa
1062
+ * ::nvtxDomainResourceCreate
1063
+ */
1064
+ typedef struct nvtxResourceAttributes_v0
1065
+ {
1066
+ /**
1067
+ * \brief Version flag of the structure.
1068
+ *
1069
+ * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
1070
+ * supported in this header file. This can optionally be overridden to
1071
+ * another version of the tools extension library.
1072
+ */
1073
+ uint16_t version;
1074
+
1075
+ /**
1076
+ * \brief Size of the structure.
1077
+ *
1078
+ * Needs to be set to the size in bytes of this attribute
1079
+ * structure.
1080
+ */
1081
+ uint16_t size;
1082
+
1083
+ /**
1084
+ * \brief Identifier type specifies how to interpret the identifier field
1085
+ *
1086
+ * Defines the identifier format of the attribute structure's \ref RESOURCE_IDENTIFIER_FIELD
1087
+ * "identifier" field.
1088
+ *
1089
+ * Default Value is NVTX_RESOURCE_TYPE_UNKNOWN
1090
+ */
1091
+ int32_t identifierType; /* values from enums following the pattern nvtxResource[name]Type_t */
1092
+
1093
+ /**
1094
+ * \brief Identifier for the resource.
1095
+ * \anchor RESOURCE_IDENTIFIER_FIELD
1096
+ *
1097
+ * An identifier may be a pointer or a handle to an OS or middleware API object.
1098
+ * The resource type will assist in avoiding collisions where handle values may collide.
1099
+ */
1100
+ union identifier_t
1101
+ {
1102
+ const void* pValue;
1103
+ uint64_t ullValue;
1104
+ } identifier;
1105
+
1106
+ /** \brief Message type specified in this attribute structure.
1107
+ *
1108
+ * Defines the message format of the attribute structure's \ref RESOURCE_MESSAGE_FIELD
1109
+ * "message" field.
1110
+ *
1111
+ * Default Value is NVTX_MESSAGE_UNKNOWN
1112
+ */
1113
+ int32_t messageType; /* nvtxMessageType_t */
1114
+
1115
+ /** \brief Message assigned to this attribute structure. \anchor RESOURCE_MESSAGE_FIELD
1116
+ *
1117
+ * The text message that is attached to a resource.
1118
+ */
1119
+ nvtxMessageValue_t message;
1120
+
1121
+ } nvtxResourceAttributes_v0;
1122
+
1123
+ typedef struct nvtxResourceAttributes_v0 nvtxResourceAttributes_t;
1124
+
1125
+ /* \cond SHOW_HIDDEN
1126
+ * \version \NVTX_VERSION_2
1127
+ */
1128
+ #define NVTX_RESOURCE_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxResourceAttributes_v0) ) )
1129
+ typedef struct nvtxResourceHandle* nvtxResourceHandle_t;
1130
+ /** \endcond */
1131
+
1132
+
1133
+
1134
+ /* ------------------------------------------------------------------------- */
1135
+ /** \brief Create a resource object to track and associate data with OS and middleware objects
1136
+ *
1137
+ * Allows users to associate an API handle or pointer with a user-provided name.
1138
+ *
1139
+ *
1140
+ * \param domain - Domain to own the resource object
1141
+ * \param attribs - Attributes to be associated with the resource
1142
+ *
1143
+ * \return A handle that represents the newly created resource object.
1144
+ *
1145
+ * \par Example:
1146
+ * \code
1147
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
1148
+ * nvtxResourceAttributes_t attribs = {0};
1149
+ * attribs.version = NVTX_VERSION;
1150
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1151
+ * attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
1152
+ * attribs.identifier.pValue = (const void*)pMutex;
1153
+ * attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
1154
+ * attribs.message.ascii = "Single thread access to database.";
1155
+ * nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
1156
+ * \endcode
1157
+ *
1158
+ * \sa
1159
+ * ::nvtxResourceAttributes_t
1160
+ * ::nvtxDomainResourceDestroy
1161
+ *
1162
+ * \version \NVTX_VERSION_2
1163
+ * @{ */
1164
+ NVTX_DECLSPEC nvtxResourceHandle_t NVTX_API nvtxDomainResourceCreate(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs);
1165
+ /** @} */
1166
+
1167
+ /* ------------------------------------------------------------------------- */
1168
+ /** \brief Destroy a resource object to track and associate data with OS and middleware objects
1169
+ *
1170
+ * Releases a resource object previously created with ::nvtxDomainResourceCreate.
1171
+ *
1172
+ * \param resource - Handle to the resource in which to operate.
1173
+ *
1174
+ * \par Example:
1175
+ * \code
1176
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
1177
+ * nvtxResourceAttributes_t attribs = {0};
1178
+ * attribs.version = NVTX_VERSION;
1179
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1180
+ * attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
1181
+ * attribs.identifier.pValue = (const void*)pMutex;
1182
+ * attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
1183
+ * attribs.message.ascii = "Single thread access to database.";
1184
+ * nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
1185
+ * nvtxDomainResourceDestroy(handle);
1186
+ * \endcode
1187
+ *
1188
+ * \sa
1189
+ * ::nvtxDomainResourceCreate
1190
+ *
1191
+ * \version \NVTX_VERSION_2
1192
+ * @{ */
1193
+ NVTX_DECLSPEC void NVTX_API nvtxDomainResourceDestroy(nvtxResourceHandle_t resource);
1194
+ /** @} */
1195
+
1196
+
1197
+ /** \name Functions for NVTX Category Naming*/
1198
+
1199
+ /* ------------------------------------------------------------------------- */
1200
+ /**
1201
+ * \brief Annotate an NVTX category used within a domain.
1202
+ *
1203
+ * Categories are used to group sets of events. Each category is identified
1204
+ * through a unique ID and that ID is passed into any of the marker/range
1205
+ * events to assign that event to a specific category. The nvtxDomainNameCategory
1206
+ * function calls allow the user to assign a name to a category ID that is
1207
+ * specific to the domain.
1208
+ *
1209
+ * nvtxDomainNameCategory(NULL, category, name) is equivalent to calling
1210
+ * nvtxNameCategory(category, name).
1211
+ *
1212
+ * \param domain - The domain of scoping the category.
1213
+ * \param category - The category ID to name.
1214
+ * \param name - The name of the category.
1215
+ *
1216
+ * \remarks The category names are tracked per domain.
1217
+ *
1218
+ * \par Example:
1219
+ * \code
1220
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example");
1221
+ * nvtxDomainNameCategoryA(domain, 1, "Memory Allocation");
1222
+ * nvtxDomainNameCategoryW(domain, 2, L"Memory Transfer");
1223
+ * \endcode
1224
+ *
1225
+ * \version \NVTX_VERSION_2
1226
+ * @{ */
1227
+ NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryA(nvtxDomainHandle_t domain, uint32_t category, const char* name);
1228
+ NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryW(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name);
1229
+ /** @} */
1230
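+ 
+ /* A minimal usage sketch, assuming `domain` was created with nvtxDomainCreateA
+  * as in the example above: the category ID named above is attached to an event
+  * through the nvtxEventAttributes_t::category field.
+  * \code
+  * nvtxEventAttributes_t attrib = {0};
+  * attrib.version = NVTX_VERSION;
+  * attrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+  * attrib.category = 1;  // the ID named "Memory Allocation" above
+  * attrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
+  * attrib.message.ascii = "allocating device buffer";
+  * nvtxDomainMarkEx(domain, &attrib);
+  * \endcode
+  */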
+
1231
+ /** \brief Annotate an NVTX category.
1232
+ *
1233
+ * Categories are used to group sets of events. Each category is identified
1234
+ * through a unique ID and that ID is passed into any of the marker/range
1235
+ * events to assign that event to a specific category. The nvtxNameCategory
1236
+ * function calls allow the user to assign a name to a category ID.
1237
+ *
1238
+ * \param category - The category ID to name.
1239
+ * \param name - The name of the category.
1240
+ *
1241
+ * \remarks The category names are tracked per process.
1242
+ *
1243
+ * \par Example:
1244
+ * \code
1245
+ * nvtxNameCategory(1, "Memory Allocation");
1246
+ * nvtxNameCategory(2, "Memory Transfer");
1247
+ * nvtxNameCategory(3, "Memory Object Lifetime");
1248
+ * \endcode
1249
+ *
1250
+ * \version \NVTX_VERSION_1
1251
+ * @{ */
1252
+ NVTX_DECLSPEC void NVTX_API nvtxNameCategoryA(uint32_t category, const char* name);
1253
+ NVTX_DECLSPEC void NVTX_API nvtxNameCategoryW(uint32_t category, const wchar_t* name);
1254
+ /** @} */
1255
+
1256
+ /** \name Functions for OS Threads Naming*/
1257
+
1258
+ /* ------------------------------------------------------------------------- */
1259
+ /** \brief Annotate an OS thread.
1260
+ *
1261
+ * Allows the user to name an active thread of the current process. If an
1262
+ * invalid thread ID is provided or a thread ID from a different process is
1263
+ * used, the behavior of the tool is implementation-dependent.
1264
+ *
1265
+ * Tools expect thread ID to be a number that uniquely identifies the thread
1266
+ * at the time of the call. Note that a thread's ID can be reused after
1267
+ * it is destroyed. Tools may choose how to handle aliasing of thread IDs.
1268
+ *
1269
+ * The POSIX pthread_t type returned by pthread_self() may not comply with these
1270
+ * expectations. Please use an OS-specific thread ID instead of pthread_t.
1271
+ *
1272
+ * The thread name is associated with the default domain. To support domains
1273
+ * use resource objects via ::nvtxDomainResourceCreate.
1274
+ *
1275
+ * \param threadId - The ID of the thread to name.
1276
+ * \param name - The name of the thread.
1277
+ *
1278
+ * \par Examples:
1279
+ * MS Windows:
1280
+ * \code
1281
+ * #include <windows.h>
1282
+ * nvtxNameOsThread(GetCurrentThreadId(), "Current thread");
1283
+ * nvtxNameOsThread(GetThreadId(SomeThreadHandle), "Other thread");
1284
+ * \endcode
1285
+ *
1286
+ * Android:
1287
+ * \code
1288
+ * #include <unistd.h>
1289
+ * nvtxNameOsThreadA(gettid(), "Current thread");
1290
+ * nvtxNameOsThreadA(getpid(), "Main thread");
1291
+ * \endcode
1292
+ *
1293
+ * Linux:
1294
+ * \code
1295
+ * #include <sys/syscall.h>
1296
+ * nvtxNameOsThreadA(syscall(SYS_gettid), "Current thread");
1297
+ * \endcode
1298
+ * \code
1299
+ * #include <unistd.h>
1300
+ * nvtxNameOsThreadA(getpid(), "Main thread");
1301
+ * \endcode
1302
+ *
1303
+ * OS X:
1304
+ * \code
1305
+ * #include <sys/syscall.h>
1306
+ * nvtxNameOsThreadA(syscall(SYS_thread_selfid), "Current thread");
1307
+ * \endcode
1308
+ * \code
1309
+ * #include <pthread.h>
1310
+ * __uint64_t id;
1311
+ * pthread_threadid_np(pthread_self(), &id);
1312
+ * nvtxNameOsThreadA(id, "Current thread");
1313
+ * pthread_threadid_np(somePThreadId, &id);
1314
+ * nvtxNameOsThreadA(id, "Other thread");
1315
+ * \endcode
1316
+ *
1317
+ * \version \NVTX_VERSION_1
1318
+ * @{ */
1319
+ NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadA(uint32_t threadId, const char* name);
1320
+ NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadW(uint32_t threadId, const wchar_t* name);
1321
+ /** @} */
1322
+
1323
+
1324
+ /** @} */ /*END defgroup*/
1325
+ /* ========================================================================= */
1326
+ /** \defgroup STRING_REGISTRATION String Registration
1327
+ *
1328
+ * Registered strings are intended to increase performance by lowering instrumentation
1329
+ * overhead. A string may be registered once and the handle may be passed in place of
1330
+ * a string wherever the APIs allow it.
1331
+ *
1332
+ * See \ref STRING_REGISTRATION for more details
1333
+ *
1334
+ * @{
1335
+ */
1336
+
1337
+ /* ------------------------------------------------------------------------- */
1338
+ /** \brief Register a string.
1339
+
1340
+ * Registers an immutable string with NVTX. Once registered, the returned handle
1341
+ * can be used in the nvtxEventAttributes_t
1342
+ * \ref MESSAGE_FIELD. This allows the NVTX implementation to skip copying the
1343
+ * contents of the message on each event invocation.
1344
+ *
1345
+ * String registration is an optimization. It is recommended to use string
1346
+ * registration if the string will be passed to an event many times.
1347
+ *
1348
+ * Strings are not unregistered, except by destroying the entire domain.
1349
+ *
1350
+ * \param domain - Domain handle. If NULL then the global domain is used.
1351
+ * \param string - A unique pointer to a sequence of characters.
1352
+ *
1353
+ * \return A handle representing the registered string.
1354
+ *
1355
+ * \par Example:
1356
+ * \code
1357
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
1358
+ * nvtxStringHandle_t message = nvtxDomainRegisterStringA(domain, "registered string");
1359
+ * nvtxEventAttributes_t eventAttrib = {0};
1360
+ * eventAttrib.version = NVTX_VERSION;
1361
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
1362
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_REGISTERED;
1363
+ * eventAttrib.message.registered = message;
1364
+ * \endcode
1365
+ *
1366
+ * \version \NVTX_VERSION_2
1367
+ * @{ */
1368
+ NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringA(nvtxDomainHandle_t domain, const char* string);
1369
+ NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringW(nvtxDomainHandle_t domain, const wchar_t* string);
1370
+ /** @} */
1371
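+ 
+ /* A minimal usage sketch, assuming `domain` was created with nvtxDomainCreateA:
+  * the registered handle is passed through the message field, so the string
+  * contents need not be copied on every event.
+  * \code
+  * nvtxStringHandle_t msg = nvtxDomainRegisterStringA(domain, "iteration");
+  * nvtxEventAttributes_t attrib = {0};
+  * attrib.version = NVTX_VERSION;
+  * attrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
+  * attrib.messageType = NVTX_MESSAGE_TYPE_REGISTERED;
+  * attrib.message.registered = msg;
+  * for (int i = 0; i < 1000; ++i)
+  * {
+  *     nvtxDomainRangePushEx(domain, &attrib);
+  *     do_iteration();  // hypothetical per-iteration work
+  *     nvtxDomainRangePop(domain);
+  * }
+  * \endcode
+  */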
+
1372
+ /** @} */ /*END defgroup*/
1373
+ /* ========================================================================= */
1374
+ /** \defgroup DOMAINS Domains
1375
+ *
1376
+ * Domains are used to group events into a developer-defined scope. Middleware
1377
+ * vendors may also scope their own events to avoid collisions with the
1378
+ * application developer's events, so that the application developer may
1379
+ * inspect both parts and easily differentiate or filter them. By default
1380
+ * all events are scoped to a global domain where NULL is provided or when
1381
+ * using APIs provided by versions of NVTX below v2.
1382
+ *
1383
+ * Domains are intended to be long-lived objects whose purpose is to
1384
+ * logically separate the events of large modules, such as
1385
+ * middleware libraries, from each other and from the main application.
1386
+ *
1387
+ * See \ref DOMAINS for more details
1388
+ *
1389
+ * @{
1390
+ */
1391
+
1392
+ /* ------------------------------------------------------------------------- */
1393
+ /** \brief Register an NVTX domain.
1394
+ *
1395
+ * Domains are used to scope annotations. All NVTX_VERSION_0 and NVTX_VERSION_1
1396
+ * annotations are scoped to the global domain. The function nvtxDomainCreate
1397
+ * creates a new named domain.
1398
+ *
1399
+ * Each domain maintains its own nvtxRangePush and nvtxRangePop stack.
1400
+ *
1401
+ * \param name - A unique string representing the domain.
1402
+ *
1403
+ * \return A handle representing the domain.
1404
+ *
1405
+ * \par Example:
1406
+ * \code
1407
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
1408
+ *
1409
+ * nvtxMarkA("nvtxMarkA to global domain");
1410
+ *
1411
+ * nvtxEventAttributes_t eventAttrib1 = {0};
1412
+ * eventAttrib1.version = NVTX_VERSION;
1413
+ * eventAttrib1.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
1414
+ * eventAttrib1.message.ascii = "nvtxDomainMarkEx to global domain";
1415
+ * nvtxDomainMarkEx(NULL, &eventAttrib1);
1416
+ *
1417
+ * nvtxEventAttributes_t eventAttrib2 = {0};
1418
+ * eventAttrib2.version = NVTX_VERSION;
1419
+ * eventAttrib2.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
1420
+ * eventAttrib2.message.ascii = "nvtxDomainMarkEx to com.nvidia.nvtx.example";
1421
+ * nvtxDomainMarkEx(domain, &eventAttrib2);
1422
+ * nvtxDomainDestroy(domain);
1423
+ * \endcode
1424
+ *
1425
+ * \sa
1426
+ * ::nvtxDomainDestroy
1427
+ *
1428
+ * \version \NVTX_VERSION_2
1429
+ * @{ */
1430
+ NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateA(const char* name);
1431
+ NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateW(const wchar_t* name);
1432
+ /** @} */
1433
+
1434
+ /* ------------------------------------------------------------------------- */
1435
+ /** \brief Unregister an NVTX domain.
1436
+ *
1437
+ * Unregisters the domain handle and frees all domain-specific resources.
1438
+ *
1439
+ * \param domain - the domain handle
1440
+ *
1441
+ * \par Example:
1442
+ * \code
1443
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
1444
+ * nvtxDomainDestroy(domain);
1445
+ * \endcode
1446
+ *
1447
+ * \sa
1448
+ * ::nvtxDomainCreateA
1449
+ * ::nvtxDomainCreateW
1450
+ *
1451
+ * \version \NVTX_VERSION_2
1452
+ * @{ */
1453
+ NVTX_DECLSPEC void NVTX_API nvtxDomainDestroy(nvtxDomainHandle_t domain);
1454
+ /** @} */
1455
+
1456
+
1457
+ /** @} */ /*END defgroup*/
1458
+ /* ========================================================================= */
1459
+ /** \cond SHOW_HIDDEN */
1460
+
1461
+ #ifdef UNICODE
1462
+ #define nvtxMark nvtxMarkW
1463
+ #define nvtxRangeStart nvtxRangeStartW
1464
+ #define nvtxRangePush nvtxRangePushW
1465
+ #define nvtxNameCategory nvtxNameCategoryW
1466
+ #define nvtxNameOsThread nvtxNameOsThreadW
1467
+ /* NVTX_VERSION_2 */
1468
+ #define nvtxDomainCreate nvtxDomainCreateW
1469
+ #define nvtxDomainRegisterString nvtxDomainRegisterStringW
1470
+ #define nvtxDomainNameCategory nvtxDomainNameCategoryW
1471
+ #else
1472
+ #define nvtxMark nvtxMarkA
1473
+ #define nvtxRangeStart nvtxRangeStartA
1474
+ #define nvtxRangePush nvtxRangePushA
1475
+ #define nvtxNameCategory nvtxNameCategoryA
1476
+ #define nvtxNameOsThread nvtxNameOsThreadA
1477
+ /* NVTX_VERSION_2 */
1478
+ #define nvtxDomainCreate nvtxDomainCreateA
1479
+ #define nvtxDomainRegisterString nvtxDomainRegisterStringA
1480
+ #define nvtxDomainNameCategory nvtxDomainNameCategoryA
1481
+ #endif
1482
+
1483
+ /** \endcond */
1484
+
1485
+ #ifdef __cplusplus
1486
+ } /* extern "C" */
1487
+ #endif /* __cplusplus */
1488
+
1489
+ #define NVTX_IMPL_GUARD /* Ensure other headers cannot be included directly */
1490
+
1491
+ #include "nvtxDetail/nvtxTypes.h"
1492
+
1493
+ #ifndef NVTX_NO_IMPL
1494
+ #include "nvtxDetail/nvtxImpl.h"
1495
+ #endif /*NVTX_NO_IMPL*/
1496
+
1497
+ #undef NVTX_IMPL_GUARD
1498
+
1499
+ #endif /* !defined(NVTX_VERSION) */
evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvtx3/nvToolsExtCuda.h ADDED
@@ -0,0 +1,170 @@
1
+ /*
2
+ * Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO USER:
5
+ *
6
+ * This source code is subject to NVIDIA ownership rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * This software and the information contained herein is PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
11
+ * of a form of NVIDIA software license agreement.
12
+ *
13
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
14
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
15
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
16
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
17
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
18
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
19
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
20
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
21
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
22
+ * OR PERFORMANCE OF THIS SOURCE CODE.
23
+ *
24
+ * U.S. Government End Users. This source code is a "commercial item" as
25
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
26
+ * "commercial computer software" and "commercial computer software
27
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
28
+ * and is provided to the U.S. Government only as a commercial end item.
29
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
30
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
31
+ * source code with only those rights set forth herein.
32
+ *
33
+ * Any use of this source code in individual and commercial software must
34
+ * include, in the user documentation and internal comments to the code,
35
+ * the above Disclaimer and U.S. Government End Users Notice.
36
+ */
37
+
38
+ #include "nvToolsExt.h"
39
+
40
+ #include "cuda.h"
41
+
42
+ #ifndef NVTOOLSEXT_CUDA_V3
43
+ #define NVTOOLSEXT_CUDA_V3
44
+
45
+ #ifdef __cplusplus
46
+ extern "C" {
47
+ #endif /* __cplusplus */
48
+
49
+ /* ========================================================================= */
50
+ /** \name Functions for CUDA Resource Naming
51
+ */
52
+ /** \addtogroup RESOURCE_NAMING
53
+ * \section RESOURCE_NAMING_CUDA CUDA Resource Naming
54
+ *
55
+ * This section covers the API functions that allow the user to annotate CUDA resources
56
+ * with user-provided names.
57
+ *
58
+ * @{
59
+ */
60
+
61
+ /* ------------------------------------------------------------------------- */
62
+ /* \cond SHOW_HIDDEN
63
+ * \brief Used to build a non-colliding value for resource types in a separate class
64
+ * \version \NVTX_VERSION_2
65
+ */
66
+ #define NVTX_RESOURCE_CLASS_CUDA 4
67
+ /** \endcond */
68
+
69
+ /* ------------------------------------------------------------------------- */
70
+ /** \brief Resource types for CUDA
71
+ */
72
+ typedef enum nvtxResourceCUDAType_t
73
+ {
74
+ NVTX_RESOURCE_TYPE_CUDA_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDA, 1), /* CUdevice */
75
+ NVTX_RESOURCE_TYPE_CUDA_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 2), /* CUcontext */
76
+ NVTX_RESOURCE_TYPE_CUDA_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDA, 3), /* CUstream */
77
+ NVTX_RESOURCE_TYPE_CUDA_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 4), /* CUevent */
78
+ } nvtxResourceCUDAType_t;
79
+
80
+
81
+ /* ------------------------------------------------------------------------- */
82
+ /** \brief Annotates a CUDA device.
83
+ *
84
+ * Allows the user to associate a CUDA device with a user-provided name.
85
+ *
86
+ * \param device - The handle of the CUDA device to name.
87
+ * \param name - The name of the CUDA device.
88
+ *
89
+ * \version \NVTX_VERSION_1
90
+ * @{ */
91
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceA(CUdevice device, const char* name);
92
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceW(CUdevice device, const wchar_t* name);
93
+ /** @} */
94
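+ 
+ /* A minimal usage sketch (error handling omitted); the device is obtained
+  * through the CUDA Driver API before being named.
+  * \code
+  * CUdevice device;
+  * cuInit(0);
+  * cuDeviceGet(&device, 0);
+  * nvtxNameCuDeviceA(device, "Primary GPU");
+  * \endcode
+  */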
+
95
+ /* ------------------------------------------------------------------------- */
96
+ /** \brief Annotates a CUDA context.
97
+ *
98
+ * Allows the user to associate a CUDA context with a user-provided name.
99
+ *
100
+ * \param context - The handle of the CUDA context to name.
101
+ * \param name - The name of the CUDA context.
102
+ *
103
+ * \par Example:
104
+ * \code
105
+ * CUresult status = cuCtxCreate( &cuContext, 0, cuDevice );
106
+ * if ( CUDA_SUCCESS != status )
107
+ * goto Error;
108
+ * nvtxNameCuContextA(cuContext, "CTX_NAME");
109
+ * \endcode
110
+ *
111
+ * \version \NVTX_VERSION_1
112
+ * @{ */
113
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuContextA(CUcontext context, const char* name);
114
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuContextW(CUcontext context, const wchar_t* name);
115
+ /** @} */
116
+
117
+ /* ------------------------------------------------------------------------- */
118
+ /** \brief Annotates a CUDA stream.
119
+ *
120
+ * Allows the user to associate a CUDA stream with a user-provided name.
121
+ *
122
+ * \param stream - The handle of the CUDA stream to name.
123
+ * \param name - The name of the CUDA stream.
124
+ *
125
+ * \version \NVTX_VERSION_1
126
+ * @{ */
127
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamA(CUstream stream, const char* name);
128
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamW(CUstream stream, const wchar_t* name);
129
+ /** @} */
130
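+ 
+ /* A minimal usage sketch, assuming a current CUDA context (error handling omitted):
+  * \code
+  * CUstream stream;
+  * cuStreamCreate(&stream, CU_STREAM_DEFAULT);
+  * nvtxNameCuStreamA(stream, "H2D copy stream");
+  * \endcode
+  */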
+
131
+ /* ------------------------------------------------------------------------- */
132
+ /** \brief Annotates a CUDA event.
133
+ *
134
+ * Allows the user to associate a CUDA event with a user-provided name.
135
+ *
136
+ * \param event - The handle of the CUDA event to name.
137
+ * \param name - The name of the CUDA event.
138
+ *
139
+ * \version \NVTX_VERSION_1
140
+ * @{ */
141
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuEventA(CUevent event, const char* name);
142
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuEventW(CUevent event, const wchar_t* name);
143
+ /** @} */
144
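+ 
+ /* A minimal usage sketch, assuming a current CUDA context (error handling omitted):
+  * \code
+  * CUevent event;
+  * cuEventCreate(&event, CU_EVENT_DEFAULT);
+  * nvtxNameCuEventA(event, "kernel finished");
+  * \endcode
+  */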
+
145
+ /** @} */ /* END RESOURCE_NAMING */
146
+
147
+ /* ========================================================================= */
148
+ #ifdef UNICODE
149
+ #define nvtxNameCuDevice nvtxNameCuDeviceW
150
+ #define nvtxNameCuContext nvtxNameCuContextW
151
+ #define nvtxNameCuStream nvtxNameCuStreamW
152
+ #define nvtxNameCuEvent nvtxNameCuEventW
153
+ #else
154
+ #define nvtxNameCuDevice nvtxNameCuDeviceA
155
+ #define nvtxNameCuContext nvtxNameCuContextA
156
+ #define nvtxNameCuStream nvtxNameCuStreamA
157
+ #define nvtxNameCuEvent nvtxNameCuEventA
158
+ #endif
159
+
160
+ #ifdef __cplusplus
161
+ }
162
+ #endif /* __cplusplus */
163
+
164
+ #ifndef NVTX_NO_IMPL
165
+ #define NVTX_IMPL_GUARD_CUDA /* Ensure other headers cannot be included directly */
166
+ #include "nvtxDetail/nvtxImplCuda_v3.h"
167
+ #undef NVTX_IMPL_GUARD_CUDA
168
+ #endif /*NVTX_NO_IMPL*/
169
+
170
+ #endif /* NVTOOLSEXT_CUDA_V3 */
evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/include/nvtx3/nvToolsExtOpenCL.h ADDED
@@ -0,0 +1,220 @@
1
+ /*
2
+ * Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO USER:
5
+ *
6
+ * This source code is subject to NVIDIA ownership rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * This software and the information contained herein is PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
11
+ * of a form of NVIDIA software license agreement.
12
+ *
13
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
14
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
15
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
16
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
17
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
18
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
19
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
20
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
21
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
22
+ * OR PERFORMANCE OF THIS SOURCE CODE.
23
+ *
24
+ * U.S. Government End Users. This source code is a "commercial item" as
25
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
26
+ * "commercial computer software" and "commercial computer software
27
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
28
+ * and is provided to the U.S. Government only as a commercial end item.
29
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
30
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
31
+ * source code with only those rights set forth herein.
32
+ *
33
+ * Any use of this source code in individual and commercial software must
34
+ * include, in the user documentation and internal comments to the code,
35
+ * the above Disclaimer and U.S. Government End Users Notice.
36
+ */
37
+
38
+ #include "nvToolsExt.h"
39
+
40
+ #include <CL/cl.h>
41
+
42
+ #ifndef NVTOOLSEXT_OPENCL_V3
43
+ #define NVTOOLSEXT_OPENCL_V3
44
+
45
+ #ifdef __cplusplus
46
+ extern "C" {
47
+ #endif /* __cplusplus */
48
+
49
+ /* ========================================================================= */
50
+ /** \name Functions for OpenCL Resource Naming
51
+ */
52
+ /** \addtogroup RESOURCE_NAMING
53
+ * \section RESOURCE_NAMING_OPENCL OpenCL Resource Naming
54
+ *
55
+ * This section covers the API functions that allow the user to annotate OpenCL resources
56
+ * with user-provided names.
57
+ *
58
+ * @{
59
+ */
60
+
61
+ /* ------------------------------------------------------------------------- */
62
+ /* \cond SHOW_HIDDEN
63
+ * \brief Used to build a non-colliding value for resource types in a separate class
64
+ * \version \NVTX_VERSION_2
65
+ */
66
+ #define NVTX_RESOURCE_CLASS_OPENCL 6
67
+ /** \endcond */
68
+
69
+ /* ------------------------------------------------------------------------- */
70
+ /** \brief Resource types for OpenCL
71
+ */
72
+ typedef enum nvtxResourceOpenCLType_t
73
+ {
74
+ NVTX_RESOURCE_TYPE_OPENCL_DEVICE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 1),
75
+ NVTX_RESOURCE_TYPE_OPENCL_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 2),
76
+ NVTX_RESOURCE_TYPE_OPENCL_COMMANDQUEUE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 3),
77
+ NVTX_RESOURCE_TYPE_OPENCL_MEMOBJECT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 4),
78
+ NVTX_RESOURCE_TYPE_OPENCL_SAMPLER = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 5),
79
+ NVTX_RESOURCE_TYPE_OPENCL_PROGRAM = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 6),
80
+ NVTX_RESOURCE_TYPE_OPENCL_EVENT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 7),
81
+ } nvtxResourceOpenCLType_t;
82
+
83
+
84
+ /* ------------------------------------------------------------------------- */
85
+ /** \brief Annotates an OpenCL device.
86
+ *
87
+ * Allows the user to associate an OpenCL device with a user-provided name.
88
+ *
89
+ * \param device - The handle of the OpenCL device to name.
90
+ * \param name - The name of the OpenCL device.
91
+ *
92
+ * \version \NVTX_VERSION_1
93
+ * @{ */
94
+ NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceA(cl_device_id device, const char* name);
95
+ NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceW(cl_device_id device, const wchar_t* name);
96
+ /** @} */
97
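+ 
+ /* A minimal usage sketch (error handling omitted):
+  * \code
+  * cl_platform_id platform;
+  * cl_device_id device;
+  * clGetPlatformIDs(1, &platform, NULL);
+  * clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
+  * nvtxNameClDeviceA(device, "Discrete GPU");
+  * \endcode
+  */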
+
98
+ /* ------------------------------------------------------------------------- */
99
+ /** \brief Annotates an OpenCL context.
100
+ *
101
+ * Allows the user to associate an OpenCL context with a user-provided name.
102
+ *
103
+ * \param context - The handle of the OpenCL context to name.
104
+ * \param name - The name of the OpenCL context.
105
+ *
106
+ * \version \NVTX_VERSION_1
107
+ * @{ */
108
+ NVTX_DECLSPEC void NVTX_API nvtxNameClContextA(cl_context context, const char* name);
109
+ NVTX_DECLSPEC void NVTX_API nvtxNameClContextW(cl_context context, const wchar_t* name);
110
+ /** @} */
111
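+ 
+ /* A minimal usage sketch, assuming `device` was obtained with clGetDeviceIDs
+  * (error handling omitted):
+  * \code
+  * cl_int err;
+  * cl_context context = clCreateContext(NULL, 1, &device, NULL, NULL, &err);
+  * nvtxNameClContextA(context, "Compute context");
+  * \endcode
+  */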
+
112
+ /* ------------------------------------------------------------------------- */
113
+ /** \brief Annotates an OpenCL command queue.
114
+ *
115
+ * Allows the user to associate an OpenCL command queue with a user-provided name.
116
+ *
117
+ * \param command_queue - The handle of the OpenCL command queue to name.
118
+ * \param name - The name of the OpenCL command queue.
119
+ *
120
+ * \version \NVTX_VERSION_1
121
+ * @{ */
122
+ NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueA(cl_command_queue command_queue, const char* name);
123
+ NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueW(cl_command_queue command_queue, const wchar_t* name);
124
+ /** @} */
125
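+ 
+ /* A minimal usage sketch, assuming `context` and `device` as in the sketches
+  * above; clCreateCommandQueue is the pre-OpenCL-2.0 entry point.
+  * \code
+  * cl_int err;
+  * cl_command_queue queue = clCreateCommandQueue(context, device, 0, &err);
+  * nvtxNameClCommandQueueA(queue, "Transfer queue");
+  * \endcode
+  */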
+
126
+ /* ------------------------------------------------------------------------- */
127
+ /** \brief Annotates an OpenCL memory object.
128
+ *
129
+ * Allows the user to associate an OpenCL memory object with a user-provided name.
130
+ *
131
+ * \param memobj - The handle of the OpenCL memory object to name.
132
+ * \param name - The name of the OpenCL memory object.
133
+ *
134
+ * \version \NVTX_VERSION_1
135
+ * @{ */
136
+ NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectA(cl_mem memobj, const char* name);
137
+ NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectW(cl_mem memobj, const wchar_t* name);
138
+ /** @} */
139
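+ 
+ /* A minimal usage sketch, assuming `context` as in the sketches above:
+  * \code
+  * cl_int err;
+  * cl_mem buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, 1024, NULL, &err);
+  * nvtxNameClMemObjectA(buffer, "Input buffer");
+  * \endcode
+  */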
+
140
+ /* ------------------------------------------------------------------------- */
141
+ /** \brief Annotates an OpenCL sampler.
142
+ *
143
+ * Allows the user to associate an OpenCL sampler with a user-provided name.
144
+ *
145
+ * \param sampler - The handle of the OpenCL sampler to name.
146
+ * \param name - The name of the OpenCL sampler.
147
+ *
148
+ * \version \NVTX_VERSION_1
149
+ * @{ */
150
+ NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerA(cl_sampler sampler, const char* name);
151
+ NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerW(cl_sampler sampler, const wchar_t* name);
152
+ /** @} */
153
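+ 
+ /* A minimal usage sketch, assuming `context` as in the sketches above;
+  * clCreateSampler is the pre-OpenCL-2.0 entry point.
+  * \code
+  * cl_int err;
+  * cl_sampler sampler = clCreateSampler(context, CL_FALSE,
+  *                                      CL_ADDRESS_CLAMP_TO_EDGE, CL_FILTER_LINEAR, &err);
+  * nvtxNameClSamplerA(sampler, "Bilinear sampler");
+  * \endcode
+  */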
+
154
+ /* ------------------------------------------------------------------------- */
155
+ /** \brief Annotates an OpenCL program.
156
+ *
157
+ * Allows the user to associate an OpenCL program with a user-provided name.
158
+ *
159
+ * \param program - The handle of the OpenCL program to name.
160
+ * \param name - The name of the OpenCL program.
161
+ *
162
+ * \code
163
+ * cpProgram = clCreateProgramWithSource(cxGPUContext, 1,
164
+ * (const char **) &cSourceCL, &program_length, &ciErrNum);
165
+ * shrCheckErrorEX(ciErrNum, CL_SUCCESS, pCleanup);
166
+ * nvtxNameClProgramW(cpProgram, L"PROGRAM_NAME");
167
+ * \endcode
168
+ *
169
+ * \version \NVTX_VERSION_1
170
+ * @{ */
171
+ NVTX_DECLSPEC void NVTX_API nvtxNameClProgramA(cl_program program, const char* name);
172
+ NVTX_DECLSPEC void NVTX_API nvtxNameClProgramW(cl_program program, const wchar_t* name);
173
+ /** @} */
174
+
175
+ /* ------------------------------------------------------------------------- */
176
+ /** \brief Annotates an OpenCL event.
177
+ *
178
+ * Allows the user to associate an OpenCL event with a user-provided name.
179
+ *
180
+ * \param evnt - The handle of the OpenCL event to name.
181
+ * \param name - The name of the OpenCL event.
182
+ *
183
+ * \version \NVTX_VERSION_1
184
+ * @{ */
185
+ NVTX_DECLSPEC void NVTX_API nvtxNameClEventA(cl_event evnt, const char* name);
186
+ NVTX_DECLSPEC void NVTX_API nvtxNameClEventW(cl_event evnt, const wchar_t* name);
187
+ /** @} */
188
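+ 
+ /* A minimal usage sketch, assuming `queue` as in the sketches above
+  * (clEnqueueMarkerWithWaitList requires OpenCL 1.2 or later):
+  * \code
+  * cl_event evt;
+  * clEnqueueMarkerWithWaitList(queue, 0, NULL, &evt);
+  * nvtxNameClEventA(evt, "marker event");
+  * \endcode
+  */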
+
189
+ /** @} */ /* END RESOURCE_NAMING */
190
+
191
+ /* ========================================================================= */
192
+ #ifdef UNICODE
193
+ #define nvtxNameClDevice nvtxNameClDeviceW
194
+ #define nvtxNameClContext nvtxNameClContextW
195
+ #define nvtxNameClCommandQueue nvtxNameClCommandQueueW
196
+ #define nvtxNameClMemObject nvtxNameClMemObjectW
197
+ #define nvtxNameClSampler nvtxNameClSamplerW
198
+ #define nvtxNameClProgram nvtxNameClProgramW
199
+ #define nvtxNameClEvent nvtxNameClEventW
200
+ #else
201
+ #define nvtxNameClDevice nvtxNameClDeviceA
202
+ #define nvtxNameClContext nvtxNameClContextA
203
+ #define nvtxNameClCommandQueue nvtxNameClCommandQueueA
204
+ #define nvtxNameClMemObject nvtxNameClMemObjectA
205
+ #define nvtxNameClSampler nvtxNameClSamplerA
206
+ #define nvtxNameClProgram nvtxNameClProgramA
207
+ #define nvtxNameClEvent nvtxNameClEventA
208
+ #endif
209
+
210
+ #ifdef __cplusplus
211
+ }
212
+ #endif /* __cplusplus */
213
+
214
+ #ifndef NVTX_NO_IMPL
215
+ #define NVTX_IMPL_GUARD_OPENCL /* Ensure other headers cannot included directly */
216
+ #include "nvtxDetail/nvtxImplOpenCL_v3.h"
217
+ #undef NVTX_IMPL_GUARD_OPENCL
218
+ #endif /*NVTX_NO_IMPL*/
219
+
220
+ #endif /* NVTOOLSEXT_OPENCL_V3 */
evalkit_tf437/lib/python3.10/site-packages/nvidia/nvtx/lib/libnvToolsExt.so.1 ADDED
Binary file (40.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/portalocker-2.10.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
evalkit_tf437/lib/python3.10/site-packages/portalocker-2.10.1.dist-info/METADATA ADDED
@@ -0,0 +1,255 @@
1
+ Metadata-Version: 2.1
2
+ Name: portalocker
3
+ Version: 2.10.1
4
+ Summary: Wraps the portalocker recipe for easy usage
5
+ Author-email: Rick van Hattem <wolph@wol.ph>
6
+ License: BSD-3-Clause
7
+ Project-URL: bugs, https://github.com/wolph/portalocker/issues
8
+ Project-URL: documentation, https://portalocker.readthedocs.io/en/latest/
9
+ Project-URL: repository, https://github.com/wolph/portalocker/
10
+ Keywords: locking,locks,with,statement,windows,linux,unix
11
+ Platform: any
12
+ Classifier: Development Status :: 5 - Production/Stable
13
+ Classifier: Development Status :: 6 - Mature
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: BSD License
16
+ Classifier: Natural Language :: English
17
+ Classifier: Operating System :: MacOS :: MacOS X
18
+ Classifier: Operating System :: MacOS
19
+ Classifier: Operating System :: Microsoft :: MS-DOS
20
+ Classifier: Operating System :: Microsoft :: Windows
21
+ Classifier: Operating System :: Microsoft
22
+ Classifier: Operating System :: POSIX :: BSD :: FreeBSD
23
+ Classifier: Operating System :: POSIX :: BSD
24
+ Classifier: Operating System :: POSIX :: Linux
25
+ Classifier: Operating System :: POSIX :: SunOS/Solaris
26
+ Classifier: Operating System :: POSIX
27
+ Classifier: Operating System :: Unix
28
+ Classifier: Programming Language :: Python :: 3 :: Only
29
+ Classifier: Programming Language :: Python :: 3
30
+ Classifier: Programming Language :: Python :: 3.10
31
+ Classifier: Programming Language :: Python :: 3.11
32
+ Classifier: Programming Language :: Python :: 3.12
33
+ Classifier: Programming Language :: Python :: 3.8
34
+ Classifier: Programming Language :: Python :: 3.9
35
+ Classifier: Programming Language :: Python :: Implementation :: CPython
36
+ Classifier: Programming Language :: Python :: Implementation :: IronPython
37
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
38
+ Classifier: Programming Language :: Python :: Implementation
39
+ Classifier: Programming Language :: Python
40
+ Classifier: Topic :: Education :: Testing
41
+ Classifier: Topic :: Office/Business
42
+ Classifier: Topic :: Other/Nonlisted Topic
43
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
44
+ Classifier: Topic :: Software Development :: Libraries
45
+ Classifier: Topic :: System :: Monitoring
46
+ Requires-Python: >=3.8
47
+ Description-Content-Type: text/x-rst
48
+ License-File: LICENSE
49
+ Requires-Dist: pywin32 >=226 ; platform_system == "Windows"
50
+ Provides-Extra: docs
51
+ Requires-Dist: sphinx >=1.7.1 ; extra == 'docs'
52
+ Provides-Extra: redis
53
+ Requires-Dist: redis ; extra == 'redis'
54
+ Provides-Extra: tests
55
+ Requires-Dist: pytest >=5.4.1 ; extra == 'tests'
56
+ Requires-Dist: pytest-cov >=2.8.1 ; extra == 'tests'
57
+ Requires-Dist: pytest-timeout >=2.1.0 ; extra == 'tests'
58
+ Requires-Dist: sphinx >=6.0.0 ; extra == 'tests'
59
+ Requires-Dist: pytest-mypy >=0.8.0 ; extra == 'tests'
60
+ Requires-Dist: types-redis ; extra == 'tests'
61
+ Requires-Dist: redis ; extra == 'tests'
62
+
63
+ ############################################
64
+ portalocker - Cross-platform locking library
65
+ ############################################
66
+
67
+ .. image:: https://github.com/WoLpH/portalocker/actions/workflows/python-package.yml/badge.svg?branch=master
68
+ :alt: Linux Test Status
69
+ :target: https://github.com/WoLpH/portalocker/actions/
70
+
71
+ .. image:: https://ci.appveyor.com/api/projects/status/mgqry98hgpy4prhh?svg=true
72
+ :alt: Windows Tests Status
73
+ :target: https://ci.appveyor.com/project/WoLpH/portalocker
74
+
75
+ .. image:: https://coveralls.io/repos/WoLpH/portalocker/badge.svg?branch=master
76
+ :alt: Coverage Status
77
+ :target: https://coveralls.io/r/WoLpH/portalocker?branch=master
78
+
79
+ Overview
80
+ --------
81
+
82
+ Portalocker is a library to provide an easy API to file locking.
83
+
84
+ An important detail to note is that on Linux and Unix systems the locks are
85
+ advisory by default. By specifying the `-o mand` option to the mount command it
86
+ is possible to enable mandatory file locking on Linux. This is generally not
87
+ recommended however. For more information about the subject:
88
+
89
+ - https://en.wikipedia.org/wiki/File_locking
90
+ - http://stackoverflow.com/questions/39292051/portalocker-does-not-seem-to-lock
91
+ - https://stackoverflow.com/questions/12062466/mandatory-file-lock-on-linux
92
+
93
+ The module is currently maintained by Rick van Hattem <Wolph@wol.ph>.
94
+ The project resides at https://github.com/WoLpH/portalocker . Bugs and feature
95
+ requests can be submitted there. Patches are also very welcome.
96
+
97
+ Security contact information
98
+ ------------------------------------------------------------------------------
99
+
100
+ To report a security vulnerability, please use the
101
+ `Tidelift security contact <https://tidelift.com/security>`_.
102
+ Tidelift will coordinate the fix and disclosure.
103
+
104
+ Redis Locks
105
+ -----------
106
+
107
+ This library now features a lock based on Redis which allows for locks across
108
+ multiple threads, processes and even distributed locks across multiple
109
+ computers.
110
+
111
+ It is an extremely reliable Redis lock that is based on pubsub.
112
+
113
+ As opposed to most Redis locking systems based on key/value pairs,
114
+ this locking method is based on the pubsub system. The big advantage is
115
+ that if the connection gets killed due to network issues, crashing
116
+ processes or otherwise, it will still immediately unlock instead of
117
+ waiting for a lock timeout.
118
+
119
+ First make sure you have everything installed correctly:
120
+
121
+ ::
122
+
123
+ pip install "portalocker[redis]"
124
+
125
+ Usage is really easy:
126
+
127
+ ::
128
+
129
+ import portalocker
130
+
131
+ lock = portalocker.RedisLock('some_lock_channel_name')
132
+
133
+ with lock:
134
+ print('do something here')
135
+
136
+ The API is essentially identical to the other ``Lock`` classes so in addition
137
+ to the ``with`` statement you can also use ``lock.acquire(...)``.
138
+
139
+ Python 2
140
+ --------
141
+
142
+ Python 2 was supported in versions before Portalocker 2.0. If you are still
143
+ using
144
+ Python 2,
145
+ you can run this to install:
146
+
147
+ ::
148
+
149
+ pip install "portalocker<2"
150
+
151
+ Tips
152
+ ----
153
+
154
+ On some networked filesystems it might be needed to force a `os.fsync()` before
155
+ closing the file so it's actually written before another client reads the file.
156
+ Effectively this comes down to:
157
+
158
+ ::
159
+
160
+ with portalocker.Lock('some_file', 'rb+', timeout=60) as fh:
161
+ # do what you need to do
162
+ ...
163
+
164
+ # flush and sync to filesystem
165
+ fh.flush()
166
+ os.fsync(fh.fileno())
167
+
168
+ Links
169
+ -----
170
+
171
+ * Documentation
172
+ - http://portalocker.readthedocs.org/en/latest/
173
+ * Source
174
+ - https://github.com/WoLpH/portalocker
175
+ * Bug reports
176
+ - https://github.com/WoLpH/portalocker/issues
177
+ * Package homepage
178
+ - https://pypi.python.org/pypi/portalocker
179
+ * My blog
180
+ - http://w.wol.ph/
181
+
182
+ Examples
183
+ --------
184
+
185
+ To make sure your cache generation scripts don't race, use the `Lock` class:
186
+
187
+ >>> import portalocker
188
+ >>> with portalocker.Lock('somefile', timeout=1) as fh:
189
+ ... print('writing some stuff to my cache...', file=fh)
190
+
191
+ To customize the opening and locking a manual approach is also possible:
192
+
193
+ >>> import portalocker
194
+ >>> file = open('somefile', 'r+')
195
+ >>> portalocker.lock(file, portalocker.LockFlags.EXCLUSIVE)
196
+ >>> file.seek(12)
197
+ >>> file.write('foo')
198
+ >>> file.close()
199
+
200
+ Explicitly unlocking is not needed in most cases but omitting it has been known
201
+ to cause issues:
202
+ https://github.com/AzureAD/microsoft-authentication-extensions-for-python/issues/42#issuecomment-601108266
203
+
204
+ If needed, it can be done through:
205
+
206
+ >>> portalocker.unlock(file)
207
+
208
+ Do note that your data might still be in a buffer so it is possible that your
209
+ data is not available until you `flush()` or `close()`.
210
+
211
+ To create a cross platform bounded semaphore across multiple processes you can
212
+ use the `BoundedSemaphore` class which functions somewhat similar to
213
+ `threading.BoundedSemaphore`:
214
+
215
+ >>> import portalocker
216
+ >>> n = 2
217
+ >>> timeout = 0.1
218
+
219
+ >>> semaphore_a = portalocker.BoundedSemaphore(n, timeout=timeout)
220
+ >>> semaphore_b = portalocker.BoundedSemaphore(n, timeout=timeout)
221
+ >>> semaphore_c = portalocker.BoundedSemaphore(n, timeout=timeout)
222
+
223
+ >>> semaphore_a.acquire()
224
+ <portalocker.utils.Lock object at ...>
225
+ >>> semaphore_b.acquire()
226
+ <portalocker.utils.Lock object at ...>
227
+ >>> semaphore_c.acquire()
228
+ Traceback (most recent call last):
229
+ ...
230
+ portalocker.exceptions.AlreadyLocked
231
+
232
+
233
+ More examples can be found in the
234
+ `tests <http://portalocker.readthedocs.io/en/latest/_modules/tests/tests.html>`_.
235
+
236
+
237
+ Versioning
238
+ ----------
239
+
240
+ This library follows `Semantic Versioning <http://semver.org/>`_.
241
+
242
+
243
+ Changelog
244
+ ---------
245
+
246
+ Every release has a ``git tag`` with a commit message for the tag
247
+ explaining what was added and/or changed. The list of tags/releases
248
+ including the commit messages can be found here:
249
+ https://github.com/WoLpH/portalocker/releases
250
+
251
+ License
252
+ -------
253
+
254
+ See the `LICENSE <https://github.com/WoLpH/portalocker/blob/develop/LICENSE>`_ file.
255
+
evalkit_tf437/lib/python3.10/site-packages/pygments/__pycache__/lexer.cpython-310.pyc ADDED
Binary file (26.6 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/_cl_builtins.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/_lasso_builtins.cpython-310.pyc ADDED
Binary file (76.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/amdgpu.cpython-310.pyc ADDED
Binary file (1.75 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/cplint.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/lexers/__pycache__/jslt.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__init__.py ADDED
@@ -0,0 +1,61 @@
1
+ """
2
+ pygments.styles
3
+ ~~~~~~~~~~~~~~~
4
+
5
+ Contains built-in styles.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.plugin import find_plugin_styles
12
+ from pygments.util import ClassNotFound
13
+ from pygments.styles._mapping import STYLES
14
+
15
+ #: A dictionary of built-in styles, mapping style names to
16
+ #: ``'submodule::classname'`` strings.
17
+ #: This list is deprecated. Use `pygments.styles.STYLES` instead
18
+ STYLE_MAP = {v[1]: v[0].split('.')[-1] + '::' + k for k, v in STYLES.items()}
19
+
20
+ #: Internal reverse mapping to make `get_style_by_name` more efficient
21
+ _STYLE_NAME_TO_MODULE_MAP = {v[1]: (v[0], k) for k, v in STYLES.items()}
22
+
23
+
24
+ def get_style_by_name(name):
25
+ """
26
+ Return a style class by its short name. The names of the builtin styles
27
+ are listed in :data:`pygments.styles.STYLE_MAP`.
28
+
29
+ Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
30
+ found.
31
+ """
32
+ if name in _STYLE_NAME_TO_MODULE_MAP:
33
+ mod, cls = _STYLE_NAME_TO_MODULE_MAP[name]
34
+ builtin = "yes"
35
+ else:
36
+ for found_name, style in find_plugin_styles():
37
+ if name == found_name:
38
+ return style
39
+ # perhaps it got dropped into our styles package
40
+ builtin = ""
41
+ mod = 'pygments.styles.' + name
42
+ cls = name.title() + "Style"
43
+
44
+ try:
45
+ mod = __import__(mod, None, None, [cls])
46
+ except ImportError:
47
+ raise ClassNotFound(f"Could not find style module {mod!r}" +
48
+ (builtin and ", though it should be builtin")
49
+ + ".")
50
+ try:
51
+ return getattr(mod, cls)
52
+ except AttributeError:
53
+ raise ClassNotFound(f"Could not find style class {cls!r} in style module.")
54
+
55
+
56
+ def get_all_styles():
57
+ """Return a generator for all styles by name, both builtin and plugin."""
58
+ for v in STYLES.values():
59
+ yield v[1]
60
+ for name, _ in find_plugin_styles():
61
+ yield name
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/_mapping.cpython-310.pyc ADDED
Binary file (3.29 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/colorful.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/default.cpython-310.pyc ADDED
Binary file (2.08 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/dracula.cpython-310.pyc ADDED
Binary file (2.22 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/emacs.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/friendly_grayscale.cpython-310.pyc ADDED
Binary file (2.41 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/igor.cpython-310.pyc ADDED
Binary file (968 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/inkpot.cpython-310.pyc ADDED
Binary file (1.95 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/lilypond.cpython-310.pyc ADDED
Binary file (1.72 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/lovelace.cpython-310.pyc ADDED
Binary file (2.64 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/manni.cpython-310.pyc ADDED
Binary file (2.33 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/onedark.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/pastie.cpython-310.pyc ADDED
Binary file (2.19 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/rainbow_dash.cpython-310.pyc ADDED
Binary file (2.48 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/solarized.cpython-310.pyc ADDED
Binary file (3.29 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/trac.cpython-310.pyc ADDED
Binary file (1.77 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/__pycache__/vs.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/_mapping.py ADDED
1
+ # Automatically generated by scripts/gen_mapfiles.py.
2
+ # DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
3
+
4
+ STYLES = {
5
+ 'AbapStyle': ('pygments.styles.abap', 'abap', ()),
6
+ 'AlgolStyle': ('pygments.styles.algol', 'algol', ()),
7
+ 'Algol_NuStyle': ('pygments.styles.algol_nu', 'algol_nu', ()),
8
+ 'ArduinoStyle': ('pygments.styles.arduino', 'arduino', ()),
9
+ 'AutumnStyle': ('pygments.styles.autumn', 'autumn', ()),
10
+ 'BlackWhiteStyle': ('pygments.styles.bw', 'bw', ()),
11
+ 'BorlandStyle': ('pygments.styles.borland', 'borland', ()),
12
+ 'CoffeeStyle': ('pygments.styles.coffee', 'coffee', ()),
13
+ 'ColorfulStyle': ('pygments.styles.colorful', 'colorful', ()),
14
+ 'DefaultStyle': ('pygments.styles.default', 'default', ()),
15
+ 'DraculaStyle': ('pygments.styles.dracula', 'dracula', ()),
16
+ 'EmacsStyle': ('pygments.styles.emacs', 'emacs', ()),
17
+ 'FriendlyGrayscaleStyle': ('pygments.styles.friendly_grayscale', 'friendly_grayscale', ()),
18
+ 'FriendlyStyle': ('pygments.styles.friendly', 'friendly', ()),
19
+ 'FruityStyle': ('pygments.styles.fruity', 'fruity', ()),
20
+ 'GhDarkStyle': ('pygments.styles.gh_dark', 'github-dark', ()),
21
+ 'GruvboxDarkStyle': ('pygments.styles.gruvbox', 'gruvbox-dark', ()),
22
+ 'GruvboxLightStyle': ('pygments.styles.gruvbox', 'gruvbox-light', ()),
23
+ 'IgorStyle': ('pygments.styles.igor', 'igor', ()),
24
+ 'InkPotStyle': ('pygments.styles.inkpot', 'inkpot', ()),
25
+ 'LightbulbStyle': ('pygments.styles.lightbulb', 'lightbulb', ()),
26
+ 'LilyPondStyle': ('pygments.styles.lilypond', 'lilypond', ()),
27
+ 'LovelaceStyle': ('pygments.styles.lovelace', 'lovelace', ()),
28
+ 'ManniStyle': ('pygments.styles.manni', 'manni', ()),
29
+ 'MaterialStyle': ('pygments.styles.material', 'material', ()),
30
+ 'MonokaiStyle': ('pygments.styles.monokai', 'monokai', ()),
31
+ 'MurphyStyle': ('pygments.styles.murphy', 'murphy', ()),
32
+ 'NativeStyle': ('pygments.styles.native', 'native', ()),
33
+ 'NordDarkerStyle': ('pygments.styles.nord', 'nord-darker', ()),
34
+ 'NordStyle': ('pygments.styles.nord', 'nord', ()),
35
+ 'OneDarkStyle': ('pygments.styles.onedark', 'one-dark', ()),
36
+ 'ParaisoDarkStyle': ('pygments.styles.paraiso_dark', 'paraiso-dark', ()),
37
+ 'ParaisoLightStyle': ('pygments.styles.paraiso_light', 'paraiso-light', ()),
38
+ 'PastieStyle': ('pygments.styles.pastie', 'pastie', ()),
39
+ 'PerldocStyle': ('pygments.styles.perldoc', 'perldoc', ()),
40
+ 'RainbowDashStyle': ('pygments.styles.rainbow_dash', 'rainbow_dash', ()),
41
+ 'RrtStyle': ('pygments.styles.rrt', 'rrt', ()),
42
+ 'SasStyle': ('pygments.styles.sas', 'sas', ()),
43
+ 'SolarizedDarkStyle': ('pygments.styles.solarized', 'solarized-dark', ()),
44
+ 'SolarizedLightStyle': ('pygments.styles.solarized', 'solarized-light', ()),
45
+ 'StarofficeStyle': ('pygments.styles.staroffice', 'staroffice', ()),
46
+ 'StataDarkStyle': ('pygments.styles.stata_dark', 'stata-dark', ()),
47
+ 'StataLightStyle': ('pygments.styles.stata_light', 'stata-light', ()),
48
+ 'TangoStyle': ('pygments.styles.tango', 'tango', ()),
49
+ 'TracStyle': ('pygments.styles.trac', 'trac', ()),
50
+ 'VimStyle': ('pygments.styles.vim', 'vim', ()),
51
+ 'VisualStudioStyle': ('pygments.styles.vs', 'vs', ()),
52
+ 'XcodeStyle': ('pygments.styles.xcode', 'xcode', ()),
53
+ 'ZenburnStyle': ('pygments.styles.zenburn', 'zenburn', ()),
54
+ }
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/algol.py ADDED
@@ -0,0 +1,65 @@
1
+ """
2
+ pygments.styles.algol
3
+ ~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Algol publication style.
6
+
7
+ This style renders source code for publication of algorithms in
8
+ scientific papers and academic texts, where its format is frequently used.
9
+
10
+ It is based on the style of the revised Algol-60 language report[1].
11
+
12
+ o No colours, only black, white and shades of grey are used.
13
+ o Keywords are rendered in lowercase underline boldface.
14
+ o Builtins are rendered in lowercase boldface italic.
15
+ o Docstrings and pragmas are rendered in dark grey boldface.
16
+ o Library identifiers are rendered in dark grey boldface italic.
17
+ o Comments are rendered in grey italic.
18
+
19
+ To render keywords without underlining, refer to the `Algol_Nu` style.
20
+
21
+ For lowercase conversion of keywords and builtins in languages where
22
+ these are not or might not be lowercase, a supporting lexer is required.
23
+ The Algol and Modula-2 lexers automatically convert to lowercase whenever
24
+ this style is selected.
25
+
26
+ [1] `Revised Report on the Algorithmic Language Algol-60 <http://www.masswerk.at/algol60/report.htm>`
27
+
28
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
29
+ :license: BSD, see LICENSE for details.
30
+ """
31
+
32
+ from pygments.style import Style
33
+ from pygments.token import Keyword, Name, Comment, String, Error, Operator
34
+
35
+
36
+ __all__ = ['AlgolStyle']
37
+
38
+
39
+ class AlgolStyle(Style):
40
+ name = 'algol'
41
+
42
+ background_color = "#ffffff"
43
+
44
+ styles = {
45
+ Comment: "italic #888",
46
+ Comment.Preproc: "bold noitalic #888",
47
+ Comment.Special: "bold noitalic #888",
48
+
49
+ Keyword: "underline bold",
50
+ Keyword.Declaration: "italic",
51
+
52
+ Name.Builtin: "bold italic",
53
+ Name.Builtin.Pseudo: "bold italic",
54
+ Name.Namespace: "bold italic #666",
55
+ Name.Class: "bold italic #666",
56
+ Name.Function: "bold italic #666",
57
+ Name.Variable: "bold italic #666",
58
+ Name.Constant: "bold italic #666",
59
+
60
+ Operator.Word: "bold",
61
+
62
+ String: "italic #666",
63
+
64
+ Error: "border:#FF0000"
65
+ }
evalkit_tf437/lib/python3.10/site-packages/pygments/styles/algol_nu.py ADDED
@@ -0,0 +1,65 @@
1
+ """
2
+ pygments.styles.algol_nu
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Algol publication style without underlining of keywords.
6
+
7
+ This style renders source code for publication of algorithms in
8
+ scientific papers and academic texts, where its format is frequently used.
9
+
10
+ It is based on the style of the revised Algol-60 language report[1].
11
+
12
+ o No colours, only black, white and shades of grey are used.
13
+ o Keywords are rendered in lowercase boldface.
14
+ o Builtins are rendered in lowercase boldface italic.
15
+ o Docstrings and pragmas are rendered in dark grey boldface.
16
+ o Library identifiers are rendered in dark grey boldface italic.
17
+ o Comments are rendered in grey italic.
18
+
19
+ To render keywords with underlining, refer to the `Algol` style.
20
+
21
+ For lowercase conversion of keywords and builtins in languages where
22
+ these are not or might not be lowercase, a supporting lexer is required.
23
+ The Algol and Modula-2 lexers automatically convert to lowercase whenever
24
+ this style is selected.
25
+
26
+ [1] `Revised Report on the Algorithmic Language Algol-60 <http://www.masswerk.at/algol60/report.htm>`
27
+
28
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
29
+ :license: BSD, see LICENSE for details.
30
+ """
31
+
32
+ from pygments.style import Style
33
+ from pygments.token import Keyword, Name, Comment, String, Error, Operator
34
+
35
+
36
+ __all__ = ['Algol_NuStyle']
37
+
38
+
39
+ class Algol_NuStyle(Style):
40
+ name = 'algol_nu'
41
+
42
+ background_color = "#ffffff"
43
+
44
+ styles = {
45
+ Comment: "italic #888",
46
+ Comment.Preproc: "bold noitalic #888",
47
+ Comment.Special: "bold noitalic #888",
48
+
49
+ Keyword: "bold",
50
+ Keyword.Declaration: "italic",
51
+
52
+ Name.Builtin: "bold italic",
53
+ Name.Builtin.Pseudo: "bold italic",
54
+ Name.Namespace: "bold italic #666",
55
+ Name.Class: "bold italic #666",
56
+ Name.Function: "bold italic #666",
57
+ Name.Variable: "bold italic #666",
58
+ Name.Constant: "bold italic #666",
59
+
60
+ Operator.Word: "bold",
61
+
62
+ String: "italic #666",
63
+
64
+ Error: "border:#FF0000"
65
+ }