ZTWHHH commited on
Commit
9454285
·
verified ·
1 Parent(s): f1568b7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
  3. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_inference.cpython-310.pyc +0 -0
  4. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/__init__.py +0 -0
  5. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py +0 -0
  6. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py +192 -0
  7. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py +51 -0
  8. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py +73 -0
  9. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py +11 -0
  10. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py +87 -0
  11. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py +67 -0
  12. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/test_na_scalar.py +316 -0
  13. mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/test_nat.py +709 -0
  14. moondream/lib/python3.10/site-packages/gradio/components/base.pyi +426 -0
  15. moondream/lib/python3.10/site-packages/gradio/components/datetime.py +155 -0
  16. moondream/lib/python3.10/site-packages/gradio/components/dropdown.pyi +462 -0
  17. moondream/lib/python3.10/site-packages/gradio/components/fallback.pyi +33 -0
  18. moondream/lib/python3.10/site-packages/gradio/components/file_explorer.pyi +256 -0
  19. moondream/lib/python3.10/site-packages/gradio/components/image.pyi +443 -0
  20. moondream/lib/python3.10/site-packages/gradio/components/markdown.py +118 -0
  21. moondream/lib/python3.10/site-packages/gradio/components/radio.pyi +268 -0
  22. moondream/lib/python3.10/site-packages/gradio/components/slider.py +140 -0
  23. moondream/lib/python3.10/site-packages/gradio/components/upload_button.pyi +318 -0
  24. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py +0 -0
  25. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc +0 -0
  26. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py +0 -0
  27. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h +112 -0
  28. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti.h +123 -0
  29. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h +0 -0
  30. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h +762 -0
  31. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h +127 -0
  32. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h +725 -0
  33. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h +1371 -0
  34. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h +111 -0
  35. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h +950 -0
  36. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h +419 -0
  37. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h +589 -0
  38. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_result.h +328 -0
  39. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_target.h +43 -0
  40. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_version.h +131 -0
  41. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h +116 -0
  42. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h +46 -0
  43. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h +71 -0
  44. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h +3293 -0
  45. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h +2126 -0
  46. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h +38 -0
  47. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h +162 -0
  48. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h +247 -0
  49. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h +197 -0
  50. moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_host.h +1528 -0
.gitattributes CHANGED
@@ -548,3 +548,5 @@ mantis_evalkit/lib/python3.10/site-packages/pyarrow/libparquet.so.1900 filter=lf
548
  parrot/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
549
  mantis_evalkit/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text
550
  moondream/lib/python3.10/site-packages/fontTools/subset/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
548
  parrot/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
549
  mantis_evalkit/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text
550
  moondream/lib/python3.10/site-packages/fontTools/subset/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
551
+ moondream/lib/python3.10/site-packages/pygments/lexers/__pycache__/lisp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
552
+ parrot/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_inference.cpython-310.pyc ADDED
Binary file (58.5 kB). View file
 
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/__init__.py ADDED
File without changes
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py ADDED
File without changes
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import timedelta
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import (
7
+ Interval,
8
+ Timedelta,
9
+ Timestamp,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
+ class TestIntervalArithmetic:
15
+ def test_interval_add(self, closed):
16
+ interval = Interval(0, 1, closed=closed)
17
+ expected = Interval(1, 2, closed=closed)
18
+
19
+ result = interval + 1
20
+ assert result == expected
21
+
22
+ result = 1 + interval
23
+ assert result == expected
24
+
25
+ result = interval
26
+ result += 1
27
+ assert result == expected
28
+
29
+ msg = r"unsupported operand type\(s\) for \+"
30
+ with pytest.raises(TypeError, match=msg):
31
+ interval + interval
32
+
33
+ with pytest.raises(TypeError, match=msg):
34
+ interval + "foo"
35
+
36
+ def test_interval_sub(self, closed):
37
+ interval = Interval(0, 1, closed=closed)
38
+ expected = Interval(-1, 0, closed=closed)
39
+
40
+ result = interval - 1
41
+ assert result == expected
42
+
43
+ result = interval
44
+ result -= 1
45
+ assert result == expected
46
+
47
+ msg = r"unsupported operand type\(s\) for -"
48
+ with pytest.raises(TypeError, match=msg):
49
+ interval - interval
50
+
51
+ with pytest.raises(TypeError, match=msg):
52
+ interval - "foo"
53
+
54
+ def test_interval_mult(self, closed):
55
+ interval = Interval(0, 1, closed=closed)
56
+ expected = Interval(0, 2, closed=closed)
57
+
58
+ result = interval * 2
59
+ assert result == expected
60
+
61
+ result = 2 * interval
62
+ assert result == expected
63
+
64
+ result = interval
65
+ result *= 2
66
+ assert result == expected
67
+
68
+ msg = r"unsupported operand type\(s\) for \*"
69
+ with pytest.raises(TypeError, match=msg):
70
+ interval * interval
71
+
72
+ msg = r"can\'t multiply sequence by non-int"
73
+ with pytest.raises(TypeError, match=msg):
74
+ interval * "foo"
75
+
76
+ def test_interval_div(self, closed):
77
+ interval = Interval(0, 1, closed=closed)
78
+ expected = Interval(0, 0.5, closed=closed)
79
+
80
+ result = interval / 2.0
81
+ assert result == expected
82
+
83
+ result = interval
84
+ result /= 2.0
85
+ assert result == expected
86
+
87
+ msg = r"unsupported operand type\(s\) for /"
88
+ with pytest.raises(TypeError, match=msg):
89
+ interval / interval
90
+
91
+ with pytest.raises(TypeError, match=msg):
92
+ interval / "foo"
93
+
94
+ def test_interval_floordiv(self, closed):
95
+ interval = Interval(1, 2, closed=closed)
96
+ expected = Interval(0, 1, closed=closed)
97
+
98
+ result = interval // 2
99
+ assert result == expected
100
+
101
+ result = interval
102
+ result //= 2
103
+ assert result == expected
104
+
105
+ msg = r"unsupported operand type\(s\) for //"
106
+ with pytest.raises(TypeError, match=msg):
107
+ interval // interval
108
+
109
+ with pytest.raises(TypeError, match=msg):
110
+ interval // "foo"
111
+
112
+ @pytest.mark.parametrize("method", ["__add__", "__sub__"])
113
+ @pytest.mark.parametrize(
114
+ "interval",
115
+ [
116
+ Interval(
117
+ Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00")
118
+ ),
119
+ Interval(Timedelta(days=7), Timedelta(days=14)),
120
+ ],
121
+ )
122
+ @pytest.mark.parametrize(
123
+ "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")]
124
+ )
125
+ def test_time_interval_add_subtract_timedelta(self, interval, delta, method):
126
+ # https://github.com/pandas-dev/pandas/issues/32023
127
+ result = getattr(interval, method)(delta)
128
+ left = getattr(interval.left, method)(delta)
129
+ right = getattr(interval.right, method)(delta)
130
+ expected = Interval(left, right)
131
+
132
+ assert result == expected
133
+
134
+ @pytest.mark.parametrize("interval", [Interval(1, 2), Interval(1.0, 2.0)])
135
+ @pytest.mark.parametrize(
136
+ "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")]
137
+ )
138
+ def test_numeric_interval_add_timedelta_raises(self, interval, delta):
139
+ # https://github.com/pandas-dev/pandas/issues/32023
140
+ msg = "|".join(
141
+ [
142
+ "unsupported operand",
143
+ "cannot use operands",
144
+ "Only numeric, Timestamp and Timedelta endpoints are allowed",
145
+ ]
146
+ )
147
+ with pytest.raises((TypeError, ValueError), match=msg):
148
+ interval + delta
149
+
150
+ with pytest.raises((TypeError, ValueError), match=msg):
151
+ delta + interval
152
+
153
+ @pytest.mark.parametrize("klass", [timedelta, np.timedelta64, Timedelta])
154
+ def test_timedelta_add_timestamp_interval(self, klass):
155
+ delta = klass(0)
156
+ expected = Interval(Timestamp("2020-01-01"), Timestamp("2020-02-01"))
157
+
158
+ result = delta + expected
159
+ assert result == expected
160
+
161
+ result = expected + delta
162
+ assert result == expected
163
+
164
+
165
+ class TestIntervalComparisons:
166
+ def test_interval_equal(self):
167
+ assert Interval(0, 1) == Interval(0, 1, closed="right")
168
+ assert Interval(0, 1) != Interval(0, 1, closed="left")
169
+ assert Interval(0, 1) != 0
170
+
171
+ def test_interval_comparison(self):
172
+ msg = (
173
+ "'<' not supported between instances of "
174
+ "'pandas._libs.interval.Interval' and 'int'"
175
+ )
176
+ with pytest.raises(TypeError, match=msg):
177
+ Interval(0, 1) < 2
178
+
179
+ assert Interval(0, 1) < Interval(1, 2)
180
+ assert Interval(0, 1) < Interval(0, 2)
181
+ assert Interval(0, 1) < Interval(0.5, 1.5)
182
+ assert Interval(0, 1) <= Interval(0, 1)
183
+ assert Interval(0, 1) > Interval(-1, 2)
184
+ assert Interval(0, 1) >= Interval(0, 1)
185
+
186
+ def test_equality_comparison_broadcasts_over_array(self):
187
+ # https://github.com/pandas-dev/pandas/issues/35931
188
+ interval = Interval(0, 1)
189
+ arr = np.array([interval, interval])
190
+ result = interval == arr
191
+ expected = np.array([True, True])
192
+ tm.assert_numpy_array_equal(result, expected)
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas import (
4
+ Interval,
5
+ Period,
6
+ Timestamp,
7
+ )
8
+
9
+
10
+ class TestIntervalConstructors:
11
+ @pytest.mark.parametrize(
12
+ "left, right",
13
+ [
14
+ ("a", "z"),
15
+ (("a", "b"), ("c", "d")),
16
+ (list("AB"), list("ab")),
17
+ (Interval(0, 1), Interval(1, 2)),
18
+ (Period("2018Q1", freq="Q"), Period("2018Q1", freq="Q")),
19
+ ],
20
+ )
21
+ def test_construct_errors(self, left, right):
22
+ # GH#23013
23
+ msg = "Only numeric, Timestamp and Timedelta endpoints are allowed"
24
+ with pytest.raises(ValueError, match=msg):
25
+ Interval(left, right)
26
+
27
+ def test_constructor_errors(self):
28
+ msg = "invalid option for 'closed': foo"
29
+ with pytest.raises(ValueError, match=msg):
30
+ Interval(0, 1, closed="foo")
31
+
32
+ msg = "left side of interval must be <= right side"
33
+ with pytest.raises(ValueError, match=msg):
34
+ Interval(1, 0)
35
+
36
+ @pytest.mark.parametrize(
37
+ "tz_left, tz_right", [(None, "UTC"), ("UTC", None), ("UTC", "US/Eastern")]
38
+ )
39
+ def test_constructor_errors_tz(self, tz_left, tz_right):
40
+ # GH#18538
41
+ left = Timestamp("2017-01-01", tz=tz_left)
42
+ right = Timestamp("2017-01-02", tz=tz_right)
43
+
44
+ if tz_left is None or tz_right is None:
45
+ error = TypeError
46
+ msg = "Cannot compare tz-naive and tz-aware timestamps"
47
+ else:
48
+ error = ValueError
49
+ msg = "left and right must have the same time zone"
50
+ with pytest.raises(error, match=msg):
51
+ Interval(left, right)
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas import (
4
+ Interval,
5
+ Timedelta,
6
+ Timestamp,
7
+ )
8
+
9
+
10
+ class TestContains:
11
+ def test_contains(self):
12
+ interval = Interval(0, 1)
13
+ assert 0.5 in interval
14
+ assert 1 in interval
15
+ assert 0 not in interval
16
+
17
+ interval_both = Interval(0, 1, "both")
18
+ assert 0 in interval_both
19
+ assert 1 in interval_both
20
+
21
+ interval_neither = Interval(0, 1, closed="neither")
22
+ assert 0 not in interval_neither
23
+ assert 0.5 in interval_neither
24
+ assert 1 not in interval_neither
25
+
26
+ def test_contains_interval(self, inclusive_endpoints_fixture):
27
+ interval1 = Interval(0, 1, "both")
28
+ interval2 = Interval(0, 1, inclusive_endpoints_fixture)
29
+ assert interval1 in interval1
30
+ assert interval2 in interval2
31
+ assert interval2 in interval1
32
+ assert interval1 not in interval2 or inclusive_endpoints_fixture == "both"
33
+
34
+ def test_contains_infinite_length(self):
35
+ interval1 = Interval(0, 1, "both")
36
+ interval2 = Interval(float("-inf"), float("inf"), "neither")
37
+ assert interval1 in interval2
38
+ assert interval2 not in interval1
39
+
40
+ def test_contains_zero_length(self):
41
+ interval1 = Interval(0, 1, "both")
42
+ interval2 = Interval(-1, -1, "both")
43
+ interval3 = Interval(0.5, 0.5, "both")
44
+ assert interval2 not in interval1
45
+ assert interval3 in interval1
46
+ assert interval2 not in interval3 and interval3 not in interval2
47
+ assert interval1 not in interval2 and interval1 not in interval3
48
+
49
+ @pytest.mark.parametrize(
50
+ "type1",
51
+ [
52
+ (0, 1),
53
+ (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)),
54
+ (Timedelta("0h"), Timedelta("1h")),
55
+ ],
56
+ )
57
+ @pytest.mark.parametrize(
58
+ "type2",
59
+ [
60
+ (0, 1),
61
+ (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)),
62
+ (Timedelta("0h"), Timedelta("1h")),
63
+ ],
64
+ )
65
+ def test_contains_mixed_types(self, type1, type2):
66
+ interval1 = Interval(*type1)
67
+ interval2 = Interval(*type2)
68
+ if type1 == type2:
69
+ assert interval1 in interval2
70
+ else:
71
+ msg = "^'<=' not supported between instances of"
72
+ with pytest.raises(TypeError, match=msg):
73
+ interval1 in interval2
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas import Interval
2
+
3
+
4
+ def test_interval_repr():
5
+ interval = Interval(0, 1)
6
+ assert repr(interval) == "Interval(0, 1, closed='right')"
7
+ assert str(interval) == "(0, 1]"
8
+
9
+ interval_left = Interval(0, 1, closed="left")
10
+ assert repr(interval_left) == "Interval(0, 1, closed='left')"
11
+ assert str(interval_left) == "[0, 1)"
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ Interval,
6
+ Timedelta,
7
+ Timestamp,
8
+ )
9
+
10
+
11
+ @pytest.fixture
12
+ def interval():
13
+ return Interval(0, 1)
14
+
15
+
16
+ class TestInterval:
17
+ def test_properties(self, interval):
18
+ assert interval.closed == "right"
19
+ assert interval.left == 0
20
+ assert interval.right == 1
21
+ assert interval.mid == 0.5
22
+
23
+ def test_hash(self, interval):
24
+ # should not raise
25
+ hash(interval)
26
+
27
+ @pytest.mark.parametrize(
28
+ "left, right, expected",
29
+ [
30
+ (0, 5, 5),
31
+ (-2, 5.5, 7.5),
32
+ (10, 10, 0),
33
+ (10, np.inf, np.inf),
34
+ (-np.inf, -5, np.inf),
35
+ (-np.inf, np.inf, np.inf),
36
+ (Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")),
37
+ (Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")),
38
+ (Timedelta("1h10min"), Timedelta("5h5min"), Timedelta("3h55min")),
39
+ (Timedelta("5s"), Timedelta("1h"), Timedelta("59min55s")),
40
+ ],
41
+ )
42
+ def test_length(self, left, right, expected):
43
+ # GH 18789
44
+ iv = Interval(left, right)
45
+ result = iv.length
46
+ assert result == expected
47
+
48
+ @pytest.mark.parametrize(
49
+ "left, right, expected",
50
+ [
51
+ ("2017-01-01", "2017-01-06", "5 days"),
52
+ ("2017-01-01", "2017-01-01 12:00:00", "12 hours"),
53
+ ("2017-01-01 12:00", "2017-01-01 12:00:00", "0 days"),
54
+ ("2017-01-01 12:01", "2017-01-05 17:31:00", "4 days 5 hours 30 min"),
55
+ ],
56
+ )
57
+ @pytest.mark.parametrize("tz", (None, "UTC", "CET", "US/Eastern"))
58
+ def test_length_timestamp(self, tz, left, right, expected):
59
+ # GH 18789
60
+ iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
61
+ result = iv.length
62
+ expected = Timedelta(expected)
63
+ assert result == expected
64
+
65
+ @pytest.mark.parametrize(
66
+ "left, right",
67
+ [
68
+ (0, 1),
69
+ (Timedelta("0 days"), Timedelta("1 day")),
70
+ (Timestamp("2018-01-01"), Timestamp("2018-01-02")),
71
+ (
72
+ Timestamp("2018-01-01", tz="US/Eastern"),
73
+ Timestamp("2018-01-02", tz="US/Eastern"),
74
+ ),
75
+ ],
76
+ )
77
+ def test_is_empty(self, left, right, closed):
78
+ # GH27219
79
+ # non-empty always return False
80
+ iv = Interval(left, right, closed)
81
+ assert iv.is_empty is False
82
+
83
+ # same endpoint is empty except when closed='both' (contains one point)
84
+ iv = Interval(left, left, closed)
85
+ result = iv.is_empty
86
+ expected = closed != "both"
87
+ assert result is expected
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas import (
4
+ Interval,
5
+ Timedelta,
6
+ Timestamp,
7
+ )
8
+
9
+
10
+ @pytest.fixture(
11
+ params=[
12
+ (Timedelta("0 days"), Timedelta("1 day")),
13
+ (Timestamp("2018-01-01"), Timedelta("1 day")),
14
+ (0, 1),
15
+ ],
16
+ ids=lambda x: type(x[0]).__name__,
17
+ )
18
+ def start_shift(request):
19
+ """
20
+ Fixture for generating intervals of types from a start value and a shift
21
+ value that can be added to start to generate an endpoint
22
+ """
23
+ return request.param
24
+
25
+
26
+ class TestOverlaps:
27
+ def test_overlaps_self(self, start_shift, closed):
28
+ start, shift = start_shift
29
+ interval = Interval(start, start + shift, closed)
30
+ assert interval.overlaps(interval)
31
+
32
+ def test_overlaps_nested(self, start_shift, closed, other_closed):
33
+ start, shift = start_shift
34
+ interval1 = Interval(start, start + 3 * shift, other_closed)
35
+ interval2 = Interval(start + shift, start + 2 * shift, closed)
36
+
37
+ # nested intervals should always overlap
38
+ assert interval1.overlaps(interval2)
39
+
40
+ def test_overlaps_disjoint(self, start_shift, closed, other_closed):
41
+ start, shift = start_shift
42
+ interval1 = Interval(start, start + shift, other_closed)
43
+ interval2 = Interval(start + 2 * shift, start + 3 * shift, closed)
44
+
45
+ # disjoint intervals should never overlap
46
+ assert not interval1.overlaps(interval2)
47
+
48
+ def test_overlaps_endpoint(self, start_shift, closed, other_closed):
49
+ start, shift = start_shift
50
+ interval1 = Interval(start, start + shift, other_closed)
51
+ interval2 = Interval(start + shift, start + 2 * shift, closed)
52
+
53
+ # overlap if shared endpoint is closed for both (overlap at a point)
54
+ result = interval1.overlaps(interval2)
55
+ expected = interval1.closed_right and interval2.closed_left
56
+ assert result == expected
57
+
58
+ @pytest.mark.parametrize(
59
+ "other",
60
+ [10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")],
61
+ ids=lambda x: type(x).__name__,
62
+ )
63
+ def test_overlaps_invalid_type(self, other):
64
+ interval = Interval(0, 1)
65
+ msg = f"`other` must be an Interval, got {type(other).__name__}"
66
+ with pytest.raises(TypeError, match=msg):
67
+ interval.overlaps(other)
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/test_na_scalar.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ date,
3
+ time,
4
+ timedelta,
5
+ )
6
+ import pickle
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas._libs.missing import NA
12
+
13
+ from pandas.core.dtypes.common import is_scalar
14
+
15
+ import pandas as pd
16
+ import pandas._testing as tm
17
+
18
+
19
+ def test_singleton():
20
+ assert NA is NA
21
+ new_NA = type(NA)()
22
+ assert new_NA is NA
23
+
24
+
25
+ def test_repr():
26
+ assert repr(NA) == "<NA>"
27
+ assert str(NA) == "<NA>"
28
+
29
+
30
+ def test_format():
31
+ # GH-34740
32
+ assert format(NA) == "<NA>"
33
+ assert format(NA, ">10") == " <NA>"
34
+ assert format(NA, "xxx") == "<NA>" # NA is flexible, accept any format spec
35
+
36
+ assert f"{NA}" == "<NA>"
37
+ assert f"{NA:>10}" == " <NA>"
38
+ assert f"{NA:xxx}" == "<NA>"
39
+
40
+
41
+ def test_truthiness():
42
+ msg = "boolean value of NA is ambiguous"
43
+
44
+ with pytest.raises(TypeError, match=msg):
45
+ bool(NA)
46
+
47
+ with pytest.raises(TypeError, match=msg):
48
+ not NA
49
+
50
+
51
+ def test_hashable():
52
+ assert hash(NA) == hash(NA)
53
+ d = {NA: "test"}
54
+ assert d[NA] == "test"
55
+
56
+
57
+ @pytest.mark.parametrize(
58
+ "other", [NA, 1, 1.0, "a", b"a", np.int64(1), np.nan], ids=repr
59
+ )
60
+ def test_arithmetic_ops(all_arithmetic_functions, other):
61
+ op = all_arithmetic_functions
62
+
63
+ if op.__name__ in ("pow", "rpow", "rmod") and isinstance(other, (str, bytes)):
64
+ pytest.skip(reason=f"{op.__name__} with NA and {other} not defined.")
65
+ if op.__name__ in ("divmod", "rdivmod"):
66
+ assert op(NA, other) is (NA, NA)
67
+ else:
68
+ if op.__name__ == "rpow":
69
+ # avoid special case
70
+ other += 1
71
+ assert op(NA, other) is NA
72
+
73
+
74
+ @pytest.mark.parametrize(
75
+ "other",
76
+ [
77
+ NA,
78
+ 1,
79
+ 1.0,
80
+ "a",
81
+ b"a",
82
+ np.int64(1),
83
+ np.nan,
84
+ np.bool_(True),
85
+ time(0),
86
+ date(1, 2, 3),
87
+ timedelta(1),
88
+ pd.NaT,
89
+ ],
90
+ )
91
+ def test_comparison_ops(comparison_op, other):
92
+ assert comparison_op(NA, other) is NA
93
+ assert comparison_op(other, NA) is NA
94
+
95
+
96
+ @pytest.mark.parametrize(
97
+ "value",
98
+ [
99
+ 0,
100
+ 0.0,
101
+ -0,
102
+ -0.0,
103
+ False,
104
+ np.bool_(False),
105
+ np.int_(0),
106
+ np.float64(0),
107
+ np.int_(-0),
108
+ np.float64(-0),
109
+ ],
110
+ )
111
+ @pytest.mark.parametrize("asarray", [True, False])
112
+ def test_pow_special(value, asarray):
113
+ if asarray:
114
+ value = np.array([value])
115
+ result = NA**value
116
+
117
+ if asarray:
118
+ result = result[0]
119
+ else:
120
+ # this assertion isn't possible for ndarray.
121
+ assert isinstance(result, type(value))
122
+ assert result == 1
123
+
124
+
125
+ @pytest.mark.parametrize(
126
+ "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float64(1)]
127
+ )
128
+ @pytest.mark.parametrize("asarray", [True, False])
129
+ def test_rpow_special(value, asarray):
130
+ if asarray:
131
+ value = np.array([value])
132
+ result = value**NA
133
+
134
+ if asarray:
135
+ result = result[0]
136
+ elif not isinstance(value, (np.float64, np.bool_, np.int_)):
137
+ # this assertion isn't possible with asarray=True
138
+ assert isinstance(result, type(value))
139
+
140
+ assert result == value
141
+
142
+
143
+ @pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float64(-1)])
144
+ @pytest.mark.parametrize("asarray", [True, False])
145
+ def test_rpow_minus_one(value, asarray):
146
+ if asarray:
147
+ value = np.array([value])
148
+ result = value**NA
149
+
150
+ if asarray:
151
+ result = result[0]
152
+
153
+ assert pd.isna(result)
154
+
155
+
156
+ def test_unary_ops():
157
+ assert +NA is NA
158
+ assert -NA is NA
159
+ assert abs(NA) is NA
160
+ assert ~NA is NA
161
+
162
+
163
+ def test_logical_and():
164
+ assert NA & True is NA
165
+ assert True & NA is NA
166
+ assert NA & False is False
167
+ assert False & NA is False
168
+ assert NA & NA is NA
169
+
170
+ msg = "unsupported operand type"
171
+ with pytest.raises(TypeError, match=msg):
172
+ NA & 5
173
+
174
+
175
+ def test_logical_or():
176
+ assert NA | True is True
177
+ assert True | NA is True
178
+ assert NA | False is NA
179
+ assert False | NA is NA
180
+ assert NA | NA is NA
181
+
182
+ msg = "unsupported operand type"
183
+ with pytest.raises(TypeError, match=msg):
184
+ NA | 5
185
+
186
+
187
+ def test_logical_xor():
188
+ assert NA ^ True is NA
189
+ assert True ^ NA is NA
190
+ assert NA ^ False is NA
191
+ assert False ^ NA is NA
192
+ assert NA ^ NA is NA
193
+
194
+ msg = "unsupported operand type"
195
+ with pytest.raises(TypeError, match=msg):
196
+ NA ^ 5
197
+
198
+
199
+ def test_logical_not():
200
+ assert ~NA is NA
201
+
202
+
203
+ @pytest.mark.parametrize("shape", [(3,), (3, 3), (1, 2, 3)])
204
+ def test_arithmetic_ndarray(shape, all_arithmetic_functions):
205
+ op = all_arithmetic_functions
206
+ a = np.zeros(shape)
207
+ if op.__name__ == "pow":
208
+ a += 5
209
+ result = op(NA, a)
210
+ expected = np.full(a.shape, NA, dtype=object)
211
+ tm.assert_numpy_array_equal(result, expected)
212
+
213
+
214
+ def test_is_scalar():
215
+ assert is_scalar(NA) is True
216
+
217
+
218
+ def test_isna():
219
+ assert pd.isna(NA) is True
220
+ assert pd.notna(NA) is False
221
+
222
+
223
+ def test_series_isna():
224
+ s = pd.Series([1, NA], dtype=object)
225
+ expected = pd.Series([False, True])
226
+ tm.assert_series_equal(s.isna(), expected)
227
+
228
+
229
+ def test_ufunc():
230
+ assert np.log(NA) is NA
231
+ assert np.add(NA, 1) is NA
232
+ result = np.divmod(NA, 1)
233
+ assert result[0] is NA and result[1] is NA
234
+
235
+ result = np.frexp(NA)
236
+ assert result[0] is NA and result[1] is NA
237
+
238
+
239
+ def test_ufunc_raises():
240
+ msg = "ufunc method 'at'"
241
+ with pytest.raises(ValueError, match=msg):
242
+ np.log.at(NA, 0)
243
+
244
+
245
+ def test_binary_input_not_dunder():
246
+ a = np.array([1, 2, 3])
247
+ expected = np.array([NA, NA, NA], dtype=object)
248
+ result = np.logaddexp(a, NA)
249
+ tm.assert_numpy_array_equal(result, expected)
250
+
251
+ result = np.logaddexp(NA, a)
252
+ tm.assert_numpy_array_equal(result, expected)
253
+
254
+ # all NA, multiple inputs
255
+ assert np.logaddexp(NA, NA) is NA
256
+
257
+ result = np.modf(NA, NA)
258
+ assert len(result) == 2
259
+ assert all(x is NA for x in result)
260
+
261
+
262
+ def test_divmod_ufunc():
263
+ # binary in, binary out.
264
+ a = np.array([1, 2, 3])
265
+ expected = np.array([NA, NA, NA], dtype=object)
266
+
267
+ result = np.divmod(a, NA)
268
+ assert isinstance(result, tuple)
269
+ for arr in result:
270
+ tm.assert_numpy_array_equal(arr, expected)
271
+ tm.assert_numpy_array_equal(arr, expected)
272
+
273
+ result = np.divmod(NA, a)
274
+ for arr in result:
275
+ tm.assert_numpy_array_equal(arr, expected)
276
+ tm.assert_numpy_array_equal(arr, expected)
277
+
278
+
279
+ def test_integer_hash_collision_dict():
280
+ # GH 30013
281
+ result = {NA: "foo", hash(NA): "bar"}
282
+
283
+ assert result[NA] == "foo"
284
+ assert result[hash(NA)] == "bar"
285
+
286
+
287
+ def test_integer_hash_collision_set():
288
+ # GH 30013
289
+ result = {NA, hash(NA)}
290
+
291
+ assert len(result) == 2
292
+ assert NA in result
293
+ assert hash(NA) in result
294
+
295
+
296
+ def test_pickle_roundtrip():
297
+ # https://github.com/pandas-dev/pandas/issues/31847
298
+ result = pickle.loads(pickle.dumps(NA))
299
+ assert result is NA
300
+
301
+
302
+ def test_pickle_roundtrip_pandas():
303
+ result = tm.round_trip_pickle(NA)
304
+ assert result is NA
305
+
306
+
307
+ @pytest.mark.parametrize(
308
+ "values, dtype", [([1, 2, NA], "Int64"), (["A", "B", NA], "string")]
309
+ )
310
+ @pytest.mark.parametrize("as_frame", [True, False])
311
+ def test_pickle_roundtrip_containers(as_frame, values, dtype):
312
+ s = pd.Series(pd.array(values, dtype=dtype))
313
+ if as_frame:
314
+ s = s.to_frame(name="A")
315
+ result = tm.round_trip_pickle(s)
316
+ tm.assert_equal(result, s)
mantis_evalkit/lib/python3.10/site-packages/pandas/tests/scalar/test_nat.py ADDED
@@ -0,0 +1,709 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ datetime,
3
+ timedelta,
4
+ )
5
+ import operator
6
+
7
+ import numpy as np
8
+ import pytest
9
+ import pytz
10
+
11
+ from pandas._libs.tslibs import iNaT
12
+ from pandas.compat.numpy import np_version_gte1p24p3
13
+
14
+ from pandas import (
15
+ DatetimeIndex,
16
+ DatetimeTZDtype,
17
+ Index,
18
+ NaT,
19
+ Period,
20
+ Series,
21
+ Timedelta,
22
+ TimedeltaIndex,
23
+ Timestamp,
24
+ isna,
25
+ offsets,
26
+ )
27
+ import pandas._testing as tm
28
+ from pandas.core import roperator
29
+ from pandas.core.arrays import (
30
+ DatetimeArray,
31
+ PeriodArray,
32
+ TimedeltaArray,
33
+ )
34
+
35
+
36
+ class TestNaTFormatting:
37
+ def test_repr(self):
38
+ assert repr(NaT) == "NaT"
39
+
40
+ def test_str(self):
41
+ assert str(NaT) == "NaT"
42
+
43
+ def test_isoformat(self):
44
+ assert NaT.isoformat() == "NaT"
45
+
46
+
47
+ @pytest.mark.parametrize(
48
+ "nat,idx",
49
+ [
50
+ (Timestamp("NaT"), DatetimeArray),
51
+ (Timedelta("NaT"), TimedeltaArray),
52
+ (Period("NaT", freq="M"), PeriodArray),
53
+ ],
54
+ )
55
+ def test_nat_fields(nat, idx):
56
+ for field in idx._field_ops:
57
+ # weekday is a property of DTI, but a method
58
+ # on NaT/Timestamp for compat with datetime
59
+ if field == "weekday":
60
+ continue
61
+
62
+ result = getattr(NaT, field)
63
+ assert np.isnan(result)
64
+
65
+ result = getattr(nat, field)
66
+ assert np.isnan(result)
67
+
68
+ for field in idx._bool_ops:
69
+ result = getattr(NaT, field)
70
+ assert result is False
71
+
72
+ result = getattr(nat, field)
73
+ assert result is False
74
+
75
+
76
+ def test_nat_vector_field_access():
77
+ idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"])
78
+
79
+ for field in DatetimeArray._field_ops:
80
+ # weekday is a property of DTI, but a method
81
+ # on NaT/Timestamp for compat with datetime
82
+ if field == "weekday":
83
+ continue
84
+
85
+ result = getattr(idx, field)
86
+ expected = Index([getattr(x, field) for x in idx])
87
+ tm.assert_index_equal(result, expected)
88
+
89
+ ser = Series(idx)
90
+
91
+ for field in DatetimeArray._field_ops:
92
+ # weekday is a property of DTI, but a method
93
+ # on NaT/Timestamp for compat with datetime
94
+ if field == "weekday":
95
+ continue
96
+
97
+ result = getattr(ser.dt, field)
98
+ expected = [getattr(x, field) for x in idx]
99
+ tm.assert_series_equal(result, Series(expected))
100
+
101
+ for field in DatetimeArray._bool_ops:
102
+ result = getattr(ser.dt, field)
103
+ expected = [getattr(x, field) for x in idx]
104
+ tm.assert_series_equal(result, Series(expected))
105
+
106
+
107
+ @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
108
+ @pytest.mark.parametrize(
109
+ "value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat", "", "NAT"]
110
+ )
111
+ def test_identity(klass, value):
112
+ assert klass(value) is NaT
113
+
114
+
115
+ @pytest.mark.parametrize("klass", [Timestamp, Timedelta])
116
+ @pytest.mark.parametrize("method", ["round", "floor", "ceil"])
117
+ @pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
118
+ def test_round_nat(klass, method, freq):
119
+ # see gh-14940
120
+ ts = klass("nat")
121
+
122
+ round_method = getattr(ts, method)
123
+ assert round_method(freq) is ts
124
+
125
+
126
+ @pytest.mark.parametrize(
127
+ "method",
128
+ [
129
+ "astimezone",
130
+ "combine",
131
+ "ctime",
132
+ "dst",
133
+ "fromordinal",
134
+ "fromtimestamp",
135
+ "fromisocalendar",
136
+ "isocalendar",
137
+ "strftime",
138
+ "strptime",
139
+ "time",
140
+ "timestamp",
141
+ "timetuple",
142
+ "timetz",
143
+ "toordinal",
144
+ "tzname",
145
+ "utcfromtimestamp",
146
+ "utcnow",
147
+ "utcoffset",
148
+ "utctimetuple",
149
+ "timestamp",
150
+ ],
151
+ )
152
+ def test_nat_methods_raise(method):
153
+ # see gh-9513, gh-17329
154
+ msg = f"NaTType does not support {method}"
155
+
156
+ with pytest.raises(ValueError, match=msg):
157
+ getattr(NaT, method)()
158
+
159
+
160
+ @pytest.mark.parametrize("method", ["weekday", "isoweekday"])
161
+ def test_nat_methods_nan(method):
162
+ # see gh-9513, gh-17329
163
+ assert np.isnan(getattr(NaT, method)())
164
+
165
+
166
+ @pytest.mark.parametrize(
167
+ "method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"]
168
+ )
169
+ def test_nat_methods_nat(method):
170
+ # see gh-8254, gh-9513, gh-17329
171
+ assert getattr(NaT, method)() is NaT
172
+
173
+
174
+ @pytest.mark.parametrize(
175
+ "get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)]
176
+ )
177
+ def test_nat_iso_format(get_nat):
178
+ # see gh-12300
179
+ assert get_nat("NaT").isoformat() == "NaT"
180
+ assert get_nat("NaT").isoformat(timespec="nanoseconds") == "NaT"
181
+
182
+
183
+ @pytest.mark.parametrize(
184
+ "klass,expected",
185
+ [
186
+ (Timestamp, ["normalize", "to_julian_date", "to_period", "unit"]),
187
+ (
188
+ Timedelta,
189
+ [
190
+ "components",
191
+ "resolution_string",
192
+ "to_pytimedelta",
193
+ "to_timedelta64",
194
+ "unit",
195
+ "view",
196
+ ],
197
+ ),
198
+ ],
199
+ )
200
+ def test_missing_public_nat_methods(klass, expected):
201
+ # see gh-17327
202
+ #
203
+ # NaT should have *most* of the Timestamp and Timedelta methods.
204
+ # Here, we check which public methods NaT does not have. We
205
+ # ignore any missing private methods.
206
+ nat_names = dir(NaT)
207
+ klass_names = dir(klass)
208
+
209
+ missing = [x for x in klass_names if x not in nat_names and not x.startswith("_")]
210
+ missing.sort()
211
+
212
+ assert missing == expected
213
+
214
+
215
+ def _get_overlap_public_nat_methods(klass, as_tuple=False):
216
+ """
217
+ Get overlapping public methods between NaT and another class.
218
+
219
+ Parameters
220
+ ----------
221
+ klass : type
222
+ The class to compare with NaT
223
+ as_tuple : bool, default False
224
+ Whether to return a list of tuples of the form (klass, method).
225
+
226
+ Returns
227
+ -------
228
+ overlap : list
229
+ """
230
+ nat_names = dir(NaT)
231
+ klass_names = dir(klass)
232
+
233
+ overlap = [
234
+ x
235
+ for x in nat_names
236
+ if x in klass_names and not x.startswith("_") and callable(getattr(klass, x))
237
+ ]
238
+
239
+ # Timestamp takes precedence over Timedelta in terms of overlap.
240
+ if klass is Timedelta:
241
+ ts_names = dir(Timestamp)
242
+ overlap = [x for x in overlap if x not in ts_names]
243
+
244
+ if as_tuple:
245
+ overlap = [(klass, method) for method in overlap]
246
+
247
+ overlap.sort()
248
+ return overlap
249
+
250
+
251
+ @pytest.mark.parametrize(
252
+ "klass,expected",
253
+ [
254
+ (
255
+ Timestamp,
256
+ [
257
+ "as_unit",
258
+ "astimezone",
259
+ "ceil",
260
+ "combine",
261
+ "ctime",
262
+ "date",
263
+ "day_name",
264
+ "dst",
265
+ "floor",
266
+ "fromisocalendar",
267
+ "fromisoformat",
268
+ "fromordinal",
269
+ "fromtimestamp",
270
+ "isocalendar",
271
+ "isoformat",
272
+ "isoweekday",
273
+ "month_name",
274
+ "now",
275
+ "replace",
276
+ "round",
277
+ "strftime",
278
+ "strptime",
279
+ "time",
280
+ "timestamp",
281
+ "timetuple",
282
+ "timetz",
283
+ "to_datetime64",
284
+ "to_numpy",
285
+ "to_pydatetime",
286
+ "today",
287
+ "toordinal",
288
+ "tz_convert",
289
+ "tz_localize",
290
+ "tzname",
291
+ "utcfromtimestamp",
292
+ "utcnow",
293
+ "utcoffset",
294
+ "utctimetuple",
295
+ "weekday",
296
+ ],
297
+ ),
298
+ (Timedelta, ["total_seconds"]),
299
+ ],
300
+ )
301
+ def test_overlap_public_nat_methods(klass, expected):
302
+ # see gh-17327
303
+ #
304
+ # NaT should have *most* of the Timestamp and Timedelta methods.
305
+ # In case when Timestamp, Timedelta, and NaT are overlap, the overlap
306
+ # is considered to be with Timestamp and NaT, not Timedelta.
307
+ assert _get_overlap_public_nat_methods(klass) == expected
308
+
309
+
310
+ @pytest.mark.parametrize(
311
+ "compare",
312
+ (
313
+ _get_overlap_public_nat_methods(Timestamp, True)
314
+ + _get_overlap_public_nat_methods(Timedelta, True)
315
+ ),
316
+ ids=lambda x: f"{x[0].__name__}.{x[1]}",
317
+ )
318
+ def test_nat_doc_strings(compare):
319
+ # see gh-17327
320
+ #
321
+ # The docstrings for overlapping methods should match.
322
+ klass, method = compare
323
+ klass_doc = getattr(klass, method).__doc__
324
+
325
+ if klass == Timestamp and method == "isoformat":
326
+ pytest.skip(
327
+ "Ignore differences with Timestamp.isoformat() as they're intentional"
328
+ )
329
+
330
+ if method == "to_numpy":
331
+ # GH#44460 can return either dt64 or td64 depending on dtype,
332
+ # different docstring is intentional
333
+ pytest.skip(f"different docstring for {method} is intentional")
334
+
335
+ nat_doc = getattr(NaT, method).__doc__
336
+ assert klass_doc == nat_doc
337
+
338
+
339
+ _ops = {
340
+ "left_plus_right": lambda a, b: a + b,
341
+ "right_plus_left": lambda a, b: b + a,
342
+ "left_minus_right": lambda a, b: a - b,
343
+ "right_minus_left": lambda a, b: b - a,
344
+ "left_times_right": lambda a, b: a * b,
345
+ "right_times_left": lambda a, b: b * a,
346
+ "left_div_right": lambda a, b: a / b,
347
+ "right_div_left": lambda a, b: b / a,
348
+ }
349
+
350
+
351
+ @pytest.mark.parametrize("op_name", list(_ops.keys()))
352
+ @pytest.mark.parametrize(
353
+ "value,val_type",
354
+ [
355
+ (2, "scalar"),
356
+ (1.5, "floating"),
357
+ (np.nan, "floating"),
358
+ ("foo", "str"),
359
+ (timedelta(3600), "timedelta"),
360
+ (Timedelta("5s"), "timedelta"),
361
+ (datetime(2014, 1, 1), "timestamp"),
362
+ (Timestamp("2014-01-01"), "timestamp"),
363
+ (Timestamp("2014-01-01", tz="UTC"), "timestamp"),
364
+ (Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"),
365
+ (pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"),
366
+ ],
367
+ )
368
+ def test_nat_arithmetic_scalar(op_name, value, val_type):
369
+ # see gh-6873
370
+ invalid_ops = {
371
+ "scalar": {"right_div_left"},
372
+ "floating": {
373
+ "right_div_left",
374
+ "left_minus_right",
375
+ "right_minus_left",
376
+ "left_plus_right",
377
+ "right_plus_left",
378
+ },
379
+ "str": set(_ops.keys()),
380
+ "timedelta": {"left_times_right", "right_times_left"},
381
+ "timestamp": {
382
+ "left_times_right",
383
+ "right_times_left",
384
+ "left_div_right",
385
+ "right_div_left",
386
+ },
387
+ }
388
+
389
+ op = _ops[op_name]
390
+
391
+ if op_name in invalid_ops.get(val_type, set()):
392
+ if (
393
+ val_type == "timedelta"
394
+ and "times" in op_name
395
+ and isinstance(value, Timedelta)
396
+ ):
397
+ typs = "(Timedelta|NaTType)"
398
+ msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'"
399
+ elif val_type == "str":
400
+ # un-specific check here because the message comes from str
401
+ # and varies by method
402
+ msg = "|".join(
403
+ [
404
+ "can only concatenate str",
405
+ "unsupported operand type",
406
+ "can't multiply sequence",
407
+ "Can't convert 'NaTType'",
408
+ "must be str, not NaTType",
409
+ ]
410
+ )
411
+ else:
412
+ msg = "unsupported operand type"
413
+
414
+ with pytest.raises(TypeError, match=msg):
415
+ op(NaT, value)
416
+ else:
417
+ if val_type == "timedelta" and "div" in op_name:
418
+ expected = np.nan
419
+ else:
420
+ expected = NaT
421
+
422
+ assert op(NaT, value) is expected
423
+
424
+
425
+ @pytest.mark.parametrize(
426
+ "val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)]
427
+ )
428
+ def test_nat_rfloordiv_timedelta(val, expected):
429
+ # see gh-#18846
430
+ #
431
+ # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
432
+ td = Timedelta(hours=3, minutes=4)
433
+ assert td // val is expected
434
+
435
+
436
+ @pytest.mark.parametrize(
437
+ "op_name",
438
+ ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
439
+ )
440
+ @pytest.mark.parametrize(
441
+ "value",
442
+ [
443
+ DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
444
+ DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"),
445
+ DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"], dtype="M8[ns]"),
446
+ DatetimeArray._from_sequence(
447
+ ["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific")
448
+ ),
449
+ TimedeltaIndex(["1 day", "2 day"], name="x"),
450
+ ],
451
+ )
452
+ def test_nat_arithmetic_index(op_name, value):
453
+ # see gh-11718
454
+ exp_name = "x"
455
+ exp_data = [NaT] * 2
456
+
457
+ if value.dtype.kind == "M" and "plus" in op_name:
458
+ expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)
459
+ else:
460
+ expected = TimedeltaIndex(exp_data, name=exp_name)
461
+ expected = expected.as_unit(value.unit)
462
+
463
+ if not isinstance(value, Index):
464
+ expected = expected.array
465
+
466
+ op = _ops[op_name]
467
+ result = op(NaT, value)
468
+ tm.assert_equal(result, expected)
469
+
470
+
471
+ @pytest.mark.parametrize(
472
+ "op_name",
473
+ ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
474
+ )
475
+ @pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence])
476
+ def test_nat_arithmetic_td64_vector(op_name, box):
477
+ # see gh-19124
478
+ vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
479
+ box_nat = box([NaT, NaT], dtype="timedelta64[ns]")
480
+ tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
481
+
482
+
483
+ @pytest.mark.parametrize(
484
+ "dtype,op,out_dtype",
485
+ [
486
+ ("datetime64[ns]", operator.add, "datetime64[ns]"),
487
+ ("datetime64[ns]", roperator.radd, "datetime64[ns]"),
488
+ ("datetime64[ns]", operator.sub, "timedelta64[ns]"),
489
+ ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"),
490
+ ("timedelta64[ns]", operator.add, "datetime64[ns]"),
491
+ ("timedelta64[ns]", roperator.radd, "datetime64[ns]"),
492
+ ("timedelta64[ns]", operator.sub, "datetime64[ns]"),
493
+ ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"),
494
+ ],
495
+ )
496
+ def test_nat_arithmetic_ndarray(dtype, op, out_dtype):
497
+ other = np.arange(10).astype(dtype)
498
+ result = op(NaT, other)
499
+
500
+ expected = np.empty(other.shape, dtype=out_dtype)
501
+ expected.fill("NaT")
502
+ tm.assert_numpy_array_equal(result, expected)
503
+
504
+
505
+ def test_nat_pinned_docstrings():
506
+ # see gh-17327
507
+ assert NaT.ctime.__doc__ == Timestamp.ctime.__doc__
508
+
509
+
510
+ def test_to_numpy_alias():
511
+ # GH 24653: alias .to_numpy() for scalars
512
+ expected = NaT.to_datetime64()
513
+ result = NaT.to_numpy()
514
+
515
+ assert isna(expected) and isna(result)
516
+
517
+ # GH#44460
518
+ result = NaT.to_numpy("M8[s]")
519
+ assert isinstance(result, np.datetime64)
520
+ assert result.dtype == "M8[s]"
521
+
522
+ result = NaT.to_numpy("m8[ns]")
523
+ assert isinstance(result, np.timedelta64)
524
+ assert result.dtype == "m8[ns]"
525
+
526
+ result = NaT.to_numpy("m8[s]")
527
+ assert isinstance(result, np.timedelta64)
528
+ assert result.dtype == "m8[s]"
529
+
530
+ with pytest.raises(ValueError, match="NaT.to_numpy dtype must be a "):
531
+ NaT.to_numpy(np.int64)
532
+
533
+
534
+ @pytest.mark.parametrize(
535
+ "other",
536
+ [
537
+ Timedelta(0),
538
+ Timedelta(0).to_pytimedelta(),
539
+ pytest.param(
540
+ Timedelta(0).to_timedelta64(),
541
+ marks=pytest.mark.xfail(
542
+ not np_version_gte1p24p3,
543
+ reason="td64 doesn't return NotImplemented, see numpy#17017",
544
+ # When this xfail is fixed, test_nat_comparisons_numpy
545
+ # can be removed.
546
+ ),
547
+ ),
548
+ Timestamp(0),
549
+ Timestamp(0).to_pydatetime(),
550
+ pytest.param(
551
+ Timestamp(0).to_datetime64(),
552
+ marks=pytest.mark.xfail(
553
+ not np_version_gte1p24p3,
554
+ reason="dt64 doesn't return NotImplemented, see numpy#17017",
555
+ ),
556
+ ),
557
+ Timestamp(0).tz_localize("UTC"),
558
+ NaT,
559
+ ],
560
+ )
561
+ def test_nat_comparisons(compare_operators_no_eq_ne, other):
562
+ # GH 26039
563
+ opname = compare_operators_no_eq_ne
564
+
565
+ assert getattr(NaT, opname)(other) is False
566
+
567
+ op = getattr(operator, opname.strip("_"))
568
+ assert op(NaT, other) is False
569
+ assert op(other, NaT) is False
570
+
571
+
572
+ @pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")])
573
+ def test_nat_comparisons_numpy(other):
574
+ # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons
575
+ # pass, this test can be removed
576
+ assert not NaT == other
577
+ assert NaT != other
578
+ assert not NaT < other
579
+ assert not NaT > other
580
+ assert not NaT <= other
581
+ assert not NaT >= other
582
+
583
+
584
+ @pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")])
585
+ @pytest.mark.parametrize(
586
+ "symbol_and_op",
587
+ [("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)],
588
+ )
589
+ def test_nat_comparisons_invalid(other_and_type, symbol_and_op):
590
+ # GH#35585
591
+ other, other_type = other_and_type
592
+ symbol, op = symbol_and_op
593
+
594
+ assert not NaT == other
595
+ assert not other == NaT
596
+
597
+ assert NaT != other
598
+ assert other != NaT
599
+
600
+ msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'"
601
+ with pytest.raises(TypeError, match=msg):
602
+ op(NaT, other)
603
+
604
+ msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'"
605
+ with pytest.raises(TypeError, match=msg):
606
+ op(other, NaT)
607
+
608
+
609
+ @pytest.mark.parametrize(
610
+ "other",
611
+ [
612
+ np.array(["foo"] * 2, dtype=object),
613
+ np.array([2, 3], dtype="int64"),
614
+ np.array([2.0, 3.5], dtype="float64"),
615
+ ],
616
+ ids=["str", "int", "float"],
617
+ )
618
+ def test_nat_comparisons_invalid_ndarray(other):
619
+ # GH#40722
620
+ expected = np.array([False, False])
621
+ result = NaT == other
622
+ tm.assert_numpy_array_equal(result, expected)
623
+ result = other == NaT
624
+ tm.assert_numpy_array_equal(result, expected)
625
+
626
+ expected = np.array([True, True])
627
+ result = NaT != other
628
+ tm.assert_numpy_array_equal(result, expected)
629
+ result = other != NaT
630
+ tm.assert_numpy_array_equal(result, expected)
631
+
632
+ for symbol, op in [
633
+ ("<=", operator.le),
634
+ ("<", operator.lt),
635
+ (">=", operator.ge),
636
+ (">", operator.gt),
637
+ ]:
638
+ msg = f"'{symbol}' not supported between"
639
+
640
+ with pytest.raises(TypeError, match=msg):
641
+ op(NaT, other)
642
+
643
+ if other.dtype == np.dtype("object"):
644
+ # uses the reverse operator, so symbol changes
645
+ msg = None
646
+ with pytest.raises(TypeError, match=msg):
647
+ op(other, NaT)
648
+
649
+
650
+ def test_compare_date(fixed_now_ts):
651
+ # GH#39151 comparing NaT with date object is deprecated
652
+ # See also: tests.scalar.timestamps.test_comparisons::test_compare_date
653
+
654
+ dt = fixed_now_ts.to_pydatetime().date()
655
+
656
+ msg = "Cannot compare NaT with datetime.date object"
657
+ for left, right in [(NaT, dt), (dt, NaT)]:
658
+ assert not left == right
659
+ assert left != right
660
+
661
+ with pytest.raises(TypeError, match=msg):
662
+ left < right
663
+ with pytest.raises(TypeError, match=msg):
664
+ left <= right
665
+ with pytest.raises(TypeError, match=msg):
666
+ left > right
667
+ with pytest.raises(TypeError, match=msg):
668
+ left >= right
669
+
670
+
671
+ @pytest.mark.parametrize(
672
+ "obj",
673
+ [
674
+ offsets.YearEnd(2),
675
+ offsets.YearBegin(2),
676
+ offsets.MonthBegin(1),
677
+ offsets.MonthEnd(2),
678
+ offsets.MonthEnd(12),
679
+ offsets.Day(2),
680
+ offsets.Day(5),
681
+ offsets.Hour(24),
682
+ offsets.Hour(3),
683
+ offsets.Minute(),
684
+ np.timedelta64(3, "h"),
685
+ np.timedelta64(4, "h"),
686
+ np.timedelta64(3200, "s"),
687
+ np.timedelta64(3600, "s"),
688
+ np.timedelta64(3600 * 24, "s"),
689
+ np.timedelta64(2, "D"),
690
+ np.timedelta64(365, "D"),
691
+ timedelta(-2),
692
+ timedelta(365),
693
+ timedelta(minutes=120),
694
+ timedelta(days=4, minutes=180),
695
+ timedelta(hours=23),
696
+ timedelta(hours=23, minutes=30),
697
+ timedelta(hours=48),
698
+ ],
699
+ )
700
+ def test_nat_addsub_tdlike_scalar(obj):
701
+ assert NaT + obj is NaT
702
+ assert obj + NaT is NaT
703
+ assert NaT - obj is NaT
704
+
705
+
706
+ def test_pickle():
707
+ # GH#4606
708
+ p = tm.round_trip_pickle(NaT)
709
+ assert p is NaT
moondream/lib/python3.10/site-packages/gradio/components/base.pyi ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Contains all of the components that can be used with Gradio Interface / Blocks.
2
+ Along with the docs for each component, you can find the names of example demos that use
3
+ each component. These demos are located in the `demo` directory."""
4
+
5
+ from __future__ import annotations
6
+
7
+ import abc
8
+ import hashlib
9
+ import json
10
+ import sys
11
+ import warnings
12
+ from abc import ABC, abstractmethod
13
+ from enum import Enum
14
+ from pathlib import Path
15
+ from typing import TYPE_CHECKING, Any, Callable, Type
16
+
17
+ import gradio_client.utils as client_utils
18
+
19
+ from gradio import utils
20
+ from gradio.blocks import Block, BlockContext
21
+ from gradio.component_meta import ComponentMeta
22
+ from gradio.data_classes import GradioDataModel, JsonData
23
+ from gradio.events import EventListener
24
+ from gradio.layouts import Form
25
+ from gradio.processing_utils import move_files_to_cache
26
+
27
+ if TYPE_CHECKING:
28
+ from typing import TypedDict
29
+
30
+ class DataframeData(TypedDict):
31
+ headers: list[str]
32
+ data: list[list[str | int | bool]]
33
+
34
+ from gradio.components import Timer
35
+
36
+
37
+ class _Keywords(Enum):
38
+ NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()`
39
+ FINISHED_ITERATING = "FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state)
40
+
41
+ from gradio.events import Dependency
42
+
43
+ class ComponentBase(ABC, metaclass=ComponentMeta):
44
+ EVENTS: list[EventListener | str] = []
45
+
46
+ @abstractmethod
47
+ def preprocess(self, payload: Any) -> Any:
48
+ """
49
+ Any preprocessing needed to be performed on function input.
50
+ Parameters:
51
+ payload: The input data received by the component from the frontend.
52
+ Returns:
53
+ The preprocessed input data sent to the user's function in the backend.
54
+ """
55
+ return payload
56
+
57
+ @abstractmethod
58
+ def postprocess(self, value):
59
+ """
60
+ Any postprocessing needed to be performed on function output.
61
+ Parameters:
62
+ value: The output data received by the component from the user's function in the backend.
63
+ Returns:
64
+ The postprocessed output data sent to the frontend.
65
+ """
66
+ return value
67
+
68
+ @abstractmethod
69
+ def process_example(self, value):
70
+ """
71
+ Process the input data in a way that can be displayed by the examples dataset component in the front-end.
72
+
73
+ For example, only return the name of a file as opposed to a full path. Or get the head of a dataframe.
74
+ The return value must be able to be json-serializable to put in the config.
75
+ """
76
+ pass
77
+
78
+ @abstractmethod
79
+ def api_info(self) -> dict[str, list[str]]:
80
+ """
81
+ The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].
82
+ Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output
83
+ """
84
+ pass
85
+
86
+ @abstractmethod
87
+ def example_inputs(self) -> Any:
88
+ """
89
+ Deprecated and replaced by `example_payload()` and `example_value()`.
90
+ """
91
+ pass
92
+
93
+ @abstractmethod
94
+ def flag(self, payload: Any | GradioDataModel, flag_dir: str | Path = "") -> str:
95
+ """
96
+ Write the component's value to a format that can be stored in a csv or jsonl format for flagging.
97
+ """
98
+ pass
99
+
100
+ @abstractmethod
101
+ def read_from_flag(self, payload: Any) -> GradioDataModel | Any:
102
+ """
103
+ Convert the data from the csv or jsonl file into the component state.
104
+ """
105
+ return payload
106
+
107
+ @property
108
+ @abstractmethod
109
+ def skip_api(self):
110
+ """Whether this component should be skipped from the api return value"""
111
+
112
+ @classmethod
113
+ def has_event(cls, event: str | EventListener) -> bool:
114
+ return event in cls.EVENTS
115
+
116
+ @classmethod
117
+ def get_component_class_id(cls) -> str:
118
+ module_name = cls.__module__
119
+ module_path = sys.modules[module_name].__file__
120
+ module_hash = hashlib.md5(f"{cls.__name__}_{module_path}".encode()).hexdigest()
121
+ return module_hash
122
+
123
+
124
+ def server(fn):
125
+ fn._is_server_fn = True
126
+ return fn
127
+
128
+
129
+ class Component(ComponentBase, Block):
130
+ """
131
+ A base class for defining methods that all input/output components should have.
132
+ """
133
+
134
+ def __init__(
135
+ self,
136
+ value: Any = None,
137
+ *,
138
+ label: str | None = None,
139
+ info: str | None = None,
140
+ show_label: bool | None = None,
141
+ container: bool = True,
142
+ scale: int | None = None,
143
+ min_width: int | None = None,
144
+ interactive: bool | None = None,
145
+ visible: bool = True,
146
+ elem_id: str | None = None,
147
+ elem_classes: list[str] | str | None = None,
148
+ render: bool = True,
149
+ key: int | str | None = None,
150
+ load_fn: Callable | None = None,
151
+ every: Timer | float | None = None,
152
+ inputs: Component | list[Component] | set[Component] | None = None,
153
+ ):
154
+ self.server_fns = [
155
+ getattr(self, value)
156
+ for value in dir(self.__class__)
157
+ if callable(getattr(self, value))
158
+ and getattr(getattr(self, value), "_is_server_fn", False)
159
+ ]
160
+
161
+ # Svelte components expect elem_classes to be a list
162
+ # If we don't do this, returning a new component for an
163
+ # update will break the frontend
164
+ if not elem_classes:
165
+ elem_classes = []
166
+
167
+ # This gets overridden when `select` is called
168
+ self._selectable = False
169
+ if not hasattr(self, "data_model"):
170
+ self.data_model: Type[GradioDataModel] | None = None
171
+
172
+ Block.__init__(
173
+ self,
174
+ elem_id=elem_id,
175
+ elem_classes=elem_classes,
176
+ visible=visible,
177
+ render=render,
178
+ key=key,
179
+ )
180
+ if isinstance(self, StreamingInput):
181
+ self.check_streamable()
182
+
183
+ self.label = label
184
+ self.info = info
185
+ if not container:
186
+ if show_label:
187
+ warnings.warn("show_label has no effect when container is False.")
188
+ show_label = False
189
+ if show_label is None:
190
+ show_label = True
191
+ self.show_label = show_label
192
+ self.container = container
193
+ if scale is not None and scale != round(scale):
194
+ warnings.warn(
195
+ f"'scale' value should be an integer. Using {scale} will cause issues."
196
+ )
197
+ self.scale = scale
198
+ self.min_width = min_width
199
+ self.interactive = interactive
200
+
201
+ # load_event is set in the Blocks.attach_load_events method
202
+ self.load_event: None | dict[str, Any] = None
203
+ self.load_event_to_attach: (
204
+ None
205
+ | tuple[
206
+ Callable,
207
+ list[tuple[Block, str]],
208
+ Component | list[Component] | set[Component] | None,
209
+ ]
210
+ ) = None
211
+ load_fn, initial_value = self.get_load_fn_and_initial_value(value, inputs)
212
+ initial_value = self.postprocess(initial_value)
213
+ self.value = move_files_to_cache(
214
+ initial_value,
215
+ self, # type: ignore
216
+ postprocess=True,
217
+ keep_in_cache=True,
218
+ )
219
+ if client_utils.is_file_obj(self.value):
220
+ self.keep_in_cache.add(self.value["path"])
221
+
222
+ if callable(load_fn):
223
+ self.attach_load_event(load_fn, every, inputs)
224
+
225
+ self.component_class_id = self.__class__.get_component_class_id()
226
+
227
+ TEMPLATE_DIR = "./templates/"
228
+ FRONTEND_DIR = "../../frontend/"
229
+
230
+ def get_config(self):
231
+ config = super().get_config()
232
+ if self.info:
233
+ config["info"] = self.info
234
+ if len(self.server_fns):
235
+ config["server_fns"] = [fn.__name__ for fn in self.server_fns]
236
+ config.pop("render", None)
237
+ return config
238
+
239
+ @property
240
+ def skip_api(self):
241
+ return False
242
+
243
+ @staticmethod
244
+ def get_load_fn_and_initial_value(value, inputs=None):
245
+ initial_value = None
246
+ if callable(value):
247
+ if not inputs:
248
+ initial_value = value()
249
+ load_fn = value
250
+ else:
251
+ initial_value = value
252
+ load_fn = None
253
+ return load_fn, initial_value
254
+
255
+ def attach_load_event(
256
+ self,
257
+ callable: Callable,
258
+ every: Timer | float | None,
259
+ inputs: Component | list[Component] | set[Component] | None = None,
260
+ ):
261
+ """Add an event that runs `callable`, optionally at interval specified by `every`."""
262
+ if isinstance(inputs, Component):
263
+ inputs = [inputs]
264
+ changeable_events: list[tuple[Block, str]] = (
265
+ [(i, "change") for i in inputs if hasattr(i, "change")] if inputs else []
266
+ )
267
+ if isinstance(every, (int, float)):
268
+ from gradio.components import Timer
269
+
270
+ every = Timer(every)
271
+ if every:
272
+ changeable_events.append((every, "tick"))
273
+ self.load_event_to_attach = (
274
+ callable,
275
+ changeable_events,
276
+ inputs,
277
+ )
278
+
279
+ def process_example(self, value):
280
+ """
281
+ Process the input data in a way that can be displayed by the examples dataset component in the front-end.
282
+ By default, this calls the `.postprocess()` method of the component. However, if the `.postprocess()` method is
283
+ computationally intensive, or returns a large payload, a custom implementation may be appropriate.
284
+
285
+ For example, the `process_example()` method of the `gr.Audio()` component only returns the name of the file, not
286
+ the processed audio file. The `.process_example()` method of the `gr.Dataframe()` returns the head of a dataframe
287
+ instead of the full dataframe.
288
+
289
+ The return value of this method must be json-serializable to put in the config.
290
+ """
291
+ return self.postprocess(value)
292
+
293
+ def as_example(self, value):
294
+ """Deprecated and replaced by `process_example()`."""
295
+ return self.process_example(value)
296
+
297
+ def example_inputs(self) -> Any:
298
+ """Deprecated and replaced by `example_payload()` and `example_value()`."""
299
+ return self.example_payload()
300
+
301
+ def example_payload(self) -> Any:
302
+ """
303
+ An example input data for this component, e.g. what is passed to this component's preprocess() method.
304
+ This is used to generate the docs for the View API page for Gradio apps using this component.
305
+ """
306
+ raise NotImplementedError()
307
+
308
+ def example_value(self) -> Any:
309
+ """
310
+ An example output data for this component, e.g. what is passed to this component's postprocess() method.
311
+ This is used to generate an example value if this component is used as a template for a custom component.
312
+ """
313
+ raise NotImplementedError()
314
+
315
+ def api_info(self) -> dict[str, Any]:
316
+ """
317
+ The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].
318
+ Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output
319
+ """
320
+ if self.data_model is not None:
321
+ return self.data_model.model_json_schema()
322
+ raise NotImplementedError(
323
+ f"The api_info method has not been implemented for {self.get_block_name()}"
324
+ )
325
+
326
+ def flag(self, payload: Any, flag_dir: str | Path = "") -> str:
327
+ """
328
+ Write the component's value to a format that can be stored in a csv or jsonl format for flagging.
329
+ """
330
+ if self.data_model:
331
+ payload = self.data_model.from_json(payload)
332
+ Path(flag_dir).mkdir(exist_ok=True)
333
+ payload = payload.copy_to_dir(flag_dir).model_dump()
334
+ if isinstance(payload, JsonData):
335
+ payload = payload.model_dump()
336
+ if not isinstance(payload, str):
337
+ payload = json.dumps(payload)
338
+ return payload
339
+
340
+ def read_from_flag(self, payload: Any):
341
+ """
342
+ Convert the data from the csv or jsonl file into the component state.
343
+ """
344
+ if self.data_model:
345
+ return self.data_model.from_json(json.loads(payload))
346
+ return payload
347
+
348
+
349
class FormComponent(Component):
    """Base class for components that are grouped together inside a `Form` layout."""

    def get_expected_parent(self) -> type[Form] | None:
        # Components rendered without a container opt out of the Form wrapper.
        return None if getattr(self, "container", None) is False else Form

    def preprocess(self, payload: Any) -> Any:
        """Pass the payload through unchanged."""
        return payload

    def postprocess(self, value):
        """Pass the value through unchanged."""
        return value
360
+
361
+
362
class StreamingOutput(metaclass=abc.ABCMeta):
    """Mixin for output components whose value can be streamed to the client in chunks."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Set by subclasses to indicate whether streaming is active.
        self.streaming: bool

    @abc.abstractmethod
    def stream_output(
        self, value, output_id: str, first_chunk: bool
    ) -> tuple[bytes, Any]:
        """Encode `value` as a byte chunk plus any updated state to send next."""
372
+
373
+
374
class StreamingInput(metaclass=abc.ABCMeta):
    """Mixin for input components that can accept streamed data from the client."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @abc.abstractmethod
    def check_streamable(self):
        """Used to check if streaming is supported given the input."""
382
+
383
+
384
def component(cls_name: str, render: bool) -> Component:
    """Instantiate a component (never a layout) from its registry name."""
    instance = utils.component_or_layout_class(cls_name)(render=render)
    # Layouts share the registry with components, so reject them explicitly.
    if isinstance(instance, BlockContext):
        raise ValueError(f"Invalid component: {instance.__class__}")
    if not isinstance(instance, Component):
        raise TypeError(f"Expected a Component instance, but got {instance.__class__}")
    return instance
391
+
392
+
393
def get_component_instance(
    comp: str | dict | Component, render: bool = False, unrender: bool = False
) -> Component:
    """
    Returns a component instance from a string, dict, or Component object.
    Parameters:
        comp: the component to instantiate. If a string, must be the name of a component, e.g. "dropdown". If a dict, must have a "name" key, e.g. {"name": "dropdown", "choices": ["a", "b"]}. If a Component object, will be returned as is.
        render: whether to render the component. If True, renders the component (if not already rendered). If False, does not do anything.
        unrender: whether to unrender the component. If True, unrenders the component (if already rendered) -- this is useful when constructing an Interface or ChatInterface inside of a Blocks. If False, does not do anything.
    """
    if isinstance(comp, str):
        component_obj = component(comp, render=render)
    elif isinstance(comp, dict):
        # Remaining dict entries become constructor kwargs for the component.
        name = comp.pop("name")
        component_cls = utils.component_or_layout_class(name)
        component_obj = component_cls(**comp, render=render)
        if isinstance(component_obj, BlockContext):
            raise ValueError(f"Invalid component: {name}")
    elif isinstance(comp, Component):
        component_obj = comp
    else:
        raise ValueError(
            f"Component must be provided as a `str` or `dict` or `Component` but is {comp}"
        )

    if render and not component_obj.is_rendered:
        component_obj.render()
    elif unrender and component_obj.is_rendered:
        component_obj.unrender()
    if not isinstance(component_obj, Component):
        raise TypeError(
            f"Expected a Component instance, but got {component_obj.__class__}"
        )
    return component_obj
moondream/lib/python3.10/site-packages/gradio/components/datetime.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.DateTime() component."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import re
6
+ from datetime import datetime, timedelta
7
+ from typing import Any, Literal
8
+
9
+ import pytz
10
+ from gradio_client.documentation import document
11
+
12
+ from gradio.components.base import FormComponent
13
+ from gradio.events import Events
14
+
15
+
16
@document()
class DateTime(FormComponent):
    """
    Component to select a date and (optionally) a time.
    """

    EVENTS = [
        Events.change,
        Events.submit,
    ]

    def __init__(
        self,
        value: float | str | datetime | None = None,
        *,
        include_time: bool = True,
        type: Literal["timestamp", "datetime", "string"] = "timestamp",
        timezone: str | None = None,
        label: str | None = None,
        show_label: bool | None = None,
        info: str | None = None,
        every: float | None = None,
        scale: int | None = None,
        min_width: int = 160,
        visible: bool = True,
        elem_id: str | None = None,
        elem_classes: list[str] | str | None = None,
        render: bool = True,
        key: int | str | None = None,
    ):
        """
        Parameters:
            value: default value for datetime.
            label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
            show_label: if True, will display label.
            include_time: If True, the component will include time selection. If False, only date selection will be available.
            type: The type of the value. Can be "timestamp", "datetime", or "string". If "timestamp", the value will be a number representing the start and end date in seconds since epoch. If "datetime", the value will be a datetime object. If "string", the value will be the date entered by the user.
            timezone: The timezone to use for timestamps, such as "US/Pacific" or "Europe/Paris". If None, the timezone will be the local timezone.
            info: additional component description.
            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
            scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
            min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
            render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
            key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
        """
        super().__init__(
            every=every,
            scale=scale,
            min_width=min_width,
            visible=visible,
            label=label,
            show_label=show_label,
            info=info,
            elem_id=elem_id,
            elem_classes=elem_classes,
            render=render,
            key=key,
            value=value,
        )
        self.type = type
        self.include_time = include_time
        # Seconds granularity with time; date-only otherwise.
        self.time_format = "%Y-%m-%d %H:%M:%S" if include_time else "%Y-%m-%d"
        self.timezone = timezone

    def preprocess(self, payload: str | None) -> str | float | datetime | None:
        """
        Parameters:
            payload: the date/time string entered by the user, or a "now"-relative expression such as "now - 5m".
        Returns:
            Passes the value into the function as a `str` (re-formatted), `datetime` object, or `float` timestamp, depending on `type`.
        """
        if payload is None or payload == "":
            return None
        if self.type == "string" and "now" not in payload:
            return payload
        # Renamed local (was `datetime`) so the imported datetime class
        # is not shadowed.
        dt = self.get_datetime_from_str(payload)
        if self.type == "string":
            return dt.strftime(self.time_format)
        if self.type == "datetime":
            return dt
        elif self.type == "timestamp":
            return dt.timestamp()

    def postprocess(self, value: float | datetime | str | None) -> str | None:
        """
        Parameters:
            value: a `datetime` object, a `float` timestamp (seconds since epoch), or an already-formatted `str`.
        Returns:
            The value formatted as a date/time string, or None if no value.
        """
        if value is None:
            return None

        if isinstance(value, datetime):
            return value.strftime(self.time_format)
        elif isinstance(value, str):
            # Assume strings are already in the display format.
            return value
        else:
            return datetime.fromtimestamp(
                value, tz=pytz.timezone(self.timezone) if self.timezone else None
            ).strftime(self.time_format)

    def api_info(self) -> dict[str, Any]:
        return {
            "type": "string",
            "description": f"Formatted as YYYY-MM-DD{' HH:MM:SS' if self.include_time else ''}",
        }

    def example_payload(self) -> str:
        return "2020-10-01 05:20:15"

    def example_value(self) -> str:
        return "2020-10-01 05:20:15"

    def get_datetime_from_str(self, date: str) -> datetime:
        """Parse `date`, supporting either the display format or 'now - <N><unit>' with unit in d/h/m/s."""
        now_regex = r"^(?:\s*now\s*(?:-\s*(\d+)\s*([dmhs]))?)?\s*$"

        if "now" in date:
            match = re.match(now_regex, date)
            if match:
                num = int(match.group(1) or 0)
                unit = match.group(2) or "s"
                if unit == "d":
                    delta = timedelta(days=num)
                elif unit == "h":
                    delta = timedelta(hours=num)
                elif unit == "m":
                    delta = timedelta(minutes=num)
                else:
                    delta = timedelta(seconds=num)
                return datetime.now() - delta
            else:
                raise ValueError("Invalid 'now' time format")
        else:
            dt = datetime.strptime(date, self.time_format)
            if self.timezone:
                # localize() attaches the tz correctly (handles DST), unlike
                # passing tzinfo= to strptime/replace.
                dt = pytz.timezone(self.timezone).localize(dt)
            return dt
moondream/lib/python3.10/site-packages/gradio/components/dropdown.pyi ADDED
@@ -0,0 +1,462 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.Dropdown() component."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import warnings
6
+ from typing import TYPE_CHECKING, Any, Callable, Literal
7
+
8
+ from gradio_client.documentation import document
9
+
10
+ from gradio.components.base import Component, FormComponent
11
+ from gradio.events import Events
12
+
13
+ if TYPE_CHECKING:
14
+ from gradio.components import Timer
15
+
16
+ from gradio.events import Dependency
17
+
18
+ @document()
19
+ class Dropdown(FormComponent):
20
+ """
21
+ Creates a dropdown of choices from which a single entry or multiple entries can be selected (as an input component) or displayed (as an output component).
22
+
23
+ Demos: sentence_builder
24
+ """
25
+
26
+ EVENTS = [
27
+ Events.change,
28
+ Events.input,
29
+ Events.select,
30
+ Events.focus,
31
+ Events.blur,
32
+ Events.key_up,
33
+ ]
34
+
35
    def __init__(
        self,
        choices: list[str | int | float | tuple[str, str | int | float]] | None = None,
        *,
        value: str | int | float | list[str | int | float] | Callable | None = None,
        type: Literal["value", "index"] = "value",
        multiselect: bool | None = None,
        allow_custom_value: bool = False,
        max_choices: int | None = None,
        filterable: bool = True,
        label: str | None = None,
        info: str | None = None,
        every: Timer | float | None = None,
        inputs: Component | list[Component] | set[Component] | None = None,
        show_label: bool | None = None,
        container: bool = True,
        scale: int | None = None,
        min_width: int = 160,
        interactive: bool | None = None,
        visible: bool = True,
        elem_id: str | None = None,
        elem_classes: list[str] | str | None = None,
        render: bool = True,
        key: int | str | None = None,
    ):
        """
        Parameters:
            choices: A list of string options to choose from. An option can also be a tuple of the form (name, value), where name is the displayed name of the dropdown choice and value is the value to be passed to the function, or returned by the function.
            value: default value(s) selected in dropdown. If None, no value is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
            type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
            multiselect: if True, multiple choices can be selected.
            allow_custom_value: If True, allows user to enter a custom value that is not in the list of choices.
            max_choices: maximum number of choices that can be selected. If None, no limit is enforced.
            filterable: If True, user will be able to type into the dropdown and filter the choices by typing. Can only be set to False if `allow_custom_value` is False.
            label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
            info: additional component description.
            every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
            inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
            show_label: if True, will display label.
            container: If True, will place the component in a container - providing some extra padding around the border.
            scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
            min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
            interactive: if True, choices in this dropdown will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
            render: If False, component will not be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
            key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
        """
        self.choices = (
            # Although we expect choices to be a list of tuples, it can be a list of lists if the Gradio app
            # is loaded with gr.load() since Python tuples are converted to lists in JSON.
            [tuple(c) if isinstance(c, (tuple, list)) else (str(c), c) for c in choices]
            if choices
            else []
        )
        valid_types = ["value", "index"]
        if type not in valid_types:
            raise ValueError(
                f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
            )
        self.type = type
        self.multiselect = multiselect
        # Normalize a single default value to a one-element list in multiselect mode.
        if multiselect and isinstance(value, str):
            value = [value]
        if not multiselect and max_choices is not None:
            warnings.warn(
                "The `max_choices` parameter is ignored when `multiselect` is False."
            )
        if not filterable and allow_custom_value:
            filterable = True
            warnings.warn(
                "The `filterable` parameter cannot be set to False when `allow_custom_value` is True. Setting `filterable` to True."
            )
        self.max_choices = max_choices
        self.allow_custom_value = allow_custom_value
        self.filterable = filterable
        super().__init__(
            label=label,
            info=info,
            every=every,
            inputs=inputs,
            show_label=show_label,
            container=container,
            scale=scale,
            min_width=min_width,
            interactive=interactive,
            visible=visible,
            elem_id=elem_id,
            elem_classes=elem_classes,
            render=render,
            key=key,
            value=value,
        )
128
+
129
+ def api_info(self) -> dict[str, Any]:
130
+ if self.multiselect:
131
+ json_type = {
132
+ "type": "array",
133
+ "items": {"type": "string", "enum": [c[1] for c in self.choices]},
134
+ }
135
+ else:
136
+ json_type = {
137
+ "type": "string",
138
+ "enum": [c[1] for c in self.choices],
139
+ }
140
+ return json_type
141
+
142
+ def example_payload(self) -> Any:
143
+ if self.multiselect:
144
+ return [self.choices[0][1]] if self.choices else []
145
+ else:
146
+ return self.choices[0][1] if self.choices else None
147
+
148
+ def example_value(self) -> Any:
149
+ if self.multiselect:
150
+ return [self.choices[0][1]] if self.choices else []
151
+ else:
152
+ return self.choices[0][1] if self.choices else None
153
+
154
+ def preprocess(
155
+ self, payload: str | int | float | list[str | int | float] | None
156
+ ) -> str | int | float | list[str | int | float] | list[int | None] | None:
157
+ """
158
+ Parameters:
159
+ payload: the value of the selected dropdown choice(s)
160
+ Returns:
161
+ Passes the value of the selected dropdown choice as a `str | int | float` or its index as an `int` into the function, depending on `type`. Or, if `multiselect` is True, passes the values of the selected dropdown choices as a list of correspoding values/indices instead.
162
+ """
163
+ if self.type == "value":
164
+ return payload
165
+ elif self.type == "index":
166
+ choice_values = [value for _, value in self.choices]
167
+ if payload is None:
168
+ return None
169
+ elif self.multiselect:
170
+ if not isinstance(payload, list):
171
+ raise TypeError("Multiselect dropdown payload must be a list")
172
+ return [
173
+ choice_values.index(choice) if choice in choice_values else None
174
+ for choice in payload
175
+ ]
176
+ else:
177
+ return (
178
+ choice_values.index(payload) if payload in choice_values else None
179
+ )
180
+ else:
181
+ raise ValueError(
182
+ f"Unknown type: {self.type}. Please choose from: 'value', 'index'."
183
+ )
184
+
185
+ def _warn_if_invalid_choice(self, value):
186
+ if self.allow_custom_value or value in [value for _, value in self.choices]:
187
+ return
188
+ warnings.warn(
189
+ f"The value passed into gr.Dropdown() is not in the list of choices. Please update the list of choices to include: {value} or set allow_custom_value=True."
190
+ )
191
+
192
+ def postprocess(
193
+ self, value: str | int | float | list[str | int | float] | None
194
+ ) -> str | int | float | list[str | int | float] | None:
195
+ """
196
+ Parameters:
197
+ value: Expects a `str | int | float` corresponding to the value of the dropdown entry to be selected. Or, if `multiselect` is True, expects a `list` of values corresponding to the selected dropdown entries.
198
+ Returns:
199
+ Returns the values of the selected dropdown entry or entries.
200
+ """
201
+ if value is None:
202
+ return None
203
+ if self.multiselect:
204
+ if not isinstance(value, list):
205
+ value = [value]
206
+ [self._warn_if_invalid_choice(_y) for _y in value]
207
+ else:
208
+ self._warn_if_invalid_choice(value)
209
+ return value
210
+
211
+
212
    def change(self,
        fn: Callable | None,
        inputs: Component | Sequence[Component] | set[Component] | None = None,
        outputs: Component | Sequence[Component] | None = None,
        api_name: str | None | Literal[False] = None,
        scroll_to_output: bool = False,
        show_progress: Literal["full", "minimal", "hidden"] = "full",
        queue: bool | None = None,
        batch: bool = False,
        max_batch_size: int = 4,
        preprocess: bool = True,
        postprocess: bool = True,
        cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
        every: Timer | float | None = None,
        trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
        js: str | None = None,
        concurrency_limit: int | None | Literal["default"] = "default",
        concurrency_id: str | None = None,
        show_api: bool = True) -> Dependency:
        """
        Parameters:
            fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
            inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
            api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
            scroll_to_output: If True, will scroll to output component on completion
            show_progress: how to show progress animation while the event is pending: "full", "minimal", or "hidden"
            queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
            batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
            max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
            preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
            postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
            cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
            every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
            trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
            js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
            concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
            concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
            show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
        """
        ...
253
+
254
    def input(self,
        fn: Callable | None,
        inputs: Component | Sequence[Component] | set[Component] | None = None,
        outputs: Component | Sequence[Component] | None = None,
        api_name: str | None | Literal[False] = None,
        scroll_to_output: bool = False,
        show_progress: Literal["full", "minimal", "hidden"] = "full",
        queue: bool | None = None,
        batch: bool = False,
        max_batch_size: int = 4,
        preprocess: bool = True,
        postprocess: bool = True,
        cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
        every: Timer | float | None = None,
        trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
        js: str | None = None,
        concurrency_limit: int | None | Literal["default"] = "default",
        concurrency_id: str | None = None,
        show_api: bool = True) -> Dependency:
        """
        Parameters:
            fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
            inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
            api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
            scroll_to_output: If True, will scroll to output component on completion
            show_progress: how to show progress animation while the event is pending: "full", "minimal", or "hidden"
            queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
            batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
            max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
            preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
            postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
            cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
            every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
            trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
            js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
            concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
            concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
            show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
        """
        ...
295
+
296
+ def select(self,
297
+ fn: Callable | None,
298
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
299
+ outputs: Component | Sequence[Component] | None = None,
300
+ api_name: str | None | Literal[False] = None,
301
+ scroll_to_output: bool = False,
302
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
303
+ queue: bool | None = None,
304
+ batch: bool = False,
305
+ max_batch_size: int = 4,
306
+ preprocess: bool = True,
307
+ postprocess: bool = True,
308
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
309
+ every: Timer | float | None = None,
310
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
311
+ js: str | None = None,
312
+ concurrency_limit: int | None | Literal["default"] = "default",
313
+ concurrency_id: str | None = None,
314
+ show_api: bool = True) -> Dependency:
315
+ """
316
+ Parameters:
317
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
318
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
319
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
320
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
321
+ scroll_to_output: If True, will scroll to output component on completion
322
+ show_progress: If True, will show progress animation while pending
323
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
324
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
325
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
326
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
327
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
328
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
329
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
330
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
331
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
332
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
333
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
334
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
335
+ """
336
+ ...
337
+
338
+ def focus(self,
339
+ fn: Callable | None,
340
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
341
+ outputs: Component | Sequence[Component] | None = None,
342
+ api_name: str | None | Literal[False] = None,
343
+ scroll_to_output: bool = False,
344
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
345
+ queue: bool | None = None,
346
+ batch: bool = False,
347
+ max_batch_size: int = 4,
348
+ preprocess: bool = True,
349
+ postprocess: bool = True,
350
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
351
+ every: Timer | float | None = None,
352
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
353
+ js: str | None = None,
354
+ concurrency_limit: int | None | Literal["default"] = "default",
355
+ concurrency_id: str | None = None,
356
+ show_api: bool = True) -> Dependency:
357
+ """
358
+ Parameters:
359
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
360
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
361
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
362
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
363
+ scroll_to_output: If True, will scroll to output component on completion
364
+ show_progress: If True, will show progress animation while pending
365
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
366
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
367
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
368
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
369
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
370
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
371
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
372
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
373
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
374
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
375
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
376
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
377
+ """
378
+ ...
379
+
380
+ def blur(self,
381
+ fn: Callable | None,
382
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
383
+ outputs: Component | Sequence[Component] | None = None,
384
+ api_name: str | None | Literal[False] = None,
385
+ scroll_to_output: bool = False,
386
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
387
+ queue: bool | None = None,
388
+ batch: bool = False,
389
+ max_batch_size: int = 4,
390
+ preprocess: bool = True,
391
+ postprocess: bool = True,
392
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
393
+ every: Timer | float | None = None,
394
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
395
+ js: str | None = None,
396
+ concurrency_limit: int | None | Literal["default"] = "default",
397
+ concurrency_id: str | None = None,
398
+ show_api: bool = True) -> Dependency:
399
+ """
400
+ Parameters:
401
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
402
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
403
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
404
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
405
+ scroll_to_output: If True, will scroll to output component on completion
406
+ show_progress: If True, will show progress animation while pending
407
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
408
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
409
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
410
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
411
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
412
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
413
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
414
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
415
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
416
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
417
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
418
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
419
+ """
420
+ ...
421
+
422
+ def key_up(self,
423
+ fn: Callable | None,
424
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
425
+ outputs: Component | Sequence[Component] | None = None,
426
+ api_name: str | None | Literal[False] = None,
427
+ scroll_to_output: bool = False,
428
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
429
+ queue: bool | None = None,
430
+ batch: bool = False,
431
+ max_batch_size: int = 4,
432
+ preprocess: bool = True,
433
+ postprocess: bool = True,
434
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
435
+ every: Timer | float | None = None,
436
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
437
+ js: str | None = None,
438
+ concurrency_limit: int | None | Literal["default"] = "default",
439
+ concurrency_id: str | None = None,
440
+ show_api: bool = True) -> Dependency:
441
+ """
442
+ Parameters:
443
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
444
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
445
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
446
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
447
+ scroll_to_output: If True, will scroll to output component on completion
448
+ show_progress: If True, will show progress animation while pending
449
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
450
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
451
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
452
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
453
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
454
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
455
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
456
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
457
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
458
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
459
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
460
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
461
+ """
462
+ ...
moondream/lib/python3.10/site-packages/gradio/components/fallback.pyi ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from gradio.components.base import Component
2
+
3
+ from gradio.events import Dependency
4
+
5
+ class Fallback(Component):
6
+ def preprocess(self, payload):
7
+ """
8
+ This docstring is used to generate the docs for this custom component.
9
+ Parameters:
10
+ payload: the data to be preprocessed, sent from the frontend
11
+ Returns:
12
+ the data after preprocessing, sent to the user's function in the backend
13
+ """
14
+ return payload
15
+
16
+ def postprocess(self, value):
17
+ """
18
+ This docstring is used to generate the docs for this custom component.
19
+ Parameters:
20
+ payload: the data to be postprocessed, sent from the user's function in the backend
21
+ Returns:
22
+ the data after postprocessing, sent to the frontend
23
+ """
24
+ return value
25
+
26
+ def example_payload(self):
27
+ return {"foo": "bar"}
28
+
29
+ def example_value(self):
30
+ return {"foo": "bar"}
31
+
32
+ def api_info(self):
33
+ return {"type": {}, "description": "any valid json"}
moondream/lib/python3.10/site-packages/gradio/components/file_explorer.pyi ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.FileExplorer() component"""
2
+
3
+ from __future__ import annotations
4
+
5
+ import fnmatch
6
+ import os
7
+ import warnings
8
+ from pathlib import Path
9
+ from typing import TYPE_CHECKING, Any, Callable, List, Literal
10
+
11
+ from gradio_client.documentation import document
12
+
13
+ from gradio.components.base import Component, server
14
+ from gradio.data_classes import GradioRootModel
15
+
16
+ if TYPE_CHECKING:
17
+ from gradio.components import Timer
18
+
19
+
20
+ class FileExplorerData(GradioRootModel):
21
+ root: List[List[str]]
22
+
23
+ from gradio.events import Dependency
24
+
25
+ @document()
26
+ class FileExplorer(Component):
27
+ """
28
+ Creates a file explorer component that allows users to browse files on the machine hosting the Gradio app. As an input component,
29
+ it also allows users to select files to be used as input to a function, while as an output component, it displays selected files.
30
+
31
+ Demos: file_explorer
32
+ """
33
+
34
+ EVENTS = ["change"]
35
+ data_model = FileExplorerData
36
+
37
+ def __init__(
38
+ self,
39
+ glob: str = "**/*",
40
+ *,
41
+ value: str | list[str] | Callable | None = None,
42
+ file_count: Literal["single", "multiple"] = "multiple",
43
+ root_dir: str | Path = ".",
44
+ ignore_glob: str | None = None,
45
+ label: str | None = None,
46
+ every: Timer | float | None = None,
47
+ inputs: Component | list[Component] | set[Component] | None = None,
48
+ show_label: bool | None = None,
49
+ container: bool = True,
50
+ scale: int | None = None,
51
+ min_width: int = 160,
52
+ height: int | float | str | None = None,
53
+ interactive: bool | None = None,
54
+ visible: bool = True,
55
+ elem_id: str | None = None,
56
+ elem_classes: list[str] | str | None = None,
57
+ render: bool = True,
58
+ key: int | str | None = None,
59
+ root: None = None,
60
+ ):
61
+ """
62
+ Parameters:
63
+ glob: The glob-style pattern used to select which files to display, e.g. "*" to match all files, "*.png" to match all .png files, "**/*.txt" to match any .txt file in any subdirectory, etc. The default value matches all files and folders recursively. See the Python glob documentation at https://docs.python.org/3/library/glob.html for more information.
64
+ value: The file (or list of files, depending on the `file_count` parameter) to show as "selected" when the component is first loaded. If a callable is provided, it will be called when the app loads to set the initial value of the component. If not provided, no files are shown as selected.
65
+ file_count: Whether to allow single or multiple files to be selected. If "single", the component will return a single absolute file path as a string. If "multiple", the component will return a list of absolute file paths as a list of strings.
66
+ root_dir: Path to root directory to select files from. If not provided, defaults to current working directory.
67
+ ignore_glob: The glob-style, case-sensitive pattern that will be used to exclude files from the list. For example, "*.py" will exclude all .py files from the list. See the Python glob documentation at https://docs.python.org/3/library/glob.html for more information.
68
+ label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
69
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
70
+ inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
71
+ show_label: if True, will display label.
72
+ container: If True, will place the component in a container - providing some extra padding around the border.
73
+ scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
74
+ min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
75
+ height: The maximum height of the file component, specified in pixels if a number is passed, or in CSS units if a string is passed. If more files are uploaded than can fit in the height, a scrollbar will appear.
76
+ interactive: if True, will allow users to select file(s); if False, will only display files. If not provided, this is inferred based on whether the component is used as an input or output.
77
+ visible: If False, component will be hidden.
78
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
79
+ elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
80
+ render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
81
+ key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
82
+ """
83
+ if root is not None:
84
+ warnings.warn(
85
+ "The `root` parameter has been deprecated. Please use `root_dir` instead."
86
+ )
87
+ root_dir = root
88
+ self._constructor_args[0]["root_dir"] = root
89
+ self.root_dir = os.path.abspath(root_dir)
90
+ self.glob = glob
91
+ self.ignore_glob = ignore_glob
92
+ valid_file_count = ["single", "multiple"]
93
+ if file_count not in valid_file_count:
94
+ raise ValueError(
95
+ f"Invalid value for parameter `file_count`: {file_count}. Please choose from one of: {valid_file_count}"
96
+ )
97
+ self.file_count = file_count
98
+ self.height = height
99
+
100
+ super().__init__(
101
+ label=label,
102
+ every=every,
103
+ inputs=inputs,
104
+ show_label=show_label,
105
+ container=container,
106
+ scale=scale,
107
+ min_width=min_width,
108
+ interactive=interactive,
109
+ visible=visible,
110
+ elem_id=elem_id,
111
+ elem_classes=elem_classes,
112
+ render=render,
113
+ key=key,
114
+ value=value,
115
+ )
116
+
117
+ def example_payload(self) -> Any:
118
+ return [["Users", "gradio", "app.py"]]
119
+
120
+ def example_value(self) -> Any:
121
+ return ["Users", "gradio", "app.py"]
122
+
123
+ def preprocess(self, payload: FileExplorerData | None) -> list[str] | str | None:
124
+ """
125
+ Parameters:
126
+ payload: List of selected files as a FileExplorerData object.
127
+ Returns:
128
+ Passes the selected file or directory as a `str` path (relative to `root`) or `list[str}` depending on `file_count`
129
+ """
130
+ if payload is None:
131
+ return None
132
+
133
+ if self.file_count == "single":
134
+ if len(payload.root) > 1:
135
+ raise ValueError(
136
+ f"Expected only one file, but {len(payload.root)} were selected."
137
+ )
138
+ elif len(payload.root) == 0:
139
+ return None
140
+ else:
141
+ return self._safe_join(payload.root[0])
142
+ files = []
143
+ for file in payload.root:
144
+ file_ = self._safe_join(file)
145
+ files.append(file_)
146
+ return files
147
+
148
+ def _strip_root(self, path):
149
+ if path.startswith(self.root_dir):
150
+ return path[len(self.root_dir) + 1 :]
151
+ return path
152
+
153
+ def postprocess(self, value: str | list[str] | None) -> FileExplorerData | None:
154
+ """
155
+ Parameters:
156
+ value: Expects function to return a `str` path to a file, or `list[str]` consisting of paths to files.
157
+ Returns:
158
+ A FileExplorerData object containing the selected files as a list of strings.
159
+ """
160
+ if value is None:
161
+ return None
162
+
163
+ files = [value] if isinstance(value, str) else value
164
+ root = []
165
+ for file in files:
166
+ root.append(self._strip_root(file).split(os.path.sep))
167
+
168
+ return FileExplorerData(root=root)
169
+
170
+ @server
171
+ def ls(self, subdirectory: list | None = None) -> list[dict[str, str]] | None:
172
+ """
173
+ Returns:
174
+ a list of dictionaries, where each dictionary represents a file or subdirectory in the given subdirectory
175
+ """
176
+ if subdirectory is None:
177
+ subdirectory = []
178
+
179
+ full_subdir_path = self._safe_join(subdirectory)
180
+
181
+ try:
182
+ subdir_items = sorted(os.listdir(full_subdir_path))
183
+ except FileNotFoundError:
184
+ return []
185
+
186
+ files, folders = [], []
187
+ for item in subdir_items:
188
+ full_path = os.path.join(full_subdir_path, item)
189
+ is_file = not os.path.isdir(full_path)
190
+ valid_by_glob = fnmatch.fnmatch(full_path, self.glob)
191
+ if is_file and not valid_by_glob:
192
+ continue
193
+ if self.ignore_glob and fnmatch.fnmatch(full_path, self.ignore_glob):
194
+ continue
195
+ target = files if is_file else folders
196
+ target.append(
197
+ {
198
+ "name": item,
199
+ "type": "file" if is_file else "folder",
200
+ "valid": valid_by_glob,
201
+ }
202
+ )
203
+
204
+ return folders + files
205
+
206
+ def _safe_join(self, folders):
207
+ combined_path = os.path.join(self.root_dir, *folders)
208
+ absolute_path = os.path.abspath(combined_path)
209
+ if os.path.commonprefix([self.root_dir, absolute_path]) != os.path.abspath(
210
+ self.root_dir
211
+ ):
212
+ raise ValueError("Attempted to navigate outside of root directory")
213
+ return absolute_path
214
+
215
+
216
    def change(self,
        fn: Callable | None,
        inputs: Component | Sequence[Component] | set[Component] | None = None,
        outputs: Component | Sequence[Component] | None = None,
        api_name: str | None | Literal[False] = None,
        scroll_to_output: bool = False,
        show_progress: Literal["full", "minimal", "hidden"] = "full",
        queue: bool | None = None,
        batch: bool = False,
        max_batch_size: int = 4,
        preprocess: bool = True,
        postprocess: bool = True,
        cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
        every: Timer | float | None = None,
        trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
        js: str | None = None,
        concurrency_limit: int | None | Literal["default"] = "default",
        concurrency_id: str | None = None,
        show_api: bool = True) -> Dependency:
        """
        Parameters:
            fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
            inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
            api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
            scroll_to_output: If True, will scroll to output component on completion
            show_progress: how to show progress animation while the event is pending: "full" (default), "minimal", or "hidden"
            queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
            batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
            max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
            preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
            postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
            cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
            every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
            trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
            js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
            concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
            concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
            show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
        """
        ...
moondream/lib/python3.10/site-packages/gradio/components/image.pyi ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.Image() component."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import warnings
6
+ from pathlib import Path
7
+ from typing import TYPE_CHECKING, Any, Literal, cast
8
+
9
+ import numpy as np
10
+ import PIL.Image
11
+ from gradio_client import handle_file
12
+ from gradio_client.documentation import document
13
+ from PIL import ImageOps
14
+
15
+ from gradio import image_utils, utils
16
+ from gradio.components.base import Component, StreamingInput
17
+ from gradio.data_classes import FileData
18
+ from gradio.events import Events
19
+
20
+ if TYPE_CHECKING:
21
+ from gradio.components import Timer
22
+
23
+ PIL.Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843
24
+
25
+ from gradio.events import Dependency
26
+
27
+ @document()
28
+ class Image(StreamingInput, Component):
29
+ """
30
+ Creates an image component that can be used to upload images (as an input) or display images (as an output).
31
+
32
+ Demos: sepia_filter, fake_diffusion
33
+ Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, create-your-own-friends-with-a-gan
34
+ """
35
+
36
+ EVENTS = [
37
+ Events.clear,
38
+ Events.change,
39
+ Events.stream,
40
+ Events.select,
41
+ Events.upload,
42
+ ]
43
+
44
+ data_model = FileData
45
+
46
+ def __init__(
47
+ self,
48
+ value: str | PIL.Image.Image | np.ndarray | None = None,
49
+ *,
50
+ format: str = "webp",
51
+ height: int | str | None = None,
52
+ width: int | str | None = None,
53
+ image_mode: Literal[
54
+ "1", "L", "P", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"
55
+ ] = "RGB",
56
+ sources: list[Literal["upload", "webcam", "clipboard"]] | None = None,
57
+ type: Literal["numpy", "pil", "filepath"] = "numpy",
58
+ label: str | None = None,
59
+ every: Timer | float | None = None,
60
+ inputs: Component | list[Component] | set[Component] | None = None,
61
+ show_label: bool | None = None,
62
+ show_download_button: bool = True,
63
+ container: bool = True,
64
+ scale: int | None = None,
65
+ min_width: int = 160,
66
+ interactive: bool | None = None,
67
+ visible: bool = True,
68
+ streaming: bool = False,
69
+ elem_id: str | None = None,
70
+ elem_classes: list[str] | str | None = None,
71
+ render: bool = True,
72
+ key: int | str | None = None,
73
+ mirror_webcam: bool = True,
74
+ show_share_button: bool | None = None,
75
+ ):
76
+ """
77
+ Parameters:
78
+ value: A PIL Image, numpy array, path or URL for the default value that Image component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.
79
+ format: File format (e.g. "png" or "gif") to save image if it does not already have a valid format (e.g. if the image is being returned to the frontend as a numpy array or PIL Image). The format should be supported by the PIL library. This parameter has no effect on SVG files.
80
+ height: The height of the displayed image, specified in pixels if a number is passed, or in CSS units if a string is passed.
81
+ width: The width of the displayed image, specified in pixels if a number is passed, or in CSS units if a string is passed.
82
+ image_mode: "RGB" if color, or "L" if black and white. See https://pillow.readthedocs.io/en/stable/handbook/concepts.html for other supported image modes and their meaning. This parameter has no effect on SVG or GIF files.
83
+ sources: List of sources for the image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "clipboard" allows users to paste an image from the clipboard. If None, defaults to ["upload", "webcam", "clipboard"] if streaming is False, otherwise defaults to ["webcam"].
84
+ type: The format the image is converted before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. If the image is SVG, the `type` is ignored and the filepath of the SVG is returned. To support animated GIFs in input, the `type` should be set to "filepath" or "pil".
85
+ label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
86
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
87
+ inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
88
+ show_label: if True, will display label.
89
+ show_download_button: If True, will display button to download image.
90
+ container: If True, will place the component in a container - providing some extra padding around the border.
91
+ scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
92
+ min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
93
+ interactive: if True, will allow users to upload and edit an image; if False, can only be used to display images. If not provided, this is inferred based on whether the component is used as an input or output.
94
+ visible: If False, component will be hidden.
95
+ streaming: If True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'webcam'.
96
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
97
+ elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
98
+ render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
99
+ key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
100
+ mirror_webcam: If True webcam will be mirrored. Default is True.
101
+ show_share_button: If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.
102
+ """
103
+ self.format = format
104
+ self.mirror_webcam = mirror_webcam
105
+ valid_types = ["numpy", "pil", "filepath"]
106
+ if type not in valid_types:
107
+ raise ValueError(
108
+ f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
109
+ )
110
+ self.type = type
111
+ self.height = height
112
+ self.width = width
113
+ self.image_mode = image_mode
114
+ valid_sources = ["upload", "webcam", "clipboard"]
115
+ if sources is None:
116
+ self.sources = (
117
+ ["webcam"] if streaming else ["upload", "webcam", "clipboard"]
118
+ )
119
+ elif isinstance(sources, str):
120
+ self.sources = [sources] # type: ignore
121
+ else:
122
+ self.sources = sources
123
+ for source in self.sources: # type: ignore
124
+ if source not in valid_sources:
125
+ raise ValueError(
126
+ f"`sources` must a list consisting of elements in {valid_sources}"
127
+ )
128
+ self.streaming = streaming
129
+ self.show_download_button = show_download_button
130
+ if streaming and self.sources != ["webcam"]:
131
+ raise ValueError(
132
+ "Image streaming only available if sources is ['webcam']. Streaming not supported with multiple sources."
133
+ )
134
+ self.show_share_button = (
135
+ (utils.get_space() is not None)
136
+ if show_share_button is None
137
+ else show_share_button
138
+ )
139
+ super().__init__(
140
+ label=label,
141
+ every=every,
142
+ inputs=inputs,
143
+ show_label=show_label,
144
+ container=container,
145
+ scale=scale,
146
+ min_width=min_width,
147
+ interactive=interactive,
148
+ visible=visible,
149
+ elem_id=elem_id,
150
+ elem_classes=elem_classes,
151
+ render=render,
152
+ key=key,
153
+ value=value,
154
+ )
155
+
156
    def preprocess(
        self, payload: FileData | None
    ) -> np.ndarray | PIL.Image.Image | str | None:
        """
        Parameters:
            payload: image data in the form of a FileData object
        Returns:
            Passes the uploaded image as a `numpy.array`, `PIL.Image` or `str` filepath depending on `type`. For SVGs, the `type` parameter is ignored and the filepath of the SVG is returned.
        """
        if payload is None:
            return payload
        file_path = Path(payload.path)
        # Derive the output name and format from the original upload name when
        # available; otherwise fall back to a generic "image.webp".
        if payload.orig_name:
            p = Path(payload.orig_name)
            name = p.stem
            suffix = p.suffix.replace(".", "")
            # Normalize both JPEG extensions to the canonical PIL format name.
            if suffix in ["jpg", "jpeg"]:
                suffix = "jpeg"
        else:
            name = "image"
            suffix = "webp"

        # SVGs bypass all PIL processing: the filepath is returned directly,
        # regardless of `self.type` (see docstring).
        if suffix.lower() == "svg":
            return str(file_path)

        im = PIL.Image.open(file_path)
        exif = im.getexif()
        # 274 is the code for image rotation and 1 means "correct orientation"
        if exif.get(274, 1) != 1 and hasattr(ImageOps, "exif_transpose"):
            try:
                im = ImageOps.exif_transpose(im)
            except Exception:
                # Best-effort: a corrupt EXIF block should not fail the upload.
                warnings.warn(
                    f"Failed to transpose image {file_path} based on EXIF data."
                )
        # GIFs keep their native mode — presumably to preserve animation
        # frames (TODO confirm); everything else is converted to the
        # component's configured image_mode, with PIL conversion warnings
        # suppressed.
        if suffix.lower() != "gif" and im is not None:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                im = im.convert(self.image_mode)
        # Convert to the representation requested by `self.type`
        # (numpy array, PIL image, or cached filepath).
        return image_utils.format_image(
            im,
            cast(Literal["numpy", "pil", "filepath"], self.type),
            self.GRADIO_CACHE,
            name=name,
            format=suffix,
        )
202
+
203
+ def postprocess(
204
+ self, value: np.ndarray | PIL.Image.Image | str | Path | None
205
+ ) -> FileData | None:
206
+ """
207
+ Parameters:
208
+ value: Expects a `numpy.array`, `PIL.Image`, or `str` or `pathlib.Path` filepath to an image which is displayed.
209
+ Returns:
210
+ Returns the image as a `FileData` object.
211
+ """
212
+ if value is None:
213
+ return None
214
+ if isinstance(value, str) and value.lower().endswith(".svg"):
215
+ return FileData(path=value, orig_name=Path(value).name)
216
+ saved = image_utils.save_image(value, self.GRADIO_CACHE, self.format)
217
+ orig_name = Path(saved).name if Path(saved).exists() else None
218
+ return FileData(path=saved, orig_name=orig_name)
219
+
220
+ def check_streamable(self):
221
+ if self.streaming and self.sources != ["webcam"]:
222
+ raise ValueError(
223
+ "Image streaming only available if sources is ['webcam']. Streaming not supported with multiple sources."
224
+ )
225
+
226
+ def example_payload(self) -> Any:
227
+ return handle_file(
228
+ "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
229
+ )
230
+
231
+ def example_value(self) -> Any:
232
+ return "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"
233
+
234
+
235
    def clear(self,
        fn: Callable | None,
        inputs: Component | Sequence[Component] | set[Component] | None = None,
        outputs: Component | Sequence[Component] | None = None,
        api_name: str | None | Literal[False] = None,
        scroll_to_output: bool = False,
        show_progress: Literal["full", "minimal", "hidden"] = "full",
        queue: bool | None = None,
        batch: bool = False,
        max_batch_size: int = 4,
        preprocess: bool = True,
        postprocess: bool = True,
        cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
        every: Timer | float | None = None,
        trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
        js: str | None = None,
        concurrency_limit: int | None | Literal["default"] = "default",
        concurrency_id: str | None = None,
        show_api: bool = True) -> Dependency:
        """
        Parameters:
            fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
            inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
            api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
            scroll_to_output: If True, will scroll to output component on completion
            show_progress: how to show progress animation while the event is pending: "full" (default), "minimal", or "hidden"
            queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
            batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
            max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
            preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
            postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
            cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
            every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
            trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
            js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
            concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
            concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
            show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
        """
        ...
276
+
277
    def change(self,
        fn: Callable | None,
        inputs: Component | Sequence[Component] | set[Component] | None = None,
        outputs: Component | Sequence[Component] | None = None,
        api_name: str | None | Literal[False] = None,
        scroll_to_output: bool = False,
        show_progress: Literal["full", "minimal", "hidden"] = "full",
        queue: bool | None = None,
        batch: bool = False,
        max_batch_size: int = 4,
        preprocess: bool = True,
        postprocess: bool = True,
        cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
        every: Timer | float | None = None,
        trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
        js: str | None = None,
        concurrency_limit: int | None | Literal["default"] = "default",
        concurrency_id: str | None = None,
        show_api: bool = True) -> Dependency:
        """
        Parameters:
            fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
            inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
            api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
            scroll_to_output: If True, will scroll to output component on completion
            show_progress: how to show progress animation while the event is pending: "full" (default), "minimal", or "hidden"
            queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
            batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
            max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
            preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
            postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
            cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
            every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
            trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
            js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
            concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
            concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
            show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
        """
        ...
318
+
319
    def stream(self,
        fn: Callable | None,
        inputs: Component | Sequence[Component] | set[Component] | None = None,
        outputs: Component | Sequence[Component] | None = None,
        api_name: str | None | Literal[False] = None,
        scroll_to_output: bool = False,
        show_progress: Literal["full", "minimal", "hidden"] = "full",
        queue: bool | None = None,
        batch: bool = False,
        max_batch_size: int = 4,
        preprocess: bool = True,
        postprocess: bool = True,
        cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
        every: Timer | float | None = None,
        trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
        js: str | None = None,
        concurrency_limit: int | None | Literal["default"] = "default",
        concurrency_id: str | None = None,
        show_api: bool = True) -> Dependency:
        """
        Parameters:
            fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
            inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
            api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
            scroll_to_output: If True, will scroll to output component on completion
            show_progress: how to show progress animation while the event is pending: "full" (default), "minimal", or "hidden"
            queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
            batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
            max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
            preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
            postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
            cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
            every: Continuously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
            trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
            js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
            concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
            concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
            show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
        """
        ...
360
+
361
+ def select(self,
362
+ fn: Callable | None,
363
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
364
+ outputs: Component | Sequence[Component] | None = None,
365
+ api_name: str | None | Literal[False] = None,
366
+ scroll_to_output: bool = False,
367
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
368
+ queue: bool | None = None,
369
+ batch: bool = False,
370
+ max_batch_size: int = 4,
371
+ preprocess: bool = True,
372
+ postprocess: bool = True,
373
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
374
+ every: Timer | float | None = None,
375
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
376
+ js: str | None = None,
377
+ concurrency_limit: int | None | Literal["default"] = "default",
378
+ concurrency_id: str | None = None,
379
+ show_api: bool = True) -> Dependency:
380
+ """
381
+ Parameters:
382
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
383
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
384
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
385
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
386
+ scroll_to_output: If True, will scroll to output component on completion
387
+ show_progress: If True, will show progress animation while pending
388
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
389
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
390
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
391
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
392
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
393
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
394
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
395
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
396
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
397
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
398
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
399
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
400
+ """
401
+ ...
402
+
403
+ def upload(self,
404
+ fn: Callable | None,
405
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
406
+ outputs: Component | Sequence[Component] | None = None,
407
+ api_name: str | None | Literal[False] = None,
408
+ scroll_to_output: bool = False,
409
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
410
+ queue: bool | None = None,
411
+ batch: bool = False,
412
+ max_batch_size: int = 4,
413
+ preprocess: bool = True,
414
+ postprocess: bool = True,
415
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
416
+ every: Timer | float | None = None,
417
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
418
+ js: str | None = None,
419
+ concurrency_limit: int | None | Literal["default"] = "default",
420
+ concurrency_id: str | None = None,
421
+ show_api: bool = True) -> Dependency:
422
+ """
423
+ Parameters:
424
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
425
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
426
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
427
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
428
+ scroll_to_output: If True, will scroll to output component on completion
429
+ show_progress: If True, will show progress animation while pending
430
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
431
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
432
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
433
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
434
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
435
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
436
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
437
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
438
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
439
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
440
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
441
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
442
+ """
443
+ ...
moondream/lib/python3.10/site-packages/gradio/components/markdown.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.Markdown() component."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import inspect
6
+ from typing import TYPE_CHECKING, Any, Callable
7
+
8
+ from gradio_client.documentation import document
9
+
10
+ from gradio.components.base import Component
11
+ from gradio.events import Events
12
+
13
+ if TYPE_CHECKING:
14
+ from gradio.components import Timer
15
+
16
+
17
+ @document()
18
+ class Markdown(Component):
19
+ """
20
+ Used to render arbitrary Markdown output. Can also render latex enclosed by dollar signs. As this component does not accept user input,
21
+ it is rarely used as an input component.
22
+
23
+ Demos: blocks_hello, blocks_kinematics
24
+ Guides: key-features
25
+ """
26
+
27
+ EVENTS = [Events.change]
28
+
29
+ def __init__(
30
+ self,
31
+ value: str | Callable | None = None,
32
+ *,
33
+ label: str | None = None,
34
+ every: Timer | float | None = None,
35
+ inputs: Component | list[Component] | set[Component] | None = None,
36
+ show_label: bool | None = None,
37
+ rtl: bool = False,
38
+ latex_delimiters: list[dict[str, str | bool]] | None = None,
39
+ visible: bool = True,
40
+ elem_id: str | None = None,
41
+ elem_classes: list[str] | str | None = None,
42
+ render: bool = True,
43
+ key: int | str | None = None,
44
+ sanitize_html: bool = True,
45
+ line_breaks: bool = False,
46
+ header_links: bool = False,
47
+ height: int | str | None = None,
48
+ ):
49
+ """
50
+ Parameters:
51
+ value: Value to show in Markdown component. If callable, the function will be called whenever the app loads to set the initial value of the component.
52
+ label: The label for this component. Is used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
53
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
54
+ inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
55
+ show_label: This parameter has no effect.
56
+ rtl: If True, sets the direction of the rendered text to right-to-left. Default is False, which renders text left-to-right.
57
+ latex_delimiters: A list of dicts of the form {"left": open delimiter (str), "right": close delimiter (str), "display": whether to display in newline (bool)} that will be used to render LaTeX expressions. If not provided, `latex_delimiters` is set to `[{ "left": "$$", "right": "$$", "display": True }]`, so only expressions enclosed in $$ delimiters will be rendered as LaTeX, and in a new line. Pass in an empty list to disable LaTeX rendering. For more information, see the [KaTeX documentation](https://katex.org/docs/autorender.html).
58
+ visible: If False, component will be hidden.
59
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
60
+ elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
61
+ render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
62
+ key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
63
+ sanitize_html: If False, will disable HTML sanitization when converted from markdown. This is not recommended, as it can lead to security vulnerabilities.
64
+ line_breaks: If True, will enable Github-flavored Markdown line breaks in chatbot messages. If False (default), single new lines will be ignored.
65
+ header_links: If True, will automatically create anchors for headings, displaying a link icon on hover.
66
+ height: An optional maximum height of this component, specified in pixels if a number is passed, or in CSS units (e.g., '200px') if a stirng is passed in. If context exceeds this height, a scrollbar is added.
67
+ """
68
+ self.rtl = rtl
69
+ if latex_delimiters is None:
70
+ latex_delimiters = [{"left": "$$", "right": "$$", "display": True}]
71
+ self.latex_delimiters = latex_delimiters
72
+ self.sanitize_html = sanitize_html
73
+ self.line_breaks = line_breaks
74
+ self.header_links = header_links
75
+ self.height = height
76
+
77
+ super().__init__(
78
+ label=label,
79
+ every=every,
80
+ inputs=inputs,
81
+ show_label=show_label,
82
+ visible=visible,
83
+ elem_id=elem_id,
84
+ elem_classes=elem_classes,
85
+ render=render,
86
+ key=key,
87
+ value=value,
88
+ )
89
+
90
+ def preprocess(self, payload: str | None) -> str | None:
91
+ """
92
+ Parameters:
93
+ payload: the `str` of Markdown corresponding to the displayed value.
94
+ Returns:
95
+ Passes the `str` of Markdown corresponding to the displayed value.
96
+ """
97
+ return payload
98
+
99
+ def postprocess(self, value: str | None) -> str | None:
100
+ """
101
+ Parameters:
102
+ value: Expects a valid `str` that can be rendered as Markdown.
103
+ Returns:
104
+ The same `str` as the input, but with leading and trailing whitespace removed.
105
+ """
106
+ if value is None:
107
+ return None
108
+ unindented_y = inspect.cleandoc(value)
109
+ return unindented_y
110
+
111
+ def example_payload(self) -> Any:
112
+ return "# Hello!"
113
+
114
+ def example_value(self) -> Any:
115
+ return "# Hello!"
116
+
117
+ def api_info(self) -> dict[str, Any]:
118
+ return {"type": "string"}
moondream/lib/python3.10/site-packages/gradio/components/radio.pyi ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.Radio() component."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import TYPE_CHECKING, Any, Callable
6
+
7
+ from gradio_client.documentation import document
8
+
9
+ from gradio.components.base import Component, FormComponent
10
+ from gradio.events import Events
11
+
12
+ if TYPE_CHECKING:
13
+ from gradio.components import Timer
14
+
15
+ from gradio.events import Dependency
16
+
17
+ @document()
18
+ class Radio(FormComponent):
19
+ """
20
+ Creates a set of (string or numeric type) radio buttons of which only one can be selected.
21
+
22
+ Demos: sentence_builder, blocks_essay
23
+ """
24
+
25
+ EVENTS = [Events.select, Events.change, Events.input]
26
+
27
+ def __init__(
28
+ self,
29
+ choices: list[str | int | float | tuple[str, str | int | float]] | None = None,
30
+ *,
31
+ value: str | int | float | Callable | None = None,
32
+ type: str = "value",
33
+ label: str | None = None,
34
+ info: str | None = None,
35
+ every: Timer | float | None = None,
36
+ inputs: Component | list[Component] | set[Component] | None = None,
37
+ show_label: bool | None = None,
38
+ container: bool = True,
39
+ scale: int | None = None,
40
+ min_width: int = 160,
41
+ interactive: bool | None = None,
42
+ visible: bool = True,
43
+ elem_id: str | None = None,
44
+ elem_classes: list[str] | str | None = None,
45
+ render: bool = True,
46
+ key: int | str | None = None,
47
+ ):
48
+ """
49
+ Parameters:
50
+ choices: A list of string or numeric options to select from. An option can also be a tuple of the form (name, value), where name is the displayed name of the radio button and value is the value to be passed to the function, or returned by the function.
51
+ value: The option selected by default. If None, no option is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
52
+ type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
53
+ label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
54
+ info: Additional component description.
55
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
56
+ inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
57
+ show_label: if True, will display label.
58
+ container: If True, will place the component in a container - providing some extra padding around the border.
59
+ scale: Relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
60
+ min_width: Minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
61
+ interactive: If True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
62
+ visible: If False, component will be hidden.
63
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
64
+ elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
65
+ render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
66
+ key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
67
+ """
68
+ self.choices = (
69
+ # Although we expect choices to be a list of tuples, it can be a list of tuples if the Gradio app
70
+ # is loaded with gr.load() since Python tuples are converted to lists in JSON.
71
+ [tuple(c) if isinstance(c, (tuple, list)) else (str(c), c) for c in choices]
72
+ if choices
73
+ else []
74
+ )
75
+ valid_types = ["value", "index"]
76
+ if type not in valid_types:
77
+ raise ValueError(
78
+ f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
79
+ )
80
+ self.type = type
81
+ super().__init__(
82
+ label=label,
83
+ info=info,
84
+ every=every,
85
+ inputs=inputs,
86
+ show_label=show_label,
87
+ container=container,
88
+ scale=scale,
89
+ min_width=min_width,
90
+ interactive=interactive,
91
+ visible=visible,
92
+ elem_id=elem_id,
93
+ elem_classes=elem_classes,
94
+ render=render,
95
+ key=key,
96
+ value=value,
97
+ )
98
+
99
+ def example_payload(self) -> Any:
100
+ return self.choices[0][1] if self.choices else None
101
+
102
+ def example_value(self) -> Any:
103
+ return self.choices[0][1] if self.choices else None
104
+
105
+ def preprocess(self, payload: str | int | float | None) -> str | int | float | None:
106
+ """
107
+ Parameters:
108
+ payload: Selected choice in the radio group
109
+ Returns:
110
+ Passes the value of the selected radio button as a `str | int | float`, or its index as an `int` into the function, depending on `type`.
111
+ """
112
+ if self.type == "value":
113
+ return payload
114
+ elif self.type == "index":
115
+ if payload is None:
116
+ return None
117
+ else:
118
+ choice_values = [value for _, value in self.choices]
119
+ return (
120
+ choice_values.index(payload) if payload in choice_values else None
121
+ )
122
+ else:
123
+ raise ValueError(
124
+ f"Unknown type: {self.type}. Please choose from: 'value', 'index'."
125
+ )
126
+
127
+ def postprocess(self, value: str | int | float | None) -> str | int | float | None:
128
+ """
129
+ Parameters:
130
+ value: Expects a `str | int | float` corresponding to the value of the radio button to be selected
131
+ Returns:
132
+ The same value
133
+ """
134
+ return value
135
+
136
+ def api_info(self) -> dict[str, Any]:
137
+ return {
138
+ "enum": [c[1] for c in self.choices],
139
+ "title": "Radio",
140
+ "type": "string",
141
+ }
142
+
143
+
144
+ def select(self,
145
+ fn: Callable | None,
146
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
147
+ outputs: Component | Sequence[Component] | None = None,
148
+ api_name: str | None | Literal[False] = None,
149
+ scroll_to_output: bool = False,
150
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
151
+ queue: bool | None = None,
152
+ batch: bool = False,
153
+ max_batch_size: int = 4,
154
+ preprocess: bool = True,
155
+ postprocess: bool = True,
156
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
157
+ every: Timer | float | None = None,
158
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
159
+ js: str | None = None,
160
+ concurrency_limit: int | None | Literal["default"] = "default",
161
+ concurrency_id: str | None = None,
162
+ show_api: bool = True) -> Dependency:
163
+ """
164
+ Parameters:
165
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
166
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
167
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
168
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
169
+ scroll_to_output: If True, will scroll to output component on completion
170
+ show_progress: If True, will show progress animation while pending
171
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
172
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
173
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
174
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
175
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
176
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
177
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
178
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
179
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
180
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
181
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
182
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
183
+ """
184
+ ...
185
+
186
+ def change(self,
187
+ fn: Callable | None,
188
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
189
+ outputs: Component | Sequence[Component] | None = None,
190
+ api_name: str | None | Literal[False] = None,
191
+ scroll_to_output: bool = False,
192
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
193
+ queue: bool | None = None,
194
+ batch: bool = False,
195
+ max_batch_size: int = 4,
196
+ preprocess: bool = True,
197
+ postprocess: bool = True,
198
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
199
+ every: Timer | float | None = None,
200
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
201
+ js: str | None = None,
202
+ concurrency_limit: int | None | Literal["default"] = "default",
203
+ concurrency_id: str | None = None,
204
+ show_api: bool = True) -> Dependency:
205
+ """
206
+ Parameters:
207
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
208
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
209
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
210
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
211
+ scroll_to_output: If True, will scroll to output component on completion
212
+ show_progress: If True, will show progress animation while pending
213
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
214
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
215
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
216
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
217
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
218
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
219
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
220
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
221
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
222
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
223
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
224
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
225
+ """
226
+ ...
227
+
228
+ def input(self,
229
+ fn: Callable | None,
230
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
231
+ outputs: Component | Sequence[Component] | None = None,
232
+ api_name: str | None | Literal[False] = None,
233
+ scroll_to_output: bool = False,
234
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
235
+ queue: bool | None = None,
236
+ batch: bool = False,
237
+ max_batch_size: int = 4,
238
+ preprocess: bool = True,
239
+ postprocess: bool = True,
240
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
241
+ every: Timer | float | None = None,
242
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
243
+ js: str | None = None,
244
+ concurrency_limit: int | None | Literal["default"] = "default",
245
+ concurrency_id: str | None = None,
246
+ show_api: bool = True) -> Dependency:
247
+ """
248
+ Parameters:
249
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
250
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
251
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
252
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
253
+ scroll_to_output: If True, will scroll to output component on completion
254
+ show_progress: If True, will show progress animation while pending
255
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
256
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
257
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
258
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
259
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
260
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
261
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
262
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
263
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
264
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
265
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
266
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
267
+ """
268
+ ...
moondream/lib/python3.10/site-packages/gradio/components/slider.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.Slider() component."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import math
6
+ import random
7
+ from typing import TYPE_CHECKING, Any, Callable
8
+
9
+ from gradio_client.documentation import document
10
+
11
+ from gradio.components.base import Component, FormComponent
12
+ from gradio.events import Events
13
+
14
+ if TYPE_CHECKING:
15
+ from gradio.components import Timer
16
+
17
+
18
+ @document()
19
+ class Slider(FormComponent):
20
+ """
21
+ Creates a slider that ranges from {minimum} to {maximum} with a step size of {step}.
22
+
23
+ Demos: sentence_builder, slider_release, interface_random_slider, blocks_random_slider
24
+ Guides: create-your-own-friends-with-a-gan
25
+ """
26
+
27
+ EVENTS = [Events.change, Events.input, Events.release]
28
+
29
+ def __init__(
30
+ self,
31
+ minimum: float = 0,
32
+ maximum: float = 100,
33
+ value: float | Callable | None = None,
34
+ *,
35
+ step: float | None = None,
36
+ label: str | None = None,
37
+ info: str | None = None,
38
+ every: Timer | float | None = None,
39
+ inputs: Component | list[Component] | set[Component] | None = None,
40
+ show_label: bool | None = None,
41
+ container: bool = True,
42
+ scale: int | None = None,
43
+ min_width: int = 160,
44
+ interactive: bool | None = None,
45
+ visible: bool = True,
46
+ elem_id: str | None = None,
47
+ elem_classes: list[str] | str | None = None,
48
+ render: bool = True,
49
+ key: int | str | None = None,
50
+ randomize: bool = False,
51
+ ):
52
+ """
53
+ Parameters:
54
+ minimum: minimum value for slider.
55
+ maximum: maximum value for slider.
56
+ value: default value. If callable, the function will be called whenever the app loads to set the initial value of the component. Ignored if randomized=True.
57
+ step: increment between slider values.
58
+ label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.
59
+ info: additional component description.
60
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
61
+ inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
62
+ show_label: if True, will display label.
63
+ container: If True, will place the component in a container - providing some extra padding around the border.
64
+ scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
65
+ min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
66
+ interactive: if True, slider will be adjustable; if False, adjusting will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
67
+ visible: If False, component will be hidden.
68
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
69
+ elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
70
+ render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
71
+ key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
72
+ randomize: If True, the value of the slider when the app loads is taken uniformly at random from the range given by the minimum and maximum.
73
+ """
74
+ self.minimum = minimum
75
+ self.maximum = maximum
76
+ if step is None:
77
+ difference = maximum - minimum
78
+ power = math.floor(math.log10(difference) - 2)
79
+ self.step = 10**power
80
+ else:
81
+ self.step = step
82
+ if randomize:
83
+ value = self.get_random_value
84
+ super().__init__(
85
+ label=label,
86
+ info=info,
87
+ every=every,
88
+ inputs=inputs,
89
+ show_label=show_label,
90
+ container=container,
91
+ scale=scale,
92
+ min_width=min_width,
93
+ interactive=interactive,
94
+ visible=visible,
95
+ elem_id=elem_id,
96
+ elem_classes=elem_classes,
97
+ render=render,
98
+ key=key,
99
+ value=value,
100
+ )
101
+
102
+ def api_info(self) -> dict[str, Any]:
103
+ return {
104
+ "type": "number",
105
+ "description": f"numeric value between {self.minimum} and {self.maximum}",
106
+ }
107
+
108
+ def example_payload(self) -> Any:
109
+ return self.minimum
110
+
111
+ def example_value(self) -> Any:
112
+ return self.minimum
113
+
114
+ def get_random_value(self):
115
+ n_steps = int((self.maximum - self.minimum) / self.step)
116
+ step = random.randint(0, n_steps)
117
+ value = self.minimum + step * self.step
118
+ # Round to number of decimals in step so that UI doesn't display long decimals
119
+ n_decimals = max(str(self.step)[::-1].find("."), 0)
120
+ if n_decimals:
121
+ value = round(value, n_decimals)
122
+ return value
123
+
124
+ def postprocess(self, value: float | None) -> float:
125
+ """
126
+ Parameters:
127
+ value: Expects an {int} or {float} returned from function and sets slider value to it as long as it is within range (otherwise, sets to minimum value).
128
+ Returns:
129
+ The value of the slider within the range.
130
+ """
131
+ return self.minimum if value is None else value
132
+
133
+ def preprocess(self, payload: float) -> float:
134
+ """
135
+ Parameters:
136
+ payload: slider value
137
+ Returns:
138
+ Passes slider value as a {float} into the function.
139
+ """
140
+ return payload
moondream/lib/python3.10/site-packages/gradio/components/upload_button.pyi ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gr.UploadButton() component."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import tempfile
6
+ import warnings
7
+ from pathlib import Path
8
+ from typing import TYPE_CHECKING, Any, Callable, Literal
9
+
10
+ import gradio_client.utils as client_utils
11
+ from gradio_client import handle_file
12
+ from gradio_client.documentation import document
13
+
14
+ from gradio import processing_utils
15
+ from gradio.components.base import Component
16
+ from gradio.data_classes import FileData, ListFiles
17
+ from gradio.events import Events
18
+ from gradio.utils import NamedString
19
+
20
+ if TYPE_CHECKING:
21
+ from gradio.components import Timer
22
+
23
+ from gradio.events import Dependency
24
+
25
+ @document()
26
+ class UploadButton(Component):
27
+ """
28
+ Used to create an upload button, when clicked allows a user to upload files that satisfy the specified file type or generic files (if file_type not set).
29
+
30
+ Demos: upload_and_download, upload_button
31
+ """
32
+
33
+ EVENTS = [Events.click, Events.upload]
34
+
35
+ def __init__(
36
+ self,
37
+ label: str = "Upload a File",
38
+ value: str | list[str] | Callable | None = None,
39
+ *,
40
+ every: Timer | float | None = None,
41
+ inputs: Component | list[Component] | set[Component] | None = None,
42
+ variant: Literal["primary", "secondary", "stop"] = "secondary",
43
+ visible: bool = True,
44
+ size: Literal["sm", "lg"] | None = None,
45
+ icon: str | None = None,
46
+ scale: int | None = None,
47
+ min_width: int | None = None,
48
+ interactive: bool = True,
49
+ elem_id: str | None = None,
50
+ elem_classes: list[str] | str | None = None,
51
+ render: bool = True,
52
+ key: int | str | None = None,
53
+ type: Literal["filepath", "bytes"] = "filepath",
54
+ file_count: Literal["single", "multiple", "directory"] = "single",
55
+ file_types: list[str] | None = None,
56
+ ):
57
+ """
58
+ Parameters:
59
+ label: Text to display on the button. Defaults to "Upload a File".
60
+ value: File or list of files to upload by default.
61
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
62
+ inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.
63
+ variant: 'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.
64
+ visible: If False, component will be hidden.
65
+ size: Size of the button. Can be "sm" or "lg".
66
+ icon: URL or path to the icon file to display within the button. If None, no icon will be displayed.
67
+ scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.
68
+ min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
69
+ interactive: If False, the UploadButton will be in a disabled state.
70
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
71
+ elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
72
+ render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.
73
+ key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.
74
+ type: Type of value to be returned by component. "file" returns a temporary file object with the same base name as the uploaded file, whose full path can be retrieved by file_obj.name, "binary" returns an bytes object.
75
+ file_count: if single, allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in selected directory. Return type will be list for each file in case of "multiple" or "directory".
76
+ file_types: List of type of files to be uploaded. "file" allows any file to be uploaded, "image" allows only image files to be uploaded, "audio" allows only audio files to be uploaded, "video" allows only video files to be uploaded, "text" allows only text files to be uploaded.
77
+ """
78
+ valid_types = [
79
+ "filepath",
80
+ "binary",
81
+ ]
82
+ if type not in valid_types:
83
+ raise ValueError(
84
+ f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
85
+ )
86
+ self.type = type
87
+ self.file_count = file_count
88
+ if file_count == "directory" and file_types is not None:
89
+ warnings.warn(
90
+ "The `file_types` parameter is ignored when `file_count` is 'directory'."
91
+ )
92
+ if file_types is not None and not isinstance(file_types, list):
93
+ raise ValueError(
94
+ f"Parameter file_types must be a list. Received {file_types.__class__.__name__}"
95
+ )
96
+ if self.file_count in ["multiple", "directory"]:
97
+ self.data_model = ListFiles
98
+ else:
99
+ self.data_model = FileData
100
+ self.size = size
101
+ self.file_types = file_types
102
+ self.label = label
103
+ self.variant = variant
104
+ super().__init__(
105
+ label=label,
106
+ every=every,
107
+ inputs=inputs,
108
+ visible=visible,
109
+ elem_id=elem_id,
110
+ elem_classes=elem_classes,
111
+ render=render,
112
+ key=key,
113
+ value=value,
114
+ scale=scale,
115
+ min_width=min_width,
116
+ interactive=interactive,
117
+ )
118
+ self.icon = self.serve_static_file(icon)
119
+
120
+ def api_info(self) -> dict[str, list[str]]:
121
+ if self.file_count == "single":
122
+ return FileData.model_json_schema()
123
+ else:
124
+ return ListFiles.model_json_schema()
125
+
126
+ def example_payload(self) -> Any:
127
+ if self.file_count == "single":
128
+ return handle_file(
129
+ "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf"
130
+ )
131
+ else:
132
+ return [
133
+ handle_file(
134
+ "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf"
135
+ )
136
+ ]
137
+
138
+ def example_value(self) -> Any:
139
+ if self.file_count == "single":
140
+ return "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf"
141
+ else:
142
+ return [
143
+ "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf"
144
+ ]
145
+
146
+ def _process_single_file(self, f: FileData) -> bytes | NamedString:
147
+ file_name = f.path
148
+ if self.type == "filepath":
149
+ file = tempfile.NamedTemporaryFile(delete=False, dir=self.GRADIO_CACHE)
150
+ file.name = file_name
151
+ return NamedString(file_name)
152
+ elif self.type == "binary":
153
+ with open(file_name, "rb") as file_data:
154
+ return file_data.read()
155
+ else:
156
+ raise ValueError(
157
+ "Unknown type: "
158
+ + str(type)
159
+ + ". Please choose from: 'filepath', 'binary'."
160
+ )
161
+
162
+ def preprocess(
163
+ self, payload: ListFiles | FileData | None
164
+ ) -> bytes | str | list[bytes] | list[str] | None:
165
+ """
166
+ Parameters:
167
+ payload: File information as a FileData object, or a list of FileData objects.
168
+ Returns:
169
+ Passes the file as a `str` or `bytes` object, or a list of `str` or list of `bytes` objects, depending on `type` and `file_count`.
170
+ """
171
+ if payload is None:
172
+ return None
173
+
174
+ if self.file_count == "single":
175
+ if isinstance(payload, ListFiles):
176
+ return self._process_single_file(payload[0])
177
+ return self._process_single_file(payload)
178
+
179
+ if isinstance(payload, ListFiles):
180
+ return [self._process_single_file(f) for f in payload] # type: ignore
181
+ return [self._process_single_file(payload)] # type: ignore
182
+
183
+ def _download_files(self, value: str | list[str]) -> str | list[str]:
184
+ downloaded_files = []
185
+ if isinstance(value, list):
186
+ for file in value:
187
+ if client_utils.is_http_url_like(file):
188
+ downloaded_file = processing_utils.save_url_to_cache(
189
+ file, self.GRADIO_CACHE
190
+ )
191
+ downloaded_files.append(downloaded_file)
192
+ else:
193
+ downloaded_files.append(file)
194
+ return downloaded_files
195
+ if client_utils.is_http_url_like(value):
196
+ downloaded_file = processing_utils.save_url_to_cache(
197
+ value, self.GRADIO_CACHE
198
+ )
199
+ return downloaded_file
200
+ else:
201
+ return value
202
+
203
+ def postprocess(self, value: str | list[str] | None) -> ListFiles | FileData | None:
204
+ """
205
+ Parameters:
206
+ value: Expects a `str` filepath or URL, or a `list[str]` of filepaths/URLs.
207
+ Returns:
208
+ File information as a FileData object, or a list of FileData objects.
209
+ """
210
+ if value is None:
211
+ return None
212
+ value = self._download_files(value)
213
+ if isinstance(value, list):
214
+ return ListFiles(
215
+ root=[
216
+ FileData(
217
+ path=file,
218
+ orig_name=Path(file).name,
219
+ size=Path(file).stat().st_size,
220
+ )
221
+ for file in value
222
+ ]
223
+ )
224
+ else:
225
+ return FileData(
226
+ path=value,
227
+ orig_name=Path(value).name,
228
+ size=Path(value).stat().st_size,
229
+ )
230
+
231
+ @property
232
+ def skip_api(self):
233
+ return False
234
+
235
+
236
+ def click(self,
237
+ fn: Callable | None,
238
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
239
+ outputs: Component | Sequence[Component] | None = None,
240
+ api_name: str | None | Literal[False] = None,
241
+ scroll_to_output: bool = False,
242
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
243
+ queue: bool | None = None,
244
+ batch: bool = False,
245
+ max_batch_size: int = 4,
246
+ preprocess: bool = True,
247
+ postprocess: bool = True,
248
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
249
+ every: Timer | float | None = None,
250
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
251
+ js: str | None = None,
252
+ concurrency_limit: int | None | Literal["default"] = "default",
253
+ concurrency_id: str | None = None,
254
+ show_api: bool = True) -> Dependency:
255
+ """
256
+ Parameters:
257
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
258
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
259
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
260
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
261
+ scroll_to_output: If True, will scroll to output component on completion
262
+ show_progress: If True, will show progress animation while pending
263
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
264
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
265
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
266
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
267
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
268
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
269
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
270
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
271
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
272
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
273
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
274
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
275
+ """
276
+ ...
277
+
278
+ def upload(self,
279
+ fn: Callable | None,
280
+ inputs: Component | Sequence[Component] | set[Component] | None = None,
281
+ outputs: Component | Sequence[Component] | None = None,
282
+ api_name: str | None | Literal[False] = None,
283
+ scroll_to_output: bool = False,
284
+ show_progress: Literal["full", "minimal", "hidden"] = "full",
285
+ queue: bool | None = None,
286
+ batch: bool = False,
287
+ max_batch_size: int = 4,
288
+ preprocess: bool = True,
289
+ postprocess: bool = True,
290
+ cancels: dict[str, Any] | list[dict[str, Any]] | None = None,
291
+ every: Timer | float | None = None,
292
+ trigger_mode: Literal["once", "multiple", "always_last"] | None = None,
293
+ js: str | None = None,
294
+ concurrency_limit: int | None | Literal["default"] = "default",
295
+ concurrency_id: str | None = None,
296
+ show_api: bool = True) -> Dependency:
297
+ """
298
+ Parameters:
299
+ fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
300
+ inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
301
+ outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
302
+ api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.
303
+ scroll_to_output: If True, will scroll to output component on completion
304
+ show_progress: If True, will show progress animation while pending
305
+ queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.
306
+ batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
307
+ max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
308
+ preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
309
+ postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
310
+ cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.
311
+ every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.
312
+ trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.
313
+ js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
314
+ concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).
315
+ concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.
316
+ show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.
317
+ """
318
+ ...
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py ADDED
File without changes
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py ADDED
File without changes
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cuda_stdint.h ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions
6
+ * are met:
7
+ * * Redistributions of source code must retain the above copyright
8
+ * notice, this list of conditions and the following disclaimer.
9
+ * * Redistributions in binary form must reproduce the above copyright
10
+ * notice, this list of conditions and the following disclaimer in the
11
+ * documentation and/or other materials provided with the distribution.
12
+ * * Neither the name of NVIDIA CORPORATION nor the names of its
13
+ * contributors may be used to endorse or promote products derived
14
+ * from this software without specific prior written permission.
15
+ *
16
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ */
28
+
29
+ #ifndef __cuda_stdint_h__
30
+ #define __cuda_stdint_h__
31
+
32
+ // Compiler-specific treatment for C99's stdint.h
33
+ //
34
+ // By default, this header will use the standard headers (so it
35
+ // is your responsibility to make sure they are available), except
36
+ // on MSVC before Visual Studio 2010, when they were not provided.
37
+ // To support old MSVC, a few of the commonly-used definitions are
38
+ // provided here. If more definitions are needed, add them here,
39
+ // or replace these definitions with a complete implementation,
40
+ // such as the ones available from Google, Boost, or MSVC10. You
41
+ // can prevent the definition of any of these types (in order to
42
+ // use your own) by #defining CU_STDINT_TYPES_ALREADY_DEFINED.
43
+
44
+ #if !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
45
+
46
+ // In VS including stdint.h forces the C++ runtime dep - provide an opt-out
47
+ // (CU_STDINT_VS_FORCE_NO_STDINT_H) for users that care (notably static
48
+ // cudart).
49
+ #if defined(_MSC_VER) && ((_MSC_VER < 1600) || defined(CU_STDINT_VS_FORCE_NO_STDINT_H))
50
+
51
+ // These definitions can be used with MSVC 8 and 9,
52
+ // which don't ship with stdint.h:
53
+
54
+ typedef unsigned char uint8_t;
55
+
56
+ typedef short int16_t;
57
+ typedef unsigned short uint16_t;
58
+
59
+ // To keep it consistent with all MSVC build. define those types
60
+ // in the exact same way they are defined with the MSVC headers
61
+ #if defined(_MSC_VER)
62
+ typedef signed char int8_t;
63
+
64
+ typedef int int32_t;
65
+ typedef unsigned int uint32_t;
66
+
67
+ typedef long long int64_t;
68
+ typedef unsigned long long uint64_t;
69
+ #else
70
+ typedef char int8_t;
71
+
72
+ typedef long int32_t;
73
+ typedef unsigned long uint32_t;
74
+
75
+ typedef __int64 int64_t;
76
+ typedef unsigned __int64 uint64_t;
77
+ #endif
78
+
79
+ #elif defined(__DJGPP__)
80
+
81
+ // These definitions can be used when compiling
82
+ // C code with DJGPP, which only provides stdint.h
83
+ // when compiling C++ code with TR1 enabled.
84
+
85
+ typedef char int8_t;
86
+ typedef unsigned char uint8_t;
87
+
88
+ typedef short int16_t;
89
+ typedef unsigned short uint16_t;
90
+
91
+ typedef long int32_t;
92
+ typedef unsigned long uint32_t;
93
+
94
+ typedef long long int64_t;
95
+ typedef unsigned long long uint64_t;
96
+
97
+ #else
98
+
99
+ // Use standard headers, as specified by C99 and C++ TR1.
100
+ // Known to be provided by:
101
+ // - gcc/glibc, supported by all versions of glibc
102
+ // - djgpp, supported since 2001
103
+ // - MSVC, supported by Visual Studio 2010 and later
104
+
105
+ #include <stdint.h>
106
+
107
+ #endif
108
+
109
+ #endif // !defined(CU_STDINT_TYPES_ALREADY_DEFINED)
110
+
111
+
112
+ #endif // file guard
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti.h ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_H_)
51
+ #define _CUPTI_H_
52
+
53
+ #ifdef _WIN32
54
+ #ifndef WIN32_LEAN_AND_MEAN
55
+ #define WIN32_LEAN_AND_MEAN
56
+ #endif
57
+ #ifdef NOMINMAX
58
+ #include <windows.h>
59
+ #else
60
+ #define NOMINMAX
61
+ #include <windows.h>
62
+ #undef NOMINMAX
63
+ #endif
64
+ #endif
65
+
66
+ #include <cuda.h>
67
+ #include <cupti_result.h>
68
+ #include <cupti_version.h>
69
+
70
+ /* Activity, callback, event and metric APIs */
71
+ #include <cupti_activity.h>
72
+ #include <cupti_callbacks.h>
73
+ #include <cupti_events.h>
74
+ #include <cupti_metrics.h>
75
+
76
+ /* Runtime, driver, and nvtx function identifiers */
77
+ #include <cupti_driver_cbid.h>
78
+ #include <cupti_runtime_cbid.h>
79
+ #include <cupti_nvtx_cbid.h>
80
+
81
+ /* To support function parameter structures for obsoleted API. See
82
+ cuda.h for the actual definition of these structures. */
83
+ typedef unsigned int CUdeviceptr_v1;
84
+ typedef struct CUDA_MEMCPY2D_v1_st { int dummy; } CUDA_MEMCPY2D_v1;
85
+ typedef struct CUDA_MEMCPY3D_v1_st { int dummy; } CUDA_MEMCPY3D_v1;
86
+ typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY_DESCRIPTOR_v1;
87
+ typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st { int dummy; } CUDA_ARRAY3D_DESCRIPTOR_v1;
88
+
89
+ /* Function parameter structures */
90
+ #include <generated_cuda_runtime_api_meta.h>
91
+ #include <generated_cuda_meta.h>
92
+
93
+ /* The following parameter structures cannot be included unless a
94
+ header that defines GL_VERSION is included before including them.
95
+ If these are needed then make sure such a header is included
96
+ already. */
97
+ #ifdef GL_VERSION
98
+ #include <generated_cuda_gl_interop_meta.h>
99
+ #include <generated_cudaGL_meta.h>
100
+ #endif
101
+
102
+ //#include <generated_nvtx_meta.h>
103
+
104
+ /* The following parameter structures cannot be included by default as
105
+ they are not guaranteed to be available on all systems. Uncomment
106
+ the includes that are available, or use the include explicitly. */
107
+ #if defined(__linux__)
108
+ //#include <generated_cuda_vdpau_interop_meta.h>
109
+ //#include <generated_cudaVDPAU_meta.h>
110
+ #endif
111
+
112
+ #ifdef _WIN32
113
+ //#include <generated_cuda_d3d9_interop_meta.h>
114
+ //#include <generated_cuda_d3d10_interop_meta.h>
115
+ //#include <generated_cuda_d3d11_interop_meta.h>
116
+ //#include <generated_cudaD3D9_meta.h>
117
+ //#include <generated_cudaD3D10_meta.h>
118
+ //#include <generated_cudaD3D11_meta.h>
119
+ #endif
120
+
121
+ #endif /*_CUPTI_H_*/
122
+
123
+
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_activity.h ADDED
The diff for this file is too large to render. See raw diff
 
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_callbacks.h ADDED
@@ -0,0 +1,762 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUPTI_CALLBACKS_H__)
51
+ #define __CUPTI_CALLBACKS_H__
52
+
53
+ #include <cuda.h>
54
+ #include <builtin_types.h>
55
+ #include <string.h>
56
+ #include <cuda_stdint.h>
57
+ #include <cupti_result.h>
58
+
59
+ #ifndef CUPTIAPI
60
+ #ifdef _WIN32
61
+ #define CUPTIAPI __stdcall
62
+ #else
63
+ #define CUPTIAPI
64
+ #endif
65
+ #endif
66
+
67
+ #if defined(__cplusplus)
68
+ extern "C" {
69
+ #endif
70
+
71
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
72
+ #pragma GCC visibility push(default)
73
+ #endif
74
+
75
+ /**
76
+ * \defgroup CUPTI_CALLBACK_API CUPTI Callback API
77
+ * Functions, types, and enums that implement the CUPTI Callback API.
78
+ * @{
79
+ */
80
+
81
+ /**
82
+ * \brief Specifies the point in an API call that a callback is issued.
83
+ *
84
+ * Specifies the point in an API call that a callback is issued. This
85
+ * value is communicated to the callback function via \ref
86
+ * CUpti_CallbackData::callbackSite.
87
+ */
88
+ typedef enum {
89
+ /**
90
+ * The callback is at the entry of the API call.
91
+ */
92
+ CUPTI_API_ENTER = 0,
93
+ /**
94
+ * The callback is at the exit of the API call.
95
+ */
96
+ CUPTI_API_EXIT = 1,
97
+ CUPTI_API_CBSITE_FORCE_INT = 0x7fffffff
98
+ } CUpti_ApiCallbackSite;
99
+
100
+ /**
101
+ * \brief Callback domains.
102
+ *
103
+ * Callback domains. Each domain represents callback points for a
104
+ * group of related API functions or CUDA driver activity.
105
+ */
106
+ typedef enum {
107
+ /**
108
+ * Invalid domain.
109
+ */
110
+ CUPTI_CB_DOMAIN_INVALID = 0,
111
+ /**
112
+ * Domain containing callback points for all driver API functions.
113
+ */
114
+ CUPTI_CB_DOMAIN_DRIVER_API = 1,
115
+ /**
116
+ * Domain containing callback points for all runtime API
117
+ * functions.
118
+ */
119
+ CUPTI_CB_DOMAIN_RUNTIME_API = 2,
120
+ /**
121
+ * Domain containing callback points for CUDA resource tracking.
122
+ */
123
+ CUPTI_CB_DOMAIN_RESOURCE = 3,
124
+ /**
125
+ * Domain containing callback points for CUDA synchronization.
126
+ */
127
+ CUPTI_CB_DOMAIN_SYNCHRONIZE = 4,
128
+ /**
129
+ * Domain containing callback points for NVTX API functions.
130
+ */
131
+ CUPTI_CB_DOMAIN_NVTX = 5,
132
+ CUPTI_CB_DOMAIN_SIZE,
133
+
134
+ CUPTI_CB_DOMAIN_FORCE_INT = 0x7fffffff
135
+ } CUpti_CallbackDomain;
136
+
137
+ /**
138
+ * \brief Callback IDs for resource domain.
139
+ *
140
+ * Callback IDs for resource domain, CUPTI_CB_DOMAIN_RESOURCE. This
141
+ * value is communicated to the callback function via the \p cbid
142
+ * parameter.
143
+ */
144
+ typedef enum {
145
+ /**
146
+ * Invalid resource callback ID.
147
+ */
148
+ CUPTI_CBID_RESOURCE_INVALID = 0,
149
+ /**
150
+ * A new context has been created.
151
+ */
152
+ CUPTI_CBID_RESOURCE_CONTEXT_CREATED = 1,
153
+ /**
154
+ * A context is about to be destroyed.
155
+ */
156
+ CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING = 2,
157
+ /**
158
+ * A new stream has been created.
159
+ */
160
+ CUPTI_CBID_RESOURCE_STREAM_CREATED = 3,
161
+ /**
162
+ * A stream is about to be destroyed.
163
+ */
164
+ CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING = 4,
165
+ /**
166
+ * The driver has finished initializing.
167
+ */
168
+ CUPTI_CBID_RESOURCE_CU_INIT_FINISHED = 5,
169
+ /**
170
+ * A module has been loaded.
171
+ */
172
+ CUPTI_CBID_RESOURCE_MODULE_LOADED = 6,
173
+ /**
174
+ * A module is about to be unloaded.
175
+ */
176
+ CUPTI_CBID_RESOURCE_MODULE_UNLOAD_STARTING = 7,
177
+ /**
178
+ * The current module which is being profiled.
179
+ */
180
+ CUPTI_CBID_RESOURCE_MODULE_PROFILED = 8,
181
+ /**
182
+ * CUDA graph has been created.
183
+ */
184
+ CUPTI_CBID_RESOURCE_GRAPH_CREATED = 9,
185
+ /**
186
+ * CUDA graph is about to be destroyed.
187
+ */
188
+ CUPTI_CBID_RESOURCE_GRAPH_DESTROY_STARTING = 10,
189
+ /**
190
+ * CUDA graph is cloned.
191
+ */
192
+ CUPTI_CBID_RESOURCE_GRAPH_CLONED = 11,
193
+ /**
194
+ * CUDA graph node is about to be created
195
+ */
196
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATE_STARTING = 12,
197
+ /**
198
+ * CUDA graph node is created.
199
+ */
200
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CREATED = 13,
201
+ /**
202
+ * CUDA graph node is about to be destroyed.
203
+ */
204
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DESTROY_STARTING = 14,
205
+ /**
206
+ * Dependency on a CUDA graph node is created.
207
+ */
208
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_CREATED = 15,
209
+ /**
210
+ * Dependency on a CUDA graph node is destroyed.
211
+ */
212
+ CUPTI_CBID_RESOURCE_GRAPHNODE_DEPENDENCY_DESTROY_STARTING = 16,
213
+ /**
214
+ * An executable CUDA graph is about to be created.
215
+ */
216
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATE_STARTING = 17,
217
+ /**
218
+ * An executable CUDA graph is created.
219
+ */
220
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_CREATED = 18,
221
+ /**
222
+ * An executable CUDA graph is about to be destroyed.
223
+ */
224
+ CUPTI_CBID_RESOURCE_GRAPHEXEC_DESTROY_STARTING = 19,
225
+ /**
226
+ * CUDA graph node is cloned.
227
+ */
228
+ CUPTI_CBID_RESOURCE_GRAPHNODE_CLONED = 20,
229
+
230
+ CUPTI_CBID_RESOURCE_SIZE,
231
+ CUPTI_CBID_RESOURCE_FORCE_INT = 0x7fffffff
232
+ } CUpti_CallbackIdResource;
233
+
234
+ /**
235
+ * \brief Callback IDs for synchronization domain.
236
+ *
237
+ * Callback IDs for synchronization domain,
238
+ * CUPTI_CB_DOMAIN_SYNCHRONIZE. This value is communicated to the
239
+ * callback function via the \p cbid parameter.
240
+ */
241
+ typedef enum {
242
+ /**
243
+ * Invalid synchronize callback ID.
244
+ */
245
+ CUPTI_CBID_SYNCHRONIZE_INVALID = 0,
246
+ /**
247
+ * Stream synchronization has completed for the stream.
248
+ */
249
+ CUPTI_CBID_SYNCHRONIZE_STREAM_SYNCHRONIZED = 1,
250
+ /**
251
+ * Context synchronization has completed for the context.
252
+ */
253
+ CUPTI_CBID_SYNCHRONIZE_CONTEXT_SYNCHRONIZED = 2,
254
+ CUPTI_CBID_SYNCHRONIZE_SIZE,
255
+ CUPTI_CBID_SYNCHRONIZE_FORCE_INT = 0x7fffffff
256
+ } CUpti_CallbackIdSync;
257
+
258
+
259
+ /**
260
+ * \brief Data passed into a runtime or driver API callback function.
261
+ *
262
+ * Data passed into a runtime or driver API callback function as the
263
+ * \p cbdata argument to \ref CUpti_CallbackFunc. The \p cbdata will
264
+ * be this type for \p domain equal to CUPTI_CB_DOMAIN_DRIVER_API or
265
+ * CUPTI_CB_DOMAIN_RUNTIME_API. The callback data is valid only within
266
+ * the invocation of the callback function that is passed the data. If
267
+ * you need to retain some data for use outside of the callback, you
268
+ * must make a copy of that data. For example, if you make a shallow
269
+ * copy of CUpti_CallbackData within a callback, you cannot
270
+ * dereference \p functionParams outside of that callback to access
271
+ * the function parameters. \p functionName is an exception: the
272
+ * string pointed to by \p functionName is a global constant and so
273
+ * may be accessed outside of the callback.
274
+ */
275
+ typedef struct {
276
+ /**
277
+ * Point in the runtime or driver function from where the callback
278
+ * was issued.
279
+ */
280
+ CUpti_ApiCallbackSite callbackSite;
281
+
282
+ /**
283
+ * Name of the runtime or driver API function which issued the
284
+ * callback. This string is a global constant and so may be
285
+ * accessed outside of the callback.
286
+ */
287
+ const char *functionName;
288
+
289
+ /**
290
+ * Pointer to the arguments passed to the runtime or driver API
291
+ * call. See generated_cuda_runtime_api_meta.h and
292
+ * generated_cuda_meta.h for structure definitions for the
293
+ * parameters for each runtime and driver API function.
294
+ */
295
+ const void *functionParams;
296
+
297
+ /**
298
+ * Pointer to the return value of the runtime or driver API
299
+ * call. This field is only valid within the exit::CUPTI_API_EXIT
300
+ * callback. For a runtime API \p functionReturnValue points to a
301
+ * \p cudaError_t. For a driver API \p functionReturnValue points
302
+ * to a \p CUresult.
303
+ */
304
+ void *functionReturnValue;
305
+
306
+ /**
307
+ * Name of the symbol operated on by the runtime or driver API
308
+ * function which issued the callback. This entry is valid only for
309
+ * driver and runtime launch callbacks, where it returns the name of
310
+ * the kernel.
311
+ */
312
+ const char *symbolName;
313
+
314
+ /**
315
+ * Driver context current to the thread, or null if no context is
316
+ * current. This value can change from the entry to exit callback
317
+ * of a runtime API function if the runtime initializes a context.
318
+ */
319
+ CUcontext context;
320
+
321
+ /**
322
+ * Unique ID for the CUDA context associated with the thread. The
323
+ * UIDs are assigned sequentially as contexts are created and are
324
+ * unique within a process.
325
+ */
326
+ uint32_t contextUid;
327
+
328
+ /**
329
+ * Pointer to data shared between the entry and exit callbacks of
330
+ * a given runtime or drive API function invocation. This field
331
+ * can be used to pass 64-bit values from the entry callback to
332
+ * the corresponding exit callback.
333
+ */
334
+ uint64_t *correlationData;
335
+
336
+ /**
337
+ * The activity record correlation ID for this callback. For a
338
+ * driver domain callback (i.e. \p domain
339
+ * CUPTI_CB_DOMAIN_DRIVER_API) this ID will equal the correlation ID
340
+ * in the CUpti_ActivityAPI record corresponding to the CUDA driver
341
+ * function call. For a runtime domain callback (i.e. \p domain
342
+ * CUPTI_CB_DOMAIN_RUNTIME_API) this ID will equal the correlation
343
+ * ID in the CUpti_ActivityAPI record corresponding to the CUDA
344
+ * runtime function call. Within the callback, this ID can be
345
+ * recorded to correlate user data with the activity record. This
346
+ * field is new in 4.1.
347
+ */
348
+ uint32_t correlationId;
349
+
350
+ } CUpti_CallbackData;
351
+
352
+ /**
353
+ * \brief Data passed into a resource callback function.
354
+ *
355
+ * Data passed into a resource callback function as the \p cbdata
356
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
357
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The callback
358
+ * data is valid only within the invocation of the callback function
359
+ * that is passed the data. If you need to retain some data for use
360
+ * outside of the callback, you must make a copy of that data.
361
+ */
362
+ typedef struct {
363
+ /**
364
+ * For CUPTI_CBID_RESOURCE_CONTEXT_CREATED and
365
+ * CUPTI_CBID_RESOURCE_CONTEXT_DESTROY_STARTING, the context being
366
+ * created or destroyed. For CUPTI_CBID_RESOURCE_STREAM_CREATED and
367
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the context
368
+ * containing the stream being created or destroyed.
369
+ */
370
+ CUcontext context;
371
+
372
+ union {
373
+ /**
374
+ * For CUPTI_CBID_RESOURCE_STREAM_CREATED and
375
+ * CUPTI_CBID_RESOURCE_STREAM_DESTROY_STARTING, the stream being
376
+ * created or destroyed.
377
+ */
378
+ CUstream stream;
379
+ } resourceHandle;
380
+
381
+ /**
382
+ * Reserved for future use.
383
+ */
384
+ void *resourceDescriptor;
385
+ } CUpti_ResourceData;
386
+
387
+
388
+ /**
389
+ * \brief Module data passed into a resource callback function.
390
+ *
391
+ * CUDA module data passed into a resource callback function as the \p cbdata
392
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
393
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The module
394
+ * data is valid only within the invocation of the callback function
395
+ * that is passed the data. If you need to retain some data for use
396
+ * outside of the callback, you must make a copy of that data.
397
+ */
398
+
399
+ typedef struct {
400
+ /**
401
+ * Identifier to associate with the CUDA module.
402
+ */
403
+ uint32_t moduleId;
404
+
405
+ /**
406
+ * The size of the cubin.
407
+ */
408
+ size_t cubinSize;
409
+
410
+ /**
411
+ * Pointer to the associated cubin.
412
+ */
413
+ const char *pCubin;
414
+ } CUpti_ModuleResourceData;
415
+
416
+ /**
417
+ * \brief CUDA graphs data passed into a resource callback function.
418
+ *
419
+ * CUDA graphs data passed into a resource callback function as the \p cbdata
420
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
421
+ * type for \p domain equal to CUPTI_CB_DOMAIN_RESOURCE. The graph
422
+ * data is valid only within the invocation of the callback function
423
+ * that is passed the data. If you need to retain some data for use
424
+ * outside of the callback, you must make a copy of that data.
425
+ */
426
+
427
+ typedef struct {
428
+ /**
429
+ * CUDA graph
430
+ */
431
+ CUgraph graph;
432
+ /**
433
+ * The original CUDA graph from which \param graph is cloned
434
+ */
435
+ CUgraph originalGraph;
436
+ /**
437
+ * CUDA graph node
438
+ */
439
+ CUgraphNode node;
440
+ /**
441
+ * The original CUDA graph node from which \param node is cloned
442
+ */
443
+ CUgraphNode originalNode;
444
+ /**
445
+ * Type of the \param node
446
+ */
447
+ CUgraphNodeType nodeType;
448
+ /**
449
+ * The dependent graph node
450
+ * The size of the array is \param numDependencies.
451
+ */
452
+ CUgraphNode dependency;
453
+ /**
454
+ * CUDA executable graph
455
+ */
456
+ CUgraphExec graphExec;
457
+ } CUpti_GraphData;
458
+
459
+ /**
460
+ * \brief Data passed into a synchronize callback function.
461
+ *
462
+ * Data passed into a synchronize callback function as the \p cbdata
463
+ * argument to \ref CUpti_CallbackFunc. The \p cbdata will be this
464
+ * type for \p domain equal to CUPTI_CB_DOMAIN_SYNCHRONIZE. The
465
+ * callback data is valid only within the invocation of the callback
466
+ * function that is passed the data. If you need to retain some data
467
+ * for use outside of the callback, you must make a copy of that data.
468
+ */
469
+ typedef struct {
470
+ /**
471
+ * The context of the stream being synchronized.
472
+ */
473
+ CUcontext context;
474
+ /**
475
+ * The stream being synchronized.
476
+ */
477
+ CUstream stream;
478
+ } CUpti_SynchronizeData;
479
+
480
+ /**
481
+ * \brief Data passed into a NVTX callback function.
482
+ *
483
+ * Data passed into a NVTX callback function as the \p cbdata argument
484
+ * to \ref CUpti_CallbackFunc. The \p cbdata will be this type for \p
485
+ * domain equal to CUPTI_CB_DOMAIN_NVTX. Unless otherwise notes, the
486
+ * callback data is valid only within the invocation of the callback
487
+ * function that is passed the data. If you need to retain some data
488
+ * for use outside of the callback, you must make a copy of that data.
489
+ */
490
+ typedef struct {
491
+ /**
492
+ * Name of the NVTX API function which issued the callback. This
493
+ * string is a global constant and so may be accessed outside of the
494
+ * callback.
495
+ */
496
+ const char *functionName;
497
+
498
+ /**
499
+ * Pointer to the arguments passed to the NVTX API call. See
500
+ * generated_nvtx_meta.h for structure definitions for the
501
+ * parameters for each NVTX API function.
502
+ */
503
+ const void *functionParams;
504
+
505
+ /**
506
+ * Pointer to the return value of the NVTX API call. See
507
+ * nvToolsExt.h for each NVTX API function's return value.
508
+ */
509
+ const void *functionReturnValue;
510
+ } CUpti_NvtxData;
511
+
512
+ /**
513
+ * \brief An ID for a driver API, runtime API, resource or
514
+ * synchronization callback.
515
+ *
516
+ * An ID for a driver API, runtime API, resource or synchronization
517
+ * callback. Within a driver API callback this should be interpreted
518
+ * as a CUpti_driver_api_trace_cbid value (these values are defined in
519
+ * cupti_driver_cbid.h). Within a runtime API callback this should be
520
+ * interpreted as a CUpti_runtime_api_trace_cbid value (these values
521
+ * are defined in cupti_runtime_cbid.h). Within a resource API
522
+ * callback this should be interpreted as a \ref
523
+ * CUpti_CallbackIdResource value. Within a synchronize API callback
524
+ * this should be interpreted as a \ref CUpti_CallbackIdSync value.
525
+ */
526
+ typedef uint32_t CUpti_CallbackId;
527
+
528
+ /**
529
+ * \brief Function type for a callback.
530
+ *
531
+ * Function type for a callback. The type of the data passed to the
532
+ * callback in \p cbdata depends on the \p domain. If \p domain is
533
+ * CUPTI_CB_DOMAIN_DRIVER_API or CUPTI_CB_DOMAIN_RUNTIME_API the type
534
+ * of \p cbdata will be CUpti_CallbackData. If \p domain is
535
+ * CUPTI_CB_DOMAIN_RESOURCE the type of \p cbdata will be
536
+ * CUpti_ResourceData. If \p domain is CUPTI_CB_DOMAIN_SYNCHRONIZE the
537
+ * type of \p cbdata will be CUpti_SynchronizeData. If \p domain is
538
+ * CUPTI_CB_DOMAIN_NVTX the type of \p cbdata will be CUpti_NvtxData.
539
+ *
540
+ * \param userdata User data supplied at subscription of the callback
541
+ * \param domain The domain of the callback
542
+ * \param cbid The ID of the callback
543
+ * \param cbdata Data passed to the callback.
544
+ */
545
+ typedef void (CUPTIAPI *CUpti_CallbackFunc)(
546
+ void *userdata,
547
+ CUpti_CallbackDomain domain,
548
+ CUpti_CallbackId cbid,
549
+ const void *cbdata);
550
+
551
+ /**
552
+ * \brief A callback subscriber.
553
+ */
554
+ typedef struct CUpti_Subscriber_st *CUpti_SubscriberHandle;
555
+
556
+ /**
557
+ * \brief Pointer to an array of callback domains.
558
+ */
559
+ typedef CUpti_CallbackDomain *CUpti_DomainTable;
560
+
561
+ /**
562
+ * \brief Get the available callback domains.
563
+ *
564
+ * Returns in \p *domainTable an array of size \p *domainCount of all
565
+ * the available callback domains.
566
+ * \note \b Thread-safety: this function is thread safe.
567
+ *
568
+ * \param domainCount Returns number of callback domains
569
+ * \param domainTable Returns pointer to array of available callback domains
570
+ *
571
+ * \retval CUPTI_SUCCESS on success
572
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
573
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p domainCount or \p domainTable are NULL
574
+ */
575
+ CUptiResult CUPTIAPI cuptiSupportedDomains(size_t *domainCount,
576
+ CUpti_DomainTable *domainTable);
577
+
578
+ /**
579
+ * \brief Initialize a callback subscriber with a callback function
580
+ * and user data.
581
+ *
582
+ * Initializes a callback subscriber with a callback function and
583
+ * (optionally) a pointer to user data. The returned subscriber handle
584
+ * can be used to enable and disable the callback for specific domains
585
+ * and callback IDs.
586
+ * \note Only a single subscriber can be registered at a time. To ensure
587
+ * that no other CUPTI client interrupts the profiling session, it's the
588
+ * responsibility of all the CUPTI clients to call this function before
589
+ * starting the profling session. In case profiling session is already
590
+ * started by another CUPTI client, this function returns the error code
591
+ * CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED.
592
+ * Note that this function returns the same error when application is
593
+ * launched using NVIDIA tools like nvprof, Visual Profiler, Nsight Systems,
594
+ * Nsight Compute, cuda-gdb and cuda-memcheck.
595
+ * \note This function does not enable any callbacks.
596
+ * \note \b Thread-safety: this function is thread safe.
597
+ *
598
+ * \param subscriber Returns handle to initialize subscriber
599
+ * \param callback The callback function
600
+ * \param userdata A pointer to user data. This data will be passed to
601
+ * the callback function via the \p userdata paramater.
602
+ *
603
+ * \retval CUPTI_SUCCESS on success
604
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialize CUPTI
605
+ * \retval CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED if there is already a CUPTI subscriber
606
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL
607
+ */
608
+ CUptiResult CUPTIAPI cuptiSubscribe(CUpti_SubscriberHandle *subscriber,
609
+ CUpti_CallbackFunc callback,
610
+ void *userdata);
611
+
612
+ /**
613
+ * \brief Unregister a callback subscriber.
614
+ *
615
+ * Removes a callback subscriber so that no future callbacks will be
616
+ * issued to that subscriber.
617
+ * \note \b Thread-safety: this function is thread safe.
618
+ *
619
+ * \param subscriber Handle to the initialize subscriber
620
+ *
621
+ * \retval CUPTI_SUCCESS on success
622
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
623
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is NULL or not initialized
624
+ */
625
+ CUptiResult CUPTIAPI cuptiUnsubscribe(CUpti_SubscriberHandle subscriber);
626
+
627
+ /**
628
+ * \brief Get the current enabled/disabled state of a callback for a specific
629
+ * domain and function ID.
630
+ *
631
+ * Returns non-zero in \p *enable if the callback for a domain and
632
+ * callback ID is enabled, and zero if not enabled.
633
+ *
634
+ * \note \b Thread-safety: a subscriber must serialize access to
635
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
636
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
637
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
638
+ * the results are undefined.
639
+ *
640
+ * \param enable Returns non-zero if callback enabled, zero if not enabled
641
+ * \param subscriber Handle to the initialize subscriber
642
+ * \param domain The domain of the callback
643
+ * \param cbid The ID of the callback
644
+ *
645
+ * \retval CUPTI_SUCCESS on success
646
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
647
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p enabled is NULL, or if \p
648
+ * subscriber, \p domain or \p cbid is invalid.
649
+ */
650
+ CUptiResult CUPTIAPI cuptiGetCallbackState(uint32_t *enable,
651
+ CUpti_SubscriberHandle subscriber,
652
+ CUpti_CallbackDomain domain,
653
+ CUpti_CallbackId cbid);
654
+
655
+ /**
656
+ * \brief Enable or disabled callbacks for a specific domain and
657
+ * callback ID.
658
+ *
659
+ * Enable or disabled callbacks for a subscriber for a specific domain
660
+ * and callback ID.
661
+ *
662
+ * \note \b Thread-safety: a subscriber must serialize access to
663
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
664
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
665
+ * d, c) and cuptiEnableCallback(sub, d, c) are called concurrently,
666
+ * the results are undefined.
667
+ *
668
+ * \param enable New enable state for the callback. Zero disables the
669
+ * callback, non-zero enables the callback.
670
+ * \param subscriber - Handle to callback subscription
671
+ * \param domain The domain of the callback
672
+ * \param cbid The ID of the callback
673
+ *
674
+ * \retval CUPTI_SUCCESS on success
675
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
676
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber, \p domain or \p
677
+ * cbid is invalid.
678
+ */
679
+ CUptiResult CUPTIAPI cuptiEnableCallback(uint32_t enable,
680
+ CUpti_SubscriberHandle subscriber,
681
+ CUpti_CallbackDomain domain,
682
+ CUpti_CallbackId cbid);
683
+
684
+ /**
685
+ * \brief Enable or disabled all callbacks for a specific domain.
686
+ *
687
+ * Enable or disabled all callbacks for a specific domain.
688
+ *
689
+ * \note \b Thread-safety: a subscriber must serialize access to
690
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
691
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackEnabled(sub,
692
+ * d, *) and cuptiEnableDomain(sub, d) are called concurrently, the
693
+ * results are undefined.
694
+ *
695
+ * \param enable New enable state for all callbacks in the
696
+ * domain. Zero disables all callbacks, non-zero enables all
697
+ * callbacks.
698
+ * \param subscriber - Handle to callback subscription
699
+ * \param domain The domain of the callback
700
+ *
701
+ * \retval CUPTI_SUCCESS on success
702
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
703
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber or \p domain is invalid
704
+ */
705
+ CUptiResult CUPTIAPI cuptiEnableDomain(uint32_t enable,
706
+ CUpti_SubscriberHandle subscriber,
707
+ CUpti_CallbackDomain domain);
708
+
709
+ /**
710
+ * \brief Enable or disable all callbacks in all domains.
711
+ *
712
+ * Enable or disable all callbacks in all domains.
713
+ *
714
+ * \note \b Thread-safety: a subscriber must serialize access to
715
+ * cuptiGetCallbackState, cuptiEnableCallback, cuptiEnableDomain, and
716
+ * cuptiEnableAllDomains. For example, if cuptiGetCallbackState(sub,
717
+ * d, *) and cuptiEnableAllDomains(sub) are called concurrently, the
718
+ * results are undefined.
719
+ *
720
+ * \param enable New enable state for all callbacks in all
721
+ * domain. Zero disables all callbacks, non-zero enables all
722
+ * callbacks.
723
+ * \param subscriber - Handle to callback subscription
724
+ *
725
+ * \retval CUPTI_SUCCESS on success
726
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if unable to initialized CUPTI
727
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p subscriber is invalid
728
+ */
729
+ CUptiResult CUPTIAPI cuptiEnableAllDomains(uint32_t enable,
730
+ CUpti_SubscriberHandle subscriber);
731
+
732
+ /**
733
+ * \brief Get the name of a callback for a specific domain and callback ID.
734
+ *
735
+ * Returns a pointer to the name c_string in \p **name.
736
+ *
737
+ * \note \b Names are available only for the DRIVER and RUNTIME domains.
738
+ *
739
+ * \param domain The domain of the callback
740
+ * \param cbid The ID of the callback
741
+ * \param name Returns pointer to the name string on success, NULL otherwise
742
+ *
743
+ * \retval CUPTI_SUCCESS on success
744
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p name is NULL, or if
745
+ * \p domain or \p cbid is invalid.
746
+ */
747
+ CUptiResult CUPTIAPI cuptiGetCallbackName(CUpti_CallbackDomain domain,
748
+ uint32_t cbid,
749
+ const char **name);
750
+
751
+ /** @} */ /* END CUPTI_CALLBACK_API */
752
+
753
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
754
+ #pragma GCC visibility pop
755
+ #endif
756
+
757
+ #if defined(__cplusplus)
758
+ }
759
+ #endif
760
+
761
+ #endif // file guard
762
+
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_checkpoint.h ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cuda.h>
4
+ #include <cupti_result.h>
5
+
6
+ #include <stddef.h>
7
+ #include <stdint.h>
8
+
9
+ namespace NV { namespace Cupti { namespace Checkpoint {
10
+
11
+ #ifdef __cplusplus
12
+ extern "C"
13
+ {
14
+ #endif
15
+
16
+ /**
17
+ * \defgroup CUPTI_CHECKPOINT_API CUPTI Checkpoint API
18
+ * Functions, types, and enums that implement the CUPTI Checkpoint API.
19
+ * @{
20
+ */
21
+
22
+ /**
23
+ * \brief Specifies optimization options for a checkpoint, may be OR'd together to specify multiple options.
24
+ */
25
+ typedef enum
26
+ {
27
+ CUPTI_CHECKPOINT_OPT_NONE = 0, //!< Default behavior
28
+ CUPTI_CHECKPOINT_OPT_TRANSFER = 1, //!< Determine which mem blocks have changed, and only restore those. This optimization is cached, which means cuptiCheckpointRestore must always be called at the same point in the application when this option is enabled, or the result may be incorrect.
29
+ } CUpti_CheckpointOptimizations;
30
+
31
+ /**
32
+ * \brief Configuration and handle for a CUPTI Checkpoint
33
+ *
34
+ * A CUptiCheckpoint object should be initialized with desired options prior to passing into any
35
+ * CUPTI Checkpoint API function. The first call into a Checkpoint API function will initialize internal
36
+ * state based on these options. Subsequent changes to these options will not have any effect.
37
+ *
38
+ * Checkpoint data is saved in device, host, and filesystem space. There are options to reserve memory
39
+ * at each level (device, host, filesystem) which are intended to allow a guarantee that a certain amount
40
+ * of memory will remain free for use after the checkpoint is saved.
41
+ * Note, however, that falling back to slower levels of memory (host, and then filesystem) to save the checkpoint
42
+ * will result in performance degradation.
43
+ * Currently, the filesystem limitation is not implemented. Note that falling back to filesystem storage may
44
+ * significantly impact the performance for saving and restoring a checkpoint.
45
+ */
46
+ typedef struct
47
+ {
48
+ size_t structSize; //!< [in] Must be set to CUpti_Checkpoint_STRUCT_SIZE
49
+
50
+ CUcontext ctx; //!< [in] Set to context to save from, or will use current context if NULL
51
+
52
+ size_t reserveDeviceMB; //!< [in] Restrict checkpoint from using last N MB of device memory (-1 = use no device memory)
53
+ size_t reserveHostMB; //!< [in] Restrict checkpoint from using last N MB of host memory (-1 = use no host memory)
54
+ uint8_t allowOverwrite; //!< [in] Boolean, Allow checkpoint to save over existing checkpoint
55
+ uint8_t optimizations; //!< [in] Mask of CUpti_CheckpointOptimizations flags for this checkpoint
56
+
57
+ void * pPriv; //!< [in] Assign to NULL
58
+ } CUpti_Checkpoint;
59
+
60
+ #define CUpti_Checkpoint_STRUCT_SIZE \
61
+ (offsetof(CUpti_Checkpoint, pPriv) + \
62
+ sizeof(((CUpti_Checkpoint*)(nullptr))->pPriv))
63
+
64
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
65
+ #pragma GCC visibility push(default)
66
+ #endif
67
+
68
+ /**
69
+ * \brief Initialize and save a checkpoint of the device state associated with the handle context
70
+ *
71
+ * Uses the handle options to configure and save a checkpoint of the device state associated with the specified context.
72
+ *
73
+ * \param handle A pointer to a CUpti_Checkpoint object
74
+ *
75
+ * \retval CUPTI_SUCCESS if a checkpoint was successfully initialized and saved
76
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p handle does not appear to refer to a valid CUpti_Checkpoint
77
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
78
+ * \retval CUPTI_ERROR_INVALID_DEVICE if device associated with context is not compatible with checkpoint API
79
+ * \retval CUPTI_ERROR_INVALID_OPERATION if Save is requested over an existing checkpoint, but \p allowOverwrite was not originally specified
80
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY if as configured, not enough backing storage space to save the checkpoint
81
+ */
82
+ CUptiResult cuptiCheckpointSave(CUpti_Checkpoint * const handle);
83
+
84
+ /**
85
+ * \brief Restore a checkpoint to the device associated with its context
86
+ *
87
+ * Restores device, pinned, and allocated memory to the state when the checkpoint was saved
88
+ *
89
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
90
+ *
91
+ * \retval CUTPI_SUCCESS if the checkpoint was successfully restored
92
+ * \retval CUPTI_ERROR_NOT_INITIALIZED if the checkpoint was not previously initialized
93
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
94
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle appears invalid
95
+ * \retval CUPTI_ERROR_UNKNOWN if the restore or optimization operation fails
96
+ */
97
+ CUptiResult cuptiCheckpointRestore(CUpti_Checkpoint * const handle);
98
+
99
+ /**
100
+ * \brief Free the backing data for a checkpoint
101
+ *
102
+ * Frees all associated device, host memory and filesystem storage used for this context.
103
+ * After freeing a handle, it may be re-used as if it was new - options may be re-configured and will
104
+ * take effect on the next call to \p cuptiCheckpointSave.
105
+ *
106
+ * \param handle A pointer to a previously saved CUpti_Checkpoint object
107
+ *
108
+ * \retval CUPTI_SUCCESS if the handle was successfully freed
109
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if the handle was already freed or appears invalid
110
+ * \retval CUPTI_ERROR_INVALID_CONTEXT if the context is no longer valid
111
+ */
112
+ CUptiResult cuptiCheckpointFree(CUpti_Checkpoint * const handle);
113
+
114
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
115
+ #pragma GCC visibility pop
116
+ #endif
117
+
118
+ /**
119
+ * @}
120
+ */
121
+
122
+ #ifdef __cplusplus
123
+ }
124
+ #endif
125
+
126
+ // Exit namespace NV::Cupti::Checkpoint
127
+ }}}
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_driver_cbid.h ADDED
@@ -0,0 +1,725 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ // *************************************************************************
3
+ // Definitions of indices for API functions, unique across entire API
4
+ // *************************************************************************
5
+
6
+ // This file is generated. Any changes you make will be lost during the next clean build.
7
+ // CUDA public interface, for type definitions and cu* function prototypes
8
+
9
+ typedef enum CUpti_driver_api_trace_cbid_enum {
10
+ CUPTI_DRIVER_TRACE_CBID_INVALID = 0,
11
+ CUPTI_DRIVER_TRACE_CBID_cuInit = 1,
12
+ CUPTI_DRIVER_TRACE_CBID_cuDriverGetVersion = 2,
13
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGet = 3,
14
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetCount = 4,
15
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetName = 5,
16
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceComputeCapability = 6,
17
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem = 7,
18
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetProperties = 8,
19
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetAttribute = 9,
20
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate = 10,
21
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy = 11,
22
+ CUPTI_DRIVER_TRACE_CBID_cuCtxAttach = 12,
23
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDetach = 13,
24
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent = 14,
25
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent = 15,
26
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetDevice = 16,
27
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSynchronize = 17,
28
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoad = 18,
29
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadData = 19,
30
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadDataEx = 20,
31
+ CUPTI_DRIVER_TRACE_CBID_cuModuleLoadFatBinary = 21,
32
+ CUPTI_DRIVER_TRACE_CBID_cuModuleUnload = 22,
33
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetFunction = 23,
34
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal = 24,
35
+ CUPTI_DRIVER_TRACE_CBID_cu64ModuleGetGlobal = 25,
36
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetTexRef = 26,
37
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo = 27,
38
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetInfo = 28,
39
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc = 29,
40
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAlloc = 30,
41
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch = 31,
42
+ CUPTI_DRIVER_TRACE_CBID_cu64MemAllocPitch = 32,
43
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree = 33,
44
+ CUPTI_DRIVER_TRACE_CBID_cu64MemFree = 34,
45
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange = 35,
46
+ CUPTI_DRIVER_TRACE_CBID_cu64MemGetAddressRange = 36,
47
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost = 37,
48
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeHost = 38,
49
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc = 39,
50
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer = 40,
51
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostGetDevicePointer = 41,
52
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetFlags = 42,
53
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD = 43,
54
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoD = 44,
55
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH = 45,
56
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoH = 46,
57
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD = 47,
58
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoD = 48,
59
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA = 49,
60
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoA = 50,
61
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD = 51,
62
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyAtoD = 52,
63
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA = 53,
64
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH = 54,
65
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA = 55,
66
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D = 56,
67
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned = 57,
68
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D = 58,
69
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3D = 59,
70
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync = 60,
71
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyHtoDAsync = 61,
72
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync = 62,
73
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoHAsync = 63,
74
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync = 64,
75
+ CUPTI_DRIVER_TRACE_CBID_cu64MemcpyDtoDAsync = 65,
76
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync = 66,
77
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync = 67,
78
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync = 68,
79
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync = 69,
80
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy3DAsync = 70,
81
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8 = 71,
82
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8 = 72,
83
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16 = 73,
84
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16 = 74,
85
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32 = 75,
86
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32 = 76,
87
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8 = 77,
88
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8 = 78,
89
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16 = 79,
90
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16 = 80,
91
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32 = 81,
92
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32 = 82,
93
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetBlockShape = 83,
94
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedSize = 84,
95
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetAttribute = 85,
96
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetCacheConfig = 86,
97
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate = 87,
98
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor = 88,
99
+ CUPTI_DRIVER_TRACE_CBID_cuArrayDestroy = 89,
100
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate = 90,
101
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor = 91,
102
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefCreate = 92,
103
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefDestroy = 93,
104
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetArray = 94,
105
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress = 95,
106
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress = 96,
107
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D = 97,
108
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefSetAddress2D = 98,
109
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFormat = 99,
110
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddressMode = 100,
111
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFilterMode = 101,
112
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetFlags = 102,
113
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress = 103,
114
+ CUPTI_DRIVER_TRACE_CBID_cu64TexRefGetAddress = 104,
115
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetArray = 105,
116
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddressMode = 106,
117
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFilterMode = 107,
118
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFormat = 108,
119
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetFlags = 109,
120
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetSize = 110,
121
+ CUPTI_DRIVER_TRACE_CBID_cuParamSeti = 111,
122
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetf = 112,
123
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetv = 113,
124
+ CUPTI_DRIVER_TRACE_CBID_cuParamSetTexRef = 114,
125
+ CUPTI_DRIVER_TRACE_CBID_cuLaunch = 115,
126
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGrid = 116,
127
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchGridAsync = 117,
128
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreate = 118,
129
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord = 119,
130
+ CUPTI_DRIVER_TRACE_CBID_cuEventQuery = 120,
131
+ CUPTI_DRIVER_TRACE_CBID_cuEventSynchronize = 121,
132
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy = 122,
133
+ CUPTI_DRIVER_TRACE_CBID_cuEventElapsedTime = 123,
134
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreate = 124,
135
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery = 125,
136
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize = 126,
137
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy = 127,
138
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnregisterResource = 128,
139
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsSubResourceGetMappedArray = 129,
140
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer = 130,
141
+ CUPTI_DRIVER_TRACE_CBID_cu64GraphicsResourceGetMappedPointer = 131,
142
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags = 132,
143
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources = 133,
144
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources = 134,
145
+ CUPTI_DRIVER_TRACE_CBID_cuGetExportTable = 135,
146
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetLimit = 136,
147
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetLimit = 137,
148
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevice = 138,
149
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate = 139,
150
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D10RegisterResource = 140,
151
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10RegisterResource = 141,
152
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnregisterResource = 142,
153
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10MapResources = 143,
154
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10UnmapResources = 144,
155
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceSetMapFlags = 145,
156
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedArray = 146,
157
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer = 147,
158
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize = 148,
159
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch = 149,
160
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions = 150,
161
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevice = 151,
162
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate = 152,
163
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D11RegisterResource = 153,
164
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevice = 154,
165
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate = 155,
166
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsD3D9RegisterResource = 156,
167
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDirect3DDevice = 157,
168
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterResource = 158,
169
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterResource = 159,
170
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapResources = 160,
171
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapResources = 161,
172
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceSetMapFlags = 162,
173
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions = 163,
174
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedArray = 164,
175
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer = 165,
176
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize = 166,
177
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch = 167,
178
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9Begin = 168,
179
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9End = 169,
180
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9RegisterVertexBuffer = 170,
181
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer = 171,
182
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnmapVertexBuffer = 172,
183
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9UnregisterVertexBuffer = 173,
184
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate = 174,
185
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterBuffer = 175,
186
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsGLRegisterImage = 176,
187
+ CUPTI_DRIVER_TRACE_CBID_cuWGLGetDevice = 177,
188
+ CUPTI_DRIVER_TRACE_CBID_cuGLInit = 178,
189
+ CUPTI_DRIVER_TRACE_CBID_cuGLRegisterBufferObject = 179,
190
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject = 180,
191
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObject = 181,
192
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnregisterBufferObject = 182,
193
+ CUPTI_DRIVER_TRACE_CBID_cuGLSetBufferObjectMapFlags = 183,
194
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync = 184,
195
+ CUPTI_DRIVER_TRACE_CBID_cuGLUnmapBufferObjectAsync = 185,
196
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUGetDevice = 186,
197
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate = 187,
198
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterVideoSurface = 188,
199
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsVDPAURegisterOutputSurface = 189,
200
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetSurfRef = 190,
201
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefCreate = 191,
202
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefDestroy = 192,
203
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetFormat = 193,
204
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefSetArray = 194,
205
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetFormat = 195,
206
+ CUPTI_DRIVER_TRACE_CBID_cuSurfRefGetArray = 196,
207
+ CUPTI_DRIVER_TRACE_CBID_cu64DeviceTotalMem = 197,
208
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPointer = 198,
209
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedSize = 199,
210
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetMappedPitch = 200,
211
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D10ResourceGetSurfaceDimensions = 201,
212
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetSurfaceDimensions = 202,
213
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPointer = 203,
214
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedSize = 204,
215
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9ResourceGetMappedPitch = 205,
216
+ CUPTI_DRIVER_TRACE_CBID_cu64D3D9MapVertexBuffer = 206,
217
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject = 207,
218
+ CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObjectAsync = 208,
219
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDevices = 209,
220
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreateOnDevice = 210,
221
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDevices = 211,
222
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreateOnDevice = 212,
223
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9GetDevices = 213,
224
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreateOnDevice = 214,
225
+ CUPTI_DRIVER_TRACE_CBID_cu64MemHostAlloc = 215,
226
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async = 216,
227
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD8Async = 217,
228
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async = 218,
229
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD16Async = 219,
230
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async = 220,
231
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD32Async = 221,
232
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async = 222,
233
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D8Async = 223,
234
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async = 224,
235
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D16Async = 225,
236
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async = 226,
237
+ CUPTI_DRIVER_TRACE_CBID_cu64MemsetD2D32Async = 227,
238
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayCreate = 228,
239
+ CUPTI_DRIVER_TRACE_CBID_cu64ArrayGetDescriptor = 229,
240
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DCreate = 230,
241
+ CUPTI_DRIVER_TRACE_CBID_cu64Array3DGetDescriptor = 231,
242
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2D = 232,
243
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DUnaligned = 233,
244
+ CUPTI_DRIVER_TRACE_CBID_cu64Memcpy2DAsync = 234,
245
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v2 = 235,
246
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10CtxCreate_v2 = 236,
247
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11CtxCreate_v2 = 237,
248
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9CtxCreate_v2 = 238,
249
+ CUPTI_DRIVER_TRACE_CBID_cuGLCtxCreate_v2 = 239,
250
+ CUPTI_DRIVER_TRACE_CBID_cuVDPAUCtxCreate_v2 = 240,
251
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetGlobal_v2 = 241,
252
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetInfo_v2 = 242,
253
+ CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2 = 243,
254
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocPitch_v2 = 244,
255
+ CUPTI_DRIVER_TRACE_CBID_cuMemFree_v2 = 245,
256
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAddressRange_v2 = 246,
257
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostGetDevicePointer_v2 = 247,
258
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_v2 = 248,
259
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2 = 249,
260
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2 = 250,
261
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2 = 251,
262
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2 = 252,
263
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2 = 253,
264
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2 = 254,
265
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress_v2 = 255,
266
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v2 = 256,
267
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetAddress_v2 = 257,
268
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedPointer_v2 = 258,
269
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceTotalMem_v2 = 259,
270
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPointer_v2 = 260,
271
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedSize_v2 = 261,
272
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetMappedPitch_v2 = 262,
273
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10ResourceGetSurfaceDimensions_v2 = 263,
274
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetSurfaceDimensions_v2 = 264,
275
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPointer_v2 = 265,
276
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedSize_v2 = 266,
277
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9ResourceGetMappedPitch_v2 = 267,
278
+ CUPTI_DRIVER_TRACE_CBID_cuD3D9MapVertexBuffer_v2 = 268,
279
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2 = 269,
280
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2 = 270,
281
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostAlloc_v2 = 271,
282
+ CUPTI_DRIVER_TRACE_CBID_cuArrayCreate_v2 = 272,
283
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetDescriptor_v2 = 273,
284
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DCreate_v2 = 274,
285
+ CUPTI_DRIVER_TRACE_CBID_cuArray3DGetDescriptor_v2 = 275,
286
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2 = 276,
287
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2 = 277,
288
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2 = 278,
289
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2 = 279,
290
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2 = 280,
291
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2 = 281,
292
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2 = 282,
293
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2 = 283,
294
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2 = 284,
295
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2 = 285,
296
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2 = 286,
297
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2 = 287,
298
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2 = 288,
299
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2 = 289,
300
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2 = 290,
301
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2 = 291,
302
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2 = 292,
303
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2 = 293,
304
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocHost_v2 = 294,
305
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent = 295,
306
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetApiVersion = 296,
307
+ CUPTI_DRIVER_TRACE_CBID_cuD3D10GetDirect3DDevice = 297,
308
+ CUPTI_DRIVER_TRACE_CBID_cuD3D11GetDirect3DDevice = 298,
309
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCacheConfig = 299,
310
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCacheConfig = 300,
311
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister = 301,
312
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostUnregister = 302,
313
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetCurrent = 303,
314
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetCurrent = 304,
315
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy = 305,
316
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync = 306,
317
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel = 307,
318
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStart = 308,
319
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerStop = 309,
320
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttribute = 310,
321
+ CUPTI_DRIVER_TRACE_CBID_cuProfilerInitialize = 311,
322
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceCanAccessPeer = 312,
323
+ CUPTI_DRIVER_TRACE_CBID_cuCtxEnablePeerAccess = 313,
324
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDisablePeerAccess = 314,
325
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerRegister = 315,
326
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerUnregister = 316,
327
+ CUPTI_DRIVER_TRACE_CBID_cuMemPeerGetDevicePointer = 317,
328
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer = 318,
329
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync = 319,
330
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer = 320,
331
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync = 321,
332
+ CUPTI_DRIVER_TRACE_CBID_cuCtxDestroy_v2 = 322,
333
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPushCurrent_v2 = 323,
334
+ CUPTI_DRIVER_TRACE_CBID_cuCtxPopCurrent_v2 = 324,
335
+ CUPTI_DRIVER_TRACE_CBID_cuEventDestroy_v2 = 325,
336
+ CUPTI_DRIVER_TRACE_CBID_cuStreamDestroy_v2 = 326,
337
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetAddress2D_v3 = 327,
338
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetMemHandle = 328,
339
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle = 329,
340
+ CUPTI_DRIVER_TRACE_CBID_cuIpcCloseMemHandle = 330,
341
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetByPCIBusId = 331,
342
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetPCIBusId = 332,
343
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices = 333,
344
+ CUPTI_DRIVER_TRACE_CBID_cuIpcGetEventHandle = 334,
345
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenEventHandle = 335,
346
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetSharedMemConfig = 336,
347
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetSharedMemConfig = 337,
348
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetSharedMemConfig = 338,
349
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectCreate = 339,
350
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectDestroy = 340,
351
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceDesc = 341,
352
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetTextureDesc = 342,
353
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectCreate = 343,
354
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectDestroy = 344,
355
+ CUPTI_DRIVER_TRACE_CBID_cuSurfObjectGetResourceDesc = 345,
356
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback = 346,
357
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayCreate = 347,
358
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetLevel = 348,
359
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayDestroy = 349,
360
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmappedArray = 350,
361
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapFilterMode = 351,
362
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelBias = 352,
363
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMipmapLevelClamp = 353,
364
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetMaxAnisotropy = 354,
365
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmappedArray = 355,
366
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapFilterMode = 356,
367
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelBias = 357,
368
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMipmapLevelClamp = 358,
369
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetMaxAnisotropy = 359,
370
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedMipmappedArray = 360,
371
+ CUPTI_DRIVER_TRACE_CBID_cuTexObjectGetResourceViewDesc = 361,
372
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate = 362,
373
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData = 363,
374
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile = 364,
375
+ CUPTI_DRIVER_TRACE_CBID_cuLinkComplete = 365,
376
+ CUPTI_DRIVER_TRACE_CBID_cuLinkDestroy = 366,
377
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCreateWithPriority = 367,
378
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority = 368,
379
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags = 369,
380
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetStreamPriorityRange = 370,
381
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocManaged = 371,
382
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorString = 372,
383
+ CUPTI_DRIVER_TRACE_CBID_cuGetErrorName = 373,
384
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessor = 374,
385
+ CUPTI_DRIVER_TRACE_CBID_cuCompilePtx = 375,
386
+ CUPTI_DRIVER_TRACE_CBID_cuBinaryFree = 376,
387
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync = 377,
388
+ CUPTI_DRIVER_TRACE_CBID_cuPointerSetAttribute = 378,
389
+ CUPTI_DRIVER_TRACE_CBID_cuMemHostRegister_v2 = 379,
390
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceSetMapFlags_v2 = 380,
391
+ CUPTI_DRIVER_TRACE_CBID_cuLinkCreate_v2 = 381,
392
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddData_v2 = 382,
393
+ CUPTI_DRIVER_TRACE_CBID_cuLinkAddFile_v2 = 383,
394
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSize = 384,
395
+ CUPTI_DRIVER_TRACE_CBID_cuGLGetDevices_v2 = 385,
396
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRetain = 386,
397
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease = 387,
398
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags = 388,
399
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset = 389,
400
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsEGLRegisterImage = 390,
401
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetFlags = 391,
402
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxGetState = 392,
403
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnect = 393,
404
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerDisconnect = 394,
405
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerAcquireFrame = 395,
406
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerReleaseFrame = 396,
407
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoD_v2_ptds = 397,
408
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoH_v2_ptds = 398,
409
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoD_v2_ptds = 399,
410
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoA_v2_ptds = 400,
411
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoD_v2_ptds = 401,
412
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoA_v2_ptds = 402,
413
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoH_v2_ptds = 403,
414
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoA_v2_ptds = 404,
415
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2D_v2_ptds = 405,
416
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DUnaligned_v2_ptds = 406,
417
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3D_v2_ptds = 407,
418
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy_ptds = 408,
419
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeer_ptds = 409,
420
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeer_ptds = 410,
421
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8_v2_ptds = 411,
422
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16_v2_ptds = 412,
423
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32_v2_ptds = 413,
424
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8_v2_ptds = 414,
425
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16_v2_ptds = 415,
426
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32_v2_ptds = 416,
427
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObject_v2_ptds = 417,
428
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAsync_ptsz = 418,
429
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoAAsync_v2_ptsz = 419,
430
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyAtoHAsync_v2_ptsz = 420,
431
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyHtoDAsync_v2_ptsz = 421,
432
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoHAsync_v2_ptsz = 422,
433
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyDtoDAsync_v2_ptsz = 423,
434
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy2DAsync_v2_ptsz = 424,
435
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DAsync_v2_ptsz = 425,
436
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpyPeerAsync_ptsz = 426,
437
+ CUPTI_DRIVER_TRACE_CBID_cuMemcpy3DPeerAsync_ptsz = 427,
438
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD8Async_ptsz = 428,
439
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD16Async_ptsz = 429,
440
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD32Async_ptsz = 430,
441
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D8Async_ptsz = 431,
442
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D16Async_ptsz = 432,
443
+ CUPTI_DRIVER_TRACE_CBID_cuMemsetD2D32Async_ptsz = 433,
444
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetPriority_ptsz = 434,
445
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetFlags_ptsz = 435,
446
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitEvent_ptsz = 436,
447
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAddCallback_ptsz = 437,
448
+ CUPTI_DRIVER_TRACE_CBID_cuStreamAttachMemAsync_ptsz = 438,
449
+ CUPTI_DRIVER_TRACE_CBID_cuStreamQuery_ptsz = 439,
450
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSynchronize_ptsz = 440,
451
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecord_ptsz = 441,
452
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel_ptsz = 442,
453
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsMapResources_ptsz = 443,
454
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsUnmapResources_ptsz = 444,
455
+ CUPTI_DRIVER_TRACE_CBID_cuGLMapBufferObjectAsync_v2_ptsz = 445,
456
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerConnect = 446,
457
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerDisconnect = 447,
458
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerPresentFrame = 448,
459
+ CUPTI_DRIVER_TRACE_CBID_cuGraphicsResourceGetMappedEglFrame = 449,
460
+ CUPTI_DRIVER_TRACE_CBID_cuPointerGetAttributes = 450,
461
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags = 451,
462
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialBlockSizeWithFlags = 452,
463
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamProducerReturnFrame = 453,
464
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetP2PAttribute = 454,
465
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefSetBorderColor = 455,
466
+ CUPTI_DRIVER_TRACE_CBID_cuTexRefGetBorderColor = 456,
467
+ CUPTI_DRIVER_TRACE_CBID_cuMemAdvise = 457,
468
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32 = 458,
469
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_ptsz = 459,
470
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32 = 460,
471
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_ptsz = 461,
472
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp = 462,
473
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_ptsz = 463,
474
+ CUPTI_DRIVER_TRACE_CBID_cuNVNbufferGetPointer = 464,
475
+ CUPTI_DRIVER_TRACE_CBID_cuNVNtextureGetArray = 465,
476
+ CUPTI_DRIVER_TRACE_CBID_cuNNSetAllocator = 466,
477
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync = 467,
478
+ CUPTI_DRIVER_TRACE_CBID_cuMemPrefetchAsync_ptsz = 468,
479
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromNVNSync = 469,
480
+ CUPTI_DRIVER_TRACE_CBID_cuEGLStreamConsumerConnectWithFlags = 470,
481
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttribute = 471,
482
+ CUPTI_DRIVER_TRACE_CBID_cuMemRangeGetAttributes = 472,
483
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64 = 473,
484
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_ptsz = 474,
485
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64 = 475,
486
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_ptsz = 476,
487
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel = 477,
488
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernel_ptsz = 478,
489
+ CUPTI_DRIVER_TRACE_CBID_cuEventCreateFromEGLSync = 479,
490
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchCooperativeKernelMultiDevice = 480,
491
+ CUPTI_DRIVER_TRACE_CBID_cuFuncSetAttribute = 481,
492
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid = 482,
493
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx = 483,
494
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCtx_ptsz = 484,
495
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalMemory = 485,
496
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedBuffer = 486,
497
+ CUPTI_DRIVER_TRACE_CBID_cuExternalMemoryGetMappedMipmappedArray = 487,
498
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalMemory = 488,
499
+ CUPTI_DRIVER_TRACE_CBID_cuImportExternalSemaphore = 489,
500
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync = 490,
501
+ CUPTI_DRIVER_TRACE_CBID_cuSignalExternalSemaphoresAsync_ptsz = 491,
502
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync = 492,
503
+ CUPTI_DRIVER_TRACE_CBID_cuWaitExternalSemaphoresAsync_ptsz = 493,
504
+ CUPTI_DRIVER_TRACE_CBID_cuDestroyExternalSemaphore = 494,
505
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture = 495,
506
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_ptsz = 496,
507
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture = 497,
508
+ CUPTI_DRIVER_TRACE_CBID_cuStreamEndCapture_ptsz = 498,
509
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing = 499,
510
+ CUPTI_DRIVER_TRACE_CBID_cuStreamIsCapturing_ptsz = 500,
511
+ CUPTI_DRIVER_TRACE_CBID_cuGraphCreate = 501,
512
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode = 502,
513
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams = 503,
514
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemcpyNode = 504,
515
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeGetParams = 505,
516
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemsetNode = 506,
517
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeGetParams = 507,
518
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemsetNodeSetParams = 508,
519
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetType = 509,
520
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetRootNodes = 510,
521
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependencies = 511,
522
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetDependentNodes = 512,
523
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate = 513,
524
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch = 514,
525
+ CUPTI_DRIVER_TRACE_CBID_cuGraphLaunch_ptsz = 515,
526
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecDestroy = 516,
527
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroy = 517,
528
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddDependencies = 518,
529
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRemoveDependencies = 519,
530
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemcpyNodeSetParams = 520,
531
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams = 521,
532
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDestroyNode = 522,
533
+ CUPTI_DRIVER_TRACE_CBID_cuGraphClone = 523,
534
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeFindInClone = 524,
535
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddChildGraphNode = 525,
536
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEmptyNode = 526,
537
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc = 527,
538
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchHostFunc_ptsz = 528,
539
+ CUPTI_DRIVER_TRACE_CBID_cuGraphChildGraphNodeGetGraph = 529,
540
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddHostNode = 530,
541
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeGetParams = 531,
542
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetLuid = 532,
543
+ CUPTI_DRIVER_TRACE_CBID_cuGraphHostNodeSetParams = 533,
544
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetNodes = 534,
545
+ CUPTI_DRIVER_TRACE_CBID_cuGraphGetEdges = 535,
546
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo = 536,
547
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_ptsz = 537,
548
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams = 538,
549
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2 = 539,
550
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBeginCapture_v2_ptsz = 540,
551
+ CUPTI_DRIVER_TRACE_CBID_cuThreadExchangeStreamCaptureMode = 541,
552
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetNvSciSyncAttributes = 542,
553
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyAvailableDynamicSMemPerBlock = 543,
554
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxRelease_v2 = 544,
555
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxReset_v2 = 545,
556
+ CUPTI_DRIVER_TRACE_CBID_cuDevicePrimaryCtxSetFlags_v2 = 546,
557
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressReserve = 547,
558
+ CUPTI_DRIVER_TRACE_CBID_cuMemAddressFree = 548,
559
+ CUPTI_DRIVER_TRACE_CBID_cuMemCreate = 549,
560
+ CUPTI_DRIVER_TRACE_CBID_cuMemRelease = 550,
561
+ CUPTI_DRIVER_TRACE_CBID_cuMemMap = 551,
562
+ CUPTI_DRIVER_TRACE_CBID_cuMemUnmap = 552,
563
+ CUPTI_DRIVER_TRACE_CBID_cuMemSetAccess = 553,
564
+ CUPTI_DRIVER_TRACE_CBID_cuMemExportToShareableHandle = 554,
565
+ CUPTI_DRIVER_TRACE_CBID_cuMemImportFromShareableHandle = 555,
566
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationGranularity = 556,
567
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAllocationPropertiesFromHandle = 557,
568
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetAccess = 558,
569
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags = 559,
570
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetFlags_ptsz = 560,
571
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate = 561,
572
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemcpyNodeSetParams = 562,
573
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecMemsetNodeSetParams = 563,
574
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecHostNodeSetParams = 564,
575
+ CUPTI_DRIVER_TRACE_CBID_cuMemRetainAllocationHandle = 565,
576
+ CUPTI_DRIVER_TRACE_CBID_cuFuncGetModule = 566,
577
+ CUPTI_DRIVER_TRACE_CBID_cuIpcOpenMemHandle_v2 = 567,
578
+ CUPTI_DRIVER_TRACE_CBID_cuCtxResetPersistingL2Cache = 568,
579
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeCopyAttributes = 569,
580
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetAttribute = 570,
581
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetAttribute = 571,
582
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes = 572,
583
+ CUPTI_DRIVER_TRACE_CBID_cuStreamCopyAttributes_ptsz = 573,
584
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute = 574,
585
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetAttribute_ptsz = 575,
586
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute = 576,
587
+ CUPTI_DRIVER_TRACE_CBID_cuStreamSetAttribute_ptsz = 577,
588
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiate_v2 = 578,
589
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetTexture1DLinearMaxWidth = 579,
590
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload = 580,
591
+ CUPTI_DRIVER_TRACE_CBID_cuGraphUpload_ptsz = 581,
592
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetSparseProperties = 582,
593
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetSparseProperties = 583,
594
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync = 584,
595
+ CUPTI_DRIVER_TRACE_CBID_cuMemMapArrayAsync_ptsz = 585,
596
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecChildGraphNodeSetParams = 586,
597
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags = 587,
598
+ CUPTI_DRIVER_TRACE_CBID_cuEventRecordWithFlags_ptsz = 588,
599
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventRecordNode = 589,
600
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddEventWaitNode = 590,
601
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeGetEvent = 591,
602
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeGetEvent = 592,
603
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventRecordNodeSetEvent = 593,
604
+ CUPTI_DRIVER_TRACE_CBID_cuGraphEventWaitNodeSetEvent = 594,
605
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventRecordNodeSetEvent = 595,
606
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecEventWaitNodeSetEvent = 596,
607
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetPlane = 597,
608
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync = 598,
609
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocAsync_ptsz = 599,
610
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync = 600,
611
+ CUPTI_DRIVER_TRACE_CBID_cuMemFreeAsync_ptsz = 601,
612
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolTrimTo = 602,
613
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAttribute = 603,
614
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAttribute = 604,
615
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolSetAccess = 605,
616
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetDefaultMemPool = 606,
617
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolCreate = 607,
618
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolDestroy = 608,
619
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetMemPool = 609,
620
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetMemPool = 610,
621
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync = 611,
622
+ CUPTI_DRIVER_TRACE_CBID_cuMemAllocFromPoolAsync_ptsz = 612,
623
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportToShareableHandle = 613,
624
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportFromShareableHandle = 614,
625
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolExportPointer = 615,
626
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolImportPointer = 616,
627
+ CUPTI_DRIVER_TRACE_CBID_cuMemPoolGetAccess = 617,
628
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresSignalNode = 618,
629
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeGetParams = 619,
630
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresSignalNodeSetParams = 620,
631
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddExternalSemaphoresWaitNode = 621,
632
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeGetParams = 622,
633
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExternalSemaphoresWaitNodeSetParams = 623,
634
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresSignalNodeSetParams = 624,
635
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecExternalSemaphoresWaitNodeSetParams = 625,
636
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress = 626,
637
+ CUPTI_DRIVER_TRACE_CBID_cuFlushGPUDirectRDMAWrites = 627,
638
+ CUPTI_DRIVER_TRACE_CBID_cuGraphDebugDotPrint = 628,
639
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2 = 629,
640
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetCaptureInfo_v2_ptsz = 630,
641
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies = 631,
642
+ CUPTI_DRIVER_TRACE_CBID_cuStreamUpdateCaptureDependencies_ptsz = 632,
643
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectCreate = 633,
644
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRetain = 634,
645
+ CUPTI_DRIVER_TRACE_CBID_cuUserObjectRelease = 635,
646
+ CUPTI_DRIVER_TRACE_CBID_cuGraphRetainUserObject = 636,
647
+ CUPTI_DRIVER_TRACE_CBID_cuGraphReleaseUserObject = 637,
648
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemAllocNode = 638,
649
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddMemFreeNode = 639,
650
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGraphMemTrim = 640,
651
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetGraphMemAttribute = 641,
652
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceSetGraphMemAttribute = 642,
653
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithFlags = 643,
654
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetExecAffinitySupport = 644,
655
+ CUPTI_DRIVER_TRACE_CBID_cuCtxCreate_v3 = 645,
656
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetExecAffinity = 646,
657
+ CUPTI_DRIVER_TRACE_CBID_cuDeviceGetUuid_v2 = 647,
658
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemAllocNodeGetParams = 648,
659
+ CUPTI_DRIVER_TRACE_CBID_cuGraphMemFreeNodeGetParams = 649,
660
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeSetEnabled = 650,
661
+ CUPTI_DRIVER_TRACE_CBID_cuGraphNodeGetEnabled = 651,
662
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx = 652,
663
+ CUPTI_DRIVER_TRACE_CBID_cuLaunchKernelEx_ptsz = 653,
664
+ CUPTI_DRIVER_TRACE_CBID_cuArrayGetMemoryRequirements = 654,
665
+ CUPTI_DRIVER_TRACE_CBID_cuMipmappedArrayGetMemoryRequirements = 655,
666
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams = 656,
667
+ CUPTI_DRIVER_TRACE_CBID_cuGraphInstantiateWithParams_ptsz = 657,
668
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecGetFlags = 658,
669
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2 = 659,
670
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue32_v2_ptsz = 660,
671
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2 = 661,
672
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWaitValue64_v2_ptsz = 662,
673
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2 = 663,
674
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue32_v2_ptsz = 664,
675
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2 = 665,
676
+ CUPTI_DRIVER_TRACE_CBID_cuStreamWriteValue64_v2_ptsz = 666,
677
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2 = 667,
678
+ CUPTI_DRIVER_TRACE_CBID_cuStreamBatchMemOp_v2_ptsz = 668,
679
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddBatchMemOpNode = 669,
680
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeGetParams = 670,
681
+ CUPTI_DRIVER_TRACE_CBID_cuGraphBatchMemOpNodeSetParams = 671,
682
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecBatchMemOpNodeSetParams = 672,
683
+ CUPTI_DRIVER_TRACE_CBID_cuModuleGetLoadingMode = 673,
684
+ CUPTI_DRIVER_TRACE_CBID_cuMemGetHandleForAddressRange = 674,
685
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxPotentialClusterSize = 675,
686
+ CUPTI_DRIVER_TRACE_CBID_cuOccupancyMaxActiveClusters = 676,
687
+ CUPTI_DRIVER_TRACE_CBID_cuGetProcAddress_v2 = 677,
688
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadData = 678,
689
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryLoadFromFile = 679,
690
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryUnload = 680,
691
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetKernel = 681,
692
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetModule = 682,
693
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetFunction = 683,
694
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetGlobal = 684,
695
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetManaged = 685,
696
+ CUPTI_DRIVER_TRACE_CBID_cuKernelGetAttribute = 686,
697
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetAttribute = 687,
698
+ CUPTI_DRIVER_TRACE_CBID_cuKernelSetCacheConfig = 688,
699
+ CUPTI_DRIVER_TRACE_CBID_cuGraphAddKernelNode_v2 = 689,
700
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeGetParams_v2 = 690,
701
+ CUPTI_DRIVER_TRACE_CBID_cuGraphKernelNodeSetParams_v2 = 691,
702
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecKernelNodeSetParams_v2 = 692,
703
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId = 693,
704
+ CUPTI_DRIVER_TRACE_CBID_cuStreamGetId_ptsz = 694,
705
+ CUPTI_DRIVER_TRACE_CBID_cuCtxGetId = 695,
706
+ CUPTI_DRIVER_TRACE_CBID_cuGraphExecUpdate_v2 = 696,
707
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeTiled = 697,
708
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapEncodeIm2col = 698,
709
+ CUPTI_DRIVER_TRACE_CBID_cuTensorMapReplaceAddress = 699,
710
+ CUPTI_DRIVER_TRACE_CBID_cuLibraryGetUnifiedFunction = 700,
711
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttribute = 701,
712
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpGetAttributeGlobal = 702,
713
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttribute = 703,
714
+ CUPTI_DRIVER_TRACE_CBID_cuCoredumpSetAttributeGlobal = 704,
715
+ CUPTI_DRIVER_TRACE_CBID_cuCtxSetFlags = 705,
716
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastCreate = 706,
717
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastAddDevice = 707,
718
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindMem = 708,
719
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastBindAddr = 709,
720
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastUnbind = 710,
721
+ CUPTI_DRIVER_TRACE_CBID_cuMulticastGetGranularity = 711,
722
+ CUPTI_DRIVER_TRACE_CBID_SIZE = 712,
723
+ CUPTI_DRIVER_TRACE_CBID_FORCE_INT = 0x7fffffff
724
+ } CUpti_driver_api_trace_cbid;
725
+
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h ADDED
@@ -0,0 +1,1371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_EVENTS_H_)
51
+ #define _CUPTI_EVENTS_H_
52
+
53
+ #include <cuda.h>
54
+ #include <string.h>
55
+ #include <cuda_stdint.h>
56
+ #include <cupti_result.h>
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #if defined(__cplusplus)
67
+ extern "C" {
68
+ #endif
69
+
70
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
71
+ #pragma GCC visibility push(default)
72
+ #endif
73
+
74
+ /**
75
+ * \defgroup CUPTI_EVENT_API CUPTI Event API
76
+ * Functions, types, and enums that implement the CUPTI Event API.
77
+ *
78
+ * \note CUPTI event API from the header cupti_events.h are not supported on devices
79
+ * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
80
+ * These API will be deprecated in a future CUDA release. These are replaced by
81
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
82
+ * in the headers nvperf_host.h and nvperf_target.h which are supported on
83
+ * devices with compute capability 7.0 and higher (i.e. Volta and later GPU
84
+ * architectures).
85
+ *
86
+ * @{
87
+ */
88
+
89
+ /**
90
+ * \brief ID for an event.
91
+ *
92
+ * An event represents a countable activity, action, or occurrence on
93
+ * the device.
94
+ */
95
+ typedef uint32_t CUpti_EventID;
96
+
97
+ /**
98
+ * \brief ID for an event domain.
99
+ *
100
+ * ID for an event domain. An event domain represents a group of
101
+ * related events. A device may have multiple instances of a domain,
102
+ * indicating that the device can simultaneously record multiple
103
+ * instances of each event within that domain.
104
+ */
105
+ typedef uint32_t CUpti_EventDomainID;
106
+
107
+ /**
108
+ * \brief A group of events.
109
+ *
110
+ * An event group is a collection of events that are managed
111
+ * together. All events in an event group must belong to the same
112
+ * domain.
113
+ */
114
+ typedef void *CUpti_EventGroup;
115
+
116
+ /**
117
+ * \brief Device class.
118
+ *
119
+ * Enumeration of device classes for device attribute
120
+ * CUPTI_DEVICE_ATTR_DEVICE_CLASS.
121
+ */
122
+ typedef enum {
123
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TESLA = 0,
124
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_QUADRO = 1,
125
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_GEFORCE = 2,
126
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TEGRA = 3,
127
+ } CUpti_DeviceAttributeDeviceClass;
128
+
129
+ /**
130
+ * \brief Device attributes.
131
+ *
132
+ * CUPTI device attributes. These attributes can be read using \ref
133
+ * cuptiDeviceGetAttribute.
134
+ */
135
+ typedef enum {
136
+ /**
137
+ * Number of event IDs for a device. Value is a uint32_t.
138
+ */
139
+ CUPTI_DEVICE_ATTR_MAX_EVENT_ID = 1,
140
+ /**
141
+ * Number of event domain IDs for a device. Value is a uint32_t.
142
+ */
143
+ CUPTI_DEVICE_ATTR_MAX_EVENT_DOMAIN_ID = 2,
144
+ /**
145
+ * Get global memory bandwidth in Kbytes/sec. Value is a uint64_t.
146
+ */
147
+ CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH = 3,
148
+ /**
149
+ * Get theoretical maximum number of instructions per cycle. Value
150
+ * is a uint32_t.
151
+ */
152
+ CUPTI_DEVICE_ATTR_INSTRUCTION_PER_CYCLE = 4,
153
+ /**
154
+ * Get theoretical maximum number of single precision instructions
155
+ * that can be executed per second. Value is a uint64_t.
156
+ */
157
+ CUPTI_DEVICE_ATTR_INSTRUCTION_THROUGHPUT_SINGLE_PRECISION = 5,
158
+ /**
159
+ * Get number of frame buffers for device. Value is a uint64_t.
160
+ */
161
+ CUPTI_DEVICE_ATTR_MAX_FRAME_BUFFERS = 6,
162
+ /**
163
+ * Get PCIE link rate in Mega bits/sec for device. Return 0 if bus-type
164
+ * is non-PCIE. Value is a uint64_t.
165
+ */
166
+ CUPTI_DEVICE_ATTR_PCIE_LINK_RATE = 7,
167
+ /**
168
+ * Get PCIE link width for device. Return 0 if bus-type
169
+ * is non-PCIE. Value is a uint64_t.
170
+ */
171
+ CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH = 8,
172
+ /**
173
+ * Get PCIE generation for device. Return 0 if bus-type
174
+ * is non-PCIE. Value is a uint64_t.
175
+ */
176
+ CUPTI_DEVICE_ATTR_PCIE_GEN = 9,
177
+ /**
178
+ * Get the class for the device. Value is a
179
+ * CUpti_DeviceAttributeDeviceClass.
180
+ */
181
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS = 10,
182
+ /**
183
+ * Get the peak single precision flop per cycle. Value is a uint64_t.
184
+ */
185
+ CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE = 11,
186
+ /**
187
+ * Get the peak double precision flop per cycle. Value is a uint64_t.
188
+ */
189
+ CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE = 12,
190
+ /**
191
+ * Get number of L2 units. Value is a uint64_t.
192
+ */
193
+ CUPTI_DEVICE_ATTR_MAX_L2_UNITS = 13,
194
+ /**
195
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_SHARED
196
+ * preference. Value is a uint64_t.
197
+ */
198
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_SHARED = 14,
199
+ /**
200
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_L1
201
+ * preference. Value is a uint64_t.
202
+ */
203
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_L1 = 15,
204
+ /**
205
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_EQUAL
206
+ * preference. Value is a uint64_t.
207
+ */
208
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_EQUAL = 16,
209
+ /**
210
+ * Get the peak half precision flop per cycle. Value is a uint64_t.
211
+ */
212
+ CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE = 17,
213
+ /**
214
+ * Check if Nvlink is connected to device. Returns 1, if at least one
215
+ * Nvlink is connected to the device, returns 0 otherwise.
216
+ * Value is a uint32_t.
217
+ */
218
+ CUPTI_DEVICE_ATTR_NVLINK_PRESENT = 18,
219
+ /**
220
+ * Check if Nvlink is present between GPU and CPU. Returns Bandwidth,
221
+ * in Bytes/sec, if Nvlink is present, returns 0 otherwise.
222
+ * Value is a uint64_t.
223
+ */
224
+ CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW = 19,
225
+ /**
226
+ * Check if NVSwitch is present in the underlying topology.
227
+ * Returns 1, if present, returns 0 otherwise.
228
+ * Value is a uint32_t.
229
+ */
230
+ CUPTI_DEVICE_ATTR_NVSWITCH_PRESENT = 20,
231
+ CUPTI_DEVICE_ATTR_FORCE_INT = 0x7fffffff,
232
+ } CUpti_DeviceAttribute;
233
+
234
+ /**
235
+ * \brief Event domain attributes.
236
+ *
237
+ * Event domain attributes. Except where noted, all the attributes can
238
+ * be read using either \ref cuptiDeviceGetEventDomainAttribute or
239
+ * \ref cuptiEventDomainGetAttribute.
240
+ */
241
+ typedef enum {
242
+ /**
243
+ * Event domain name. Value is a null terminated const c-string.
244
+ */
245
+ CUPTI_EVENT_DOMAIN_ATTR_NAME = 0,
246
+ /**
247
+ * Number of instances of the domain for which event counts will be
248
+ * collected. The domain may have additional instances that cannot
249
+ * be profiled (see CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT).
250
+ * Can be read only with \ref
251
+ * cuptiDeviceGetEventDomainAttribute. Value is a uint32_t.
252
+ */
253
+ CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT = 1,
254
+ /**
255
+ * Total number of instances of the domain, including instances that
256
+ * cannot be profiled. Use CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT
257
+ * to get the number of instances that can be profiled. Can be read
258
+ * only with \ref cuptiDeviceGetEventDomainAttribute. Value is a
259
+ * uint32_t.
260
+ */
261
+ CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT = 3,
262
+ /**
263
+ * Collection method used for events contained in the event domain.
264
+ * Value is a \ref CUpti_EventCollectionMethod.
265
+ */
266
+ CUPTI_EVENT_DOMAIN_ATTR_COLLECTION_METHOD = 4,
267
+
268
+ CUPTI_EVENT_DOMAIN_ATTR_FORCE_INT = 0x7fffffff,
269
+ } CUpti_EventDomainAttribute;
270
+
271
+ /**
272
+ * \brief The collection method used for an event.
273
+ *
274
+ * The collection method indicates how an event is collected.
275
+ */
276
+ typedef enum {
277
+ /**
278
+ * Event is collected using a hardware global performance monitor.
279
+ */
280
+ CUPTI_EVENT_COLLECTION_METHOD_PM = 0,
281
+ /**
282
+ * Event is collected using a hardware SM performance monitor.
283
+ */
284
+ CUPTI_EVENT_COLLECTION_METHOD_SM = 1,
285
+ /**
286
+ * Event is collected using software instrumentation.
287
+ */
288
+ CUPTI_EVENT_COLLECTION_METHOD_INSTRUMENTED = 2,
289
+ /**
290
+ * Event is collected using NvLink throughput counter method.
291
+ */
292
+ CUPTI_EVENT_COLLECTION_METHOD_NVLINK_TC = 3,
293
+ CUPTI_EVENT_COLLECTION_METHOD_FORCE_INT = 0x7fffffff
294
+ } CUpti_EventCollectionMethod;
295
+
296
+ /**
297
+ * \brief Event group attributes.
298
+ *
299
+ * Event group attributes. These attributes can be read using \ref
300
+ * cuptiEventGroupGetAttribute. Attributes marked [rw] can also be
301
+ * written using \ref cuptiEventGroupSetAttribute.
302
+ */
303
+ typedef enum {
304
+ /**
305
+ * The domain to which the event group is bound. This attribute is
306
+ * set when the first event is added to the group. Value is a
307
+ * CUpti_EventDomainID.
308
+ */
309
+ CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID = 0,
310
+ /**
311
+ * [rw] Profile all the instances of the domain for this
312
+ * eventgroup. This feature can be used to get load balancing
313
+ * across all instances of a domain. Value is an integer.
314
+ */
315
+ CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES = 1,
316
+ /**
317
+ * [rw] Reserved for user data.
318
+ */
319
+ CUPTI_EVENT_GROUP_ATTR_USER_DATA = 2,
320
+ /**
321
+ * Number of events in the group. Value is a uint32_t.
322
+ */
323
+ CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS = 3,
324
+ /**
325
+ * Enumerates events in the group. Value is a pointer to buffer of
326
+ * size sizeof(CUpti_EventID) * num_of_events in the eventgroup.
327
+ * num_of_events can be queried using
328
+ * CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS.
329
+ */
330
+ CUPTI_EVENT_GROUP_ATTR_EVENTS = 4,
331
+ /**
332
+ * Number of instances of the domain bound to this event group that
333
+ * will be counted. Value is a uint32_t.
334
+ */
335
+ CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT = 5,
336
+ /**
337
+ * Event group scope can be set to CUPTI_EVENT_PROFILING_SCOPE_DEVICE or
338
+ * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT for an eventGroup, before
339
+ * adding any event.
340
+ * Sets the scope of eventgroup as CUPTI_EVENT_PROFILING_SCOPE_DEVICE or
341
+ * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT when the scope of the events
342
+ * that will be added is CUPTI_EVENT_PROFILING_SCOPE_BOTH.
343
+ * If profiling scope of event is either
344
+ * CUPTI_EVENT_PROFILING_SCOPE_DEVICE or CUPTI_EVENT_PROFILING_SCOPE_CONTEXT
345
+ * then setting this attribute will not affect the default scope.
346
+ * It is not allowed to add events of different scope to same eventgroup.
347
+ * Value is a uint32_t.
348
+ */
349
+ CUPTI_EVENT_GROUP_ATTR_PROFILING_SCOPE = 6,
350
+ CUPTI_EVENT_GROUP_ATTR_FORCE_INT = 0x7fffffff,
351
+ } CUpti_EventGroupAttribute;
352
+
353
+ /**
354
+ * \brief Profiling scope for event.
355
+ *
356
+ * Profiling scope of event indicates if the event can be collected at context
357
+ * scope or device scope or both i.e. it can be collected at any of context or
358
+ * device scope.
359
+ */
360
+ typedef enum {
361
+ /**
362
+ * Event is collected at context scope.
363
+ */
364
+ CUPTI_EVENT_PROFILING_SCOPE_CONTEXT = 0,
365
+ /**
366
+ * Event is collected at device scope.
367
+ */
368
+ CUPTI_EVENT_PROFILING_SCOPE_DEVICE = 1,
369
+ /**
370
+ * Event can be collected at device or context scope.
371
+ * The scope can be set using \ref cuptiEventGroupSetAttribute API.
372
+ */
373
+ CUPTI_EVENT_PROFILING_SCOPE_BOTH = 2,
374
+ CUPTI_EVENT_PROFILING_SCOPE_FORCE_INT = 0x7fffffff
375
+ } CUpti_EventProfilingScope;
376
+
377
+ /**
378
+ * \brief Event attributes.
379
+ *
380
+ * Event attributes. These attributes can be read using \ref
381
+ * cuptiEventGetAttribute.
382
+ */
383
+ typedef enum {
384
+ /**
385
+ * Event name. Value is a null terminated const c-string.
386
+ */
387
+ CUPTI_EVENT_ATTR_NAME = 0,
388
+ /**
389
+ * Short description of event. Value is a null terminated const
390
+ * c-string.
391
+ */
392
+ CUPTI_EVENT_ATTR_SHORT_DESCRIPTION = 1,
393
+ /**
394
+ * Long description of event. Value is a null terminated const
395
+ * c-string.
396
+ */
397
+ CUPTI_EVENT_ATTR_LONG_DESCRIPTION = 2,
398
+ /**
399
+ * Category of event. Value is CUpti_EventCategory.
400
+ */
401
+ CUPTI_EVENT_ATTR_CATEGORY = 3,
402
+ /**
403
+ * Profiling scope of the events. It can be either device or context or both.
404
+ * Value is a \ref CUpti_EventProfilingScope.
405
+ */
406
+ CUPTI_EVENT_ATTR_PROFILING_SCOPE = 5,
407
+
408
+ CUPTI_EVENT_ATTR_FORCE_INT = 0x7fffffff,
409
+ } CUpti_EventAttribute;
410
+
411
+ /**
412
+ * \brief Event collection modes.
413
+ *
414
+ * The event collection mode determines the period over which the
415
+ * events within the enabled event groups will be collected.
416
+ */
417
+ typedef enum {
418
+ /**
419
+ * Events are collected for the entire duration between the
420
+ * cuptiEventGroupEnable and cuptiEventGroupDisable calls.
421
+ * Event values are reset when the events are read.
422
+ * For CUDA toolkit v6.0 and older this was the default mode.
423
+ */
424
+ CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS = 0,
425
+ /**
426
+ * Events are collected only for the durations of kernel executions
427
+ * that occur between the cuptiEventGroupEnable and
428
+ * cuptiEventGroupDisable calls. Event collection begins when a
429
+ * kernel execution begins, and stops when kernel execution
430
+ * completes. Event values are reset to zero when each kernel
431
+ * execution begins. If multiple kernel executions occur between the
432
+ * cuptiEventGroupEnable and cuptiEventGroupDisable calls then the
433
+ * event values must be read after each kernel launch if those
434
+ * events need to be associated with the specific kernel launch.
435
+ * Note that collection in this mode may significantly change the
436
+ * overall performance characteristics of the application because
437
+ * kernel executions that occur between the cuptiEventGroupEnable and
438
+ * cuptiEventGroupDisable calls are serialized on the GPU.
439
+ * This is the default mode from CUDA toolkit v6.5
440
+ */
441
+ CUPTI_EVENT_COLLECTION_MODE_KERNEL = 1,
442
+ CUPTI_EVENT_COLLECTION_MODE_FORCE_INT = 0x7fffffff
443
+ } CUpti_EventCollectionMode;
444
+
445
+ /**
446
+ * \brief An event category.
447
+ *
448
+ * Each event is assigned to a category that represents the general
449
+ * type of the event. A event's category is accessed using \ref
450
+ * cuptiEventGetAttribute and the CUPTI_EVENT_ATTR_CATEGORY attribute.
451
+ */
452
+ typedef enum {
453
+ /**
454
+ * An instruction related event.
455
+ */
456
+ CUPTI_EVENT_CATEGORY_INSTRUCTION = 0,
457
+ /**
458
+ * A memory related event.
459
+ */
460
+ CUPTI_EVENT_CATEGORY_MEMORY = 1,
461
+ /**
462
+ * A cache related event.
463
+ */
464
+ CUPTI_EVENT_CATEGORY_CACHE = 2,
465
+ /**
466
+ * A profile-trigger event.
467
+ */
468
+ CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = 3,
469
+ /**
470
+ * A system event.
471
+ */
472
+ CUPTI_EVENT_CATEGORY_SYSTEM = 4,
473
+ CUPTI_EVENT_CATEGORY_FORCE_INT = 0x7fffffff
474
+ } CUpti_EventCategory;
475
+
476
+ /**
477
+ * \brief The overflow value for a CUPTI event.
478
+ *
479
+ * The CUPTI event value that indicates an overflow.
480
+ */
481
+ #define CUPTI_EVENT_OVERFLOW ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
482
+
483
+ /**
484
+ * \brief The value that indicates the event value is invalid
485
+ */
486
+ #define CUPTI_EVENT_INVALID ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
487
+
488
+ /**
489
+ * \brief Flags for cuptiEventGroupReadEvent an
490
+ * cuptiEventGroupReadAllEvents.
491
+ *
492
+ * Flags for \ref cuptiEventGroupReadEvent an \ref
493
+ * cuptiEventGroupReadAllEvents.
494
+ */
495
+ typedef enum {
496
+ /**
497
+ * No flags.
498
+ */
499
+ CUPTI_EVENT_READ_FLAG_NONE = 0,
500
+ CUPTI_EVENT_READ_FLAG_FORCE_INT = 0x7fffffff,
501
+ } CUpti_ReadEventFlags;
502
+
503
+
504
+ /**
505
+ * \brief A set of event groups.
506
+ *
507
+ * A set of event groups. When returned by \ref
508
+ * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
509
+ * a set indicates that event groups that can be enabled at the same
510
+ * time (i.e. all the events in the set can be collected
511
+ * simultaneously).
512
+ */
513
+ typedef struct {
514
+ /**
515
+ * The number of event groups in the set.
516
+ */
517
+ uint32_t numEventGroups;
518
+ /**
519
+ * An array of \p numEventGroups event groups.
520
+ */
521
+ CUpti_EventGroup *eventGroups;
522
+ } CUpti_EventGroupSet;
523
+
524
+ /**
525
+ * \brief A set of event group sets.
526
+ *
527
+ * A set of event group sets. When returned by \ref
528
+ * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
529
+ * a CUpti_EventGroupSets indicates the number of passes required to
530
+ * collect all the events, and the event groups that should be
531
+ * collected during each pass.
532
+ */
533
+ typedef struct {
534
+ /**
535
+ * Number of event group sets.
536
+ */
537
+ uint32_t numSets;
538
+ /**
539
+ * An array of \p numSets event group sets.
540
+ */
541
+ CUpti_EventGroupSet *sets;
542
+ } CUpti_EventGroupSets;
543
+
544
+ /**
545
+ * \brief Set the event collection mode.
546
+ *
547
+ * Set the event collection mode for a \p context. The \p mode
548
+ * controls the event collection behavior of all events in event
549
+ * groups created in the \p context. This API is invalid in kernel
550
+ * replay mode.
551
+ * \note \b Thread-safety: this function is thread safe.
552
+ *
553
+ * \param context The context
554
+ * \param mode The event collection mode
555
+ *
556
+ * \retval CUPTI_SUCCESS
557
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
558
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
559
+ * \retval CUPTI_ERROR_INVALID_OPERATION if called when replay mode is enabled
560
+ * \retval CUPTI_ERROR_NOT_SUPPORTED if mode is not supported on the device
561
+ */
562
+
563
+ CUptiResult CUPTIAPI cuptiSetEventCollectionMode(CUcontext context,
564
+ CUpti_EventCollectionMode mode);
565
+
566
+ /**
567
+ * \brief Read a device attribute.
568
+ *
569
+ * Read a device attribute and return it in \p *value.
570
+ * \note \b Thread-safety: this function is thread safe.
571
+ *
572
+ * \param device The CUDA device
573
+ * \param attrib The attribute to read
574
+ * \param valueSize Size of buffer pointed by the value, and
575
+ * returns the number of bytes written to \p value
576
+ * \param value Returns the value of the attribute
577
+ *
578
+ * \retval CUPTI_SUCCESS
579
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
580
+ * \retval CUPTI_ERROR_INVALID_DEVICE
581
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
582
+ * is NULL, or if \p attrib is not a device attribute
583
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
584
+ * attribute values, indicates that the \p value buffer is too small
585
+ * to hold the attribute value.
586
+ */
587
+ CUptiResult CUPTIAPI cuptiDeviceGetAttribute(CUdevice device,
588
+ CUpti_DeviceAttribute attrib,
589
+ size_t *valueSize,
590
+ void *value);
591
+
592
+ /**
593
+ * \brief Read a device timestamp.
594
+ *
595
+ * Returns the device timestamp in \p *timestamp. The timestamp is
596
+ * reported in nanoseconds and indicates the time since the device was
597
+ * last reset.
598
+ * \note \b Thread-safety: this function is thread safe.
599
+ *
600
+ * \param context A context on the device from which to get the timestamp
601
+ * \param timestamp Returns the device timestamp
602
+ *
603
+ * \retval CUPTI_SUCCESS
604
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
605
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
606
+ * \retval CUPTI_ERROR_INVALID_PARAMETER is \p timestamp is NULL
607
+
608
+ * **DEPRECATED** This API is deprecated as of CUDA 11.3
609
+ */
610
+ CUptiResult CUPTIAPI cuptiDeviceGetTimestamp(CUcontext context,
611
+ uint64_t *timestamp);
612
+
613
+ /**
614
+ * \brief Get the number of domains for a device.
615
+ *
616
+ * Returns the number of domains in \p numDomains for a device.
617
+ * \note \b Thread-safety: this function is thread safe.
618
+ *
619
+ * \param device The CUDA device
620
+ * \param numDomains Returns the number of domains
621
+ *
622
+ * \retval CUPTI_SUCCESS
623
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
624
+ * \retval CUPTI_ERROR_INVALID_DEVICE
625
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
626
+ */
627
+ CUptiResult CUPTIAPI cuptiDeviceGetNumEventDomains(CUdevice device,
628
+ uint32_t *numDomains);
629
+
630
+ /**
631
+ * \brief Get the event domains for a device.
632
+ *
633
+ * Returns the event domains IDs in \p domainArray for a device. The
634
+ * size of the \p domainArray buffer is given by \p
635
+ * *arraySizeBytes. The size of the \p domainArray buffer must be at
636
+ * least \p numdomains * sizeof(CUpti_EventDomainID) or else all
637
+ * domains will not be returned. The value returned in \p
638
+ * *arraySizeBytes contains the number of bytes returned in \p
639
+ * domainArray.
640
+ * \note \b Thread-safety: this function is thread safe.
641
+ *
642
+ * \param device The CUDA device
643
+ * \param arraySizeBytes The size of \p domainArray in bytes, and
644
+ * returns the number of bytes written to \p domainArray
645
+ * \param domainArray Returns the IDs of the event domains for the device
646
+ *
647
+ * \retval CUPTI_SUCCESS
648
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
649
+ * \retval CUPTI_ERROR_INVALID_DEVICE
650
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
651
+ * \p domainArray are NULL
652
+ */
653
+ CUptiResult CUPTIAPI cuptiDeviceEnumEventDomains(CUdevice device,
654
+ size_t *arraySizeBytes,
655
+ CUpti_EventDomainID *domainArray);
656
+
657
+ /**
658
+ * \brief Read an event domain attribute.
659
+ *
660
+ * Returns an event domain attribute in \p *value. The size of the \p
661
+ * value buffer is given by \p *valueSize. The value returned in \p
662
+ * *valueSize contains the number of bytes returned in \p value.
663
+ *
664
+ * If the attribute value is a c-string that is longer than \p
665
+ * *valueSize, then only the first \p *valueSize characters will be
666
+ * returned and there will be no terminating null byte.
667
+ * \note \b Thread-safety: this function is thread safe.
668
+ *
669
+ * \param device The CUDA device
670
+ * \param eventDomain ID of the event domain
671
+ * \param attrib The event domain attribute to read
672
+ * \param valueSize The size of the \p value buffer in bytes, and
673
+ * returns the number of bytes written to \p value
674
+ * \param value Returns the attribute's value
675
+ *
676
+ * \retval CUPTI_SUCCESS
677
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
678
+ * \retval CUPTI_ERROR_INVALID_DEVICE
679
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
680
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
681
+ * is NULL, or if \p attrib is not an event domain attribute
682
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
683
+ * attribute values, indicates that the \p value buffer is too small
684
+ * to hold the attribute value.
685
+ */
686
+ CUptiResult CUPTIAPI cuptiDeviceGetEventDomainAttribute(CUdevice device,
687
+ CUpti_EventDomainID eventDomain,
688
+ CUpti_EventDomainAttribute attrib,
689
+ size_t *valueSize,
690
+ void *value);
691
+
692
+ /**
693
+ * \brief Get the number of event domains available on any device.
694
+ *
695
+ * Returns the total number of event domains available on any
696
+ * CUDA-capable device.
697
+ * \note \b Thread-safety: this function is thread safe.
698
+ *
699
+ * \param numDomains Returns the number of domains
700
+ *
701
+ * \retval CUPTI_SUCCESS
702
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
703
+ */
704
+ CUptiResult CUPTIAPI cuptiGetNumEventDomains(uint32_t *numDomains);
705
+
706
+ /**
707
+ * \brief Get the event domains available on any device.
708
+ *
709
+ * Returns all the event domains available on any CUDA-capable device.
710
+ * Event domain IDs are returned in \p domainArray. The size of the \p
711
+ * domainArray buffer is given by \p *arraySizeBytes. The size of the
712
+ * \p domainArray buffer must be at least \p numDomains *
713
+ * sizeof(CUpti_EventDomainID) or all domains will not be
714
+ * returned. The value returned in \p *arraySizeBytes contains the
715
+ * number of bytes returned in \p domainArray.
716
+ * \note \b Thread-safety: this function is thread safe.
717
+ *
718
+ * \param arraySizeBytes The size of \p domainArray in bytes, and
719
+ * returns the number of bytes written to \p domainArray
720
+ * \param domainArray Returns all the event domains
721
+ *
722
+ * \retval CUPTI_SUCCESS
723
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
724
+ * \p domainArray are NULL
725
+ */
726
+ CUptiResult CUPTIAPI cuptiEnumEventDomains(size_t *arraySizeBytes,
727
+ CUpti_EventDomainID *domainArray);
728
+
729
+ /**
730
+ * \brief Read an event domain attribute.
731
+ *
732
+ * Returns an event domain attribute in \p *value. The size of the \p
733
+ * value buffer is given by \p *valueSize. The value returned in \p
734
+ * *valueSize contains the number of bytes returned in \p value.
735
+ *
736
+ * If the attribute value is a c-string that is longer than \p
737
+ * *valueSize, then only the first \p *valueSize characters will be
738
+ * returned and there will be no terminating null byte.
739
+ * \note \b Thread-safety: this function is thread safe.
740
+ *
741
+ * \param eventDomain ID of the event domain
742
+ * \param attrib The event domain attribute to read
743
+ * \param valueSize The size of the \p value buffer in bytes, and
744
+ * returns the number of bytes written to \p value
745
+ * \param value Returns the attribute's value
746
+ *
747
+ * \retval CUPTI_SUCCESS
748
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
749
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
750
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
751
+ * is NULL, or if \p attrib is not an event domain attribute
752
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
753
+ * attribute values, indicates that the \p value buffer is too small
754
+ * to hold the attribute value.
755
+ */
756
+ CUptiResult CUPTIAPI cuptiEventDomainGetAttribute(CUpti_EventDomainID eventDomain,
757
+ CUpti_EventDomainAttribute attrib,
758
+ size_t *valueSize,
759
+ void *value);
760
+
761
+ /**
762
+ * \brief Get number of events in a domain.
763
+ *
764
+ * Returns the number of events in \p numEvents for a domain.
765
+ * \note \b Thread-safety: this function is thread safe.
766
+ *
767
+ * \param eventDomain ID of the event domain
768
+ * \param numEvents Returns the number of events in the domain
769
+ *
770
+ * \retval CUPTI_SUCCESS
771
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
772
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
773
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL
774
+ */
775
+ CUptiResult CUPTIAPI cuptiEventDomainGetNumEvents(CUpti_EventDomainID eventDomain,
776
+ uint32_t *numEvents);
777
+
778
+ /**
779
+ * \brief Get the events in a domain.
780
+ *
781
+ * Returns the event IDs in \p eventArray for a domain. The size of
782
+ * the \p eventArray buffer is given by \p *arraySizeBytes. The size
783
+ * of the \p eventArray buffer must be at least \p numdomainevents *
784
+ * sizeof(CUpti_EventID) or else all events will not be returned. The
785
+ * value returned in \p *arraySizeBytes contains the number of bytes
786
+ * returned in \p eventArray.
787
+ * \note \b Thread-safety: this function is thread safe.
788
+ *
789
+ * \param eventDomain ID of the event domain
790
+ * \param arraySizeBytes The size of \p eventArray in bytes, and
791
+ * returns the number of bytes written to \p eventArray
792
+ * \param eventArray Returns the IDs of the events in the domain
793
+ *
794
+ * \retval CUPTI_SUCCESS
795
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
796
+ * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
797
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or \p
798
+ * eventArray are NULL
799
+ */
800
+ CUptiResult CUPTIAPI cuptiEventDomainEnumEvents(CUpti_EventDomainID eventDomain,
801
+ size_t *arraySizeBytes,
802
+ CUpti_EventID *eventArray);
803
+
804
+ /**
805
+ * \brief Get an event attribute.
806
+ *
807
+ * Returns an event attribute in \p *value. The size of the \p
808
+ * value buffer is given by \p *valueSize. The value returned in \p
809
+ * *valueSize contains the number of bytes returned in \p value.
810
+ *
811
+ * If the attribute value is a c-string that is longer than \p
812
+ * *valueSize, then only the first \p *valueSize characters will be
813
+ * returned and there will be no terminating null byte.
814
+ * \note \b Thread-safety: this function is thread safe.
815
+ *
816
+ * \param event ID of the event
817
+ * \param attrib The event attribute to read
818
+ * \param valueSize The size of the \p value buffer in bytes, and
819
+ * returns the number of bytes written to \p value
820
+ * \param value Returns the attribute's value
821
+ *
822
+ * \retval CUPTI_SUCCESS
823
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
824
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
825
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
826
+ * is NULL, or if \p attrib is not an event attribute
827
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
828
+ * attribute values, indicates that the \p value buffer is too small
829
+ * to hold the attribute value.
830
+ */
831
+ CUptiResult CUPTIAPI cuptiEventGetAttribute(CUpti_EventID event,
832
+ CUpti_EventAttribute attrib,
833
+ size_t *valueSize,
834
+ void *value);
835
+
836
+ /**
837
+ * \brief Find an event by name.
838
+ *
839
+ * Find an event by name and return the event ID in \p *event.
840
+ * \note \b Thread-safety: this function is thread safe.
841
+ *
842
+ * \param device The CUDA device
843
+ * \param eventName The name of the event to find
844
+ * \param event Returns the ID of the found event or undefined if
845
+ * unable to find the event
846
+ *
847
+ * \retval CUPTI_SUCCESS
848
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
849
+ * \retval CUPTI_ERROR_INVALID_DEVICE
850
+ * \retval CUPTI_ERROR_INVALID_EVENT_NAME if unable to find an event
851
+ * with name \p eventName. In this case \p *event is undefined
852
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventName or \p event are NULL
853
+ */
854
+ CUptiResult CUPTIAPI cuptiEventGetIdFromName(CUdevice device,
855
+ const char *eventName,
856
+ CUpti_EventID *event);
857
+
858
+ /**
859
+ * \brief Create a new event group for a context.
860
+ *
861
+ * Creates a new event group for \p context and returns the new group
862
+ * in \p *eventGroup.
863
+ * \note \p flags are reserved for future use and should be set to zero.
864
+ * \note \b Thread-safety: this function is thread safe.
865
+ *
866
+ * \param context The context for the event group
867
+ * \param eventGroup Returns the new event group
868
+ * \param flags Reserved - must be zero
869
+ *
870
+ * \retval CUPTI_SUCCESS
871
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
872
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
873
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY
874
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
875
+ */
876
+ CUptiResult CUPTIAPI cuptiEventGroupCreate(CUcontext context,
877
+ CUpti_EventGroup *eventGroup,
878
+ uint32_t flags);
879
+
880
+ /**
881
+ * \brief Destroy an event group.
882
+ *
883
+ * Destroy an \p eventGroup and free its resources. An event group
884
+ * cannot be destroyed if it is enabled.
885
+ * \note \b Thread-safety: this function is thread safe.
886
+ *
887
+ * \param eventGroup The event group to destroy
888
+ *
889
+ * \retval CUPTI_SUCCESS
890
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
891
+ * \retval CUPTI_ERROR_INVALID_OPERATION if the event group is enabled
892
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if eventGroup is NULL
893
+ */
894
+ CUptiResult CUPTIAPI cuptiEventGroupDestroy(CUpti_EventGroup eventGroup);
895
+
896
+ /**
897
+ * \brief Read an event group attribute.
898
+ *
899
+ * Read an event group attribute and return it in \p *value.
900
+ * \note \b Thread-safety: this function is thread safe but client
901
+ * must guard against simultaneous destruction or modification of \p
902
+ * eventGroup (for example, client must guard against simultaneous
903
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
904
+ * etc.), and must guard against simultaneous destruction of the
905
+ * context in which \p eventGroup was created (for example, client
906
+ * must guard against simultaneous calls to cudaDeviceReset,
907
+ * cuCtxDestroy, etc.).
908
+ *
909
+ * \param eventGroup The event group
910
+ * \param attrib The attribute to read
911
+ * \param valueSize Size of buffer pointed by the value, and
912
+ * returns the number of bytes written to \p value
913
+ * \param value Returns the value of the attribute
914
+ *
915
+ * \retval CUPTI_SUCCESS
916
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
917
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
918
+ * is NULL, or if \p attrib is not an eventgroup attribute
919
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
920
+ * attribute values, indicates that the \p value buffer is too small
921
+ * to hold the attribute value.
922
+ */
923
+ CUptiResult CUPTIAPI cuptiEventGroupGetAttribute(CUpti_EventGroup eventGroup,
924
+ CUpti_EventGroupAttribute attrib,
925
+ size_t *valueSize,
926
+ void *value);
927
+
928
+ /**
929
+ * \brief Write an event group attribute.
930
+ *
931
+ * Write an event group attribute.
932
+ * \note \b Thread-safety: this function is thread safe.
933
+ *
934
+ * \param eventGroup The event group
935
+ * \param attrib The attribute to write
936
+ * \param valueSize The size, in bytes, of the value
937
+ * \param value The attribute value to write
938
+ *
939
+ * \retval CUPTI_SUCCESS
940
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
941
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
942
+ * is NULL, or if \p attrib is not an event group attribute, or if
943
+ * \p attrib is not a writable attribute
944
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT Indicates that
945
+ * the \p value buffer is too small to hold the attribute value.
946
+ */
947
+ CUptiResult CUPTIAPI cuptiEventGroupSetAttribute(CUpti_EventGroup eventGroup,
948
+ CUpti_EventGroupAttribute attrib,
949
+ size_t valueSize,
950
+ void *value);
951
+
952
+ /**
953
+ * \brief Add an event to an event group.
954
+ *
955
+ * Add an event to an event group. The event add can fail for a number of reasons:
956
+ * \li The event group is enabled
957
+ * \li The event does not belong to the same event domain as the
958
+ * events that are already in the event group
959
+ * \li Device limitations on the events that can belong to the same group
960
+ * \li The event group is full
961
+ *
962
+ * \note \b Thread-safety: this function is thread safe.
963
+ *
964
+ * \param eventGroup The event group
965
+ * \param event The event to add to the group
966
+ *
967
+ * \retval CUPTI_SUCCESS
968
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
969
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
970
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY
971
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
972
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p event belongs to a
973
+ * different event domain than the events already in \p eventGroup, or
974
+ * if a device limitation prevents \p event from being collected at
975
+ * the same time as the events already in \p eventGroup
976
+ * \retval CUPTI_ERROR_MAX_LIMIT_REACHED if \p eventGroup is full
977
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
978
+ */
979
+ CUptiResult CUPTIAPI cuptiEventGroupAddEvent(CUpti_EventGroup eventGroup,
980
+ CUpti_EventID event);
981
+
982
+ /**
983
+ * \brief Remove an event from an event group.
984
+ *
985
+ * Remove \p event from the an event group. The event cannot be
986
+ * removed if the event group is enabled.
987
+ * \note \b Thread-safety: this function is thread safe.
988
+ *
989
+ * \param eventGroup The event group
990
+ * \param event The event to remove from the group
991
+ *
992
+ * \retval CUPTI_SUCCESS
993
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
994
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
995
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
996
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
997
+ */
998
+ CUptiResult CUPTIAPI cuptiEventGroupRemoveEvent(CUpti_EventGroup eventGroup,
999
+ CUpti_EventID event);
1000
+
1001
+ /**
1002
+ * \brief Remove all events from an event group.
1003
+ *
1004
+ * Remove all events from an event group. Events cannot be removed if
1005
+ * the event group is enabled.
1006
+ * \note \b Thread-safety: this function is thread safe.
1007
+ *
1008
+ * \param eventGroup The event group
1009
+ *
1010
+ * \retval CUPTI_SUCCESS
1011
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1012
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
1013
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1014
+ */
1015
+ CUptiResult CUPTIAPI cuptiEventGroupRemoveAllEvents(CUpti_EventGroup eventGroup);
1016
+
1017
+ /**
1018
+ * \brief Zero all the event counts in an event group.
1019
+ *
1020
+ * Zero all the event counts in an event group.
1021
+ * \note \b Thread-safety: this function is thread safe but client
1022
+ * must guard against simultaneous destruction or modification of \p
1023
+ * eventGroup (for example, client must guard against simultaneous
1024
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1025
+ * etc.), and must guard against simultaneous destruction of the
1026
+ * context in which \p eventGroup was created (for example, client
1027
+ * must guard against simultaneous calls to cudaDeviceReset,
1028
+ * cuCtxDestroy, etc.).
1029
+ *
1030
+ * \param eventGroup The event group
1031
+ *
1032
+ * \retval CUPTI_SUCCESS
1033
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1034
+ * \retval CUPTI_ERROR_HARDWARE
1035
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1036
+ */
1037
+ CUptiResult CUPTIAPI cuptiEventGroupResetAllEvents(CUpti_EventGroup eventGroup);
1038
+
1039
+ /**
1040
+ * \brief Enable an event group.
1041
+ *
1042
+ * Enable an event group. Enabling an event group zeros the value of
1043
+ * all the events in the group and then starts collection of those
1044
+ * events.
1045
+ * \note \b Thread-safety: this function is thread safe.
1046
+ *
1047
+ * \param eventGroup The event group
1048
+ *
1049
+ * \retval CUPTI_SUCCESS
1050
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1051
+ * \retval CUPTI_ERROR_HARDWARE
1052
+ * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
1053
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
1054
+ * enabled due to other already enabled event groups
1055
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1056
+ * \retval CUPTI_ERROR_HARDWARE_BUSY if another client is profiling
1057
+ * and hardware is busy
1058
+ */
1059
+ CUptiResult CUPTIAPI cuptiEventGroupEnable(CUpti_EventGroup eventGroup);
1060
+
1061
+ /**
1062
+ * \brief Disable an event group.
1063
+ *
1064
+ * Disable an event group. Disabling an event group stops collection
1065
+ * of events contained in the group.
1066
+ * \note \b Thread-safety: this function is thread safe.
1067
+ *
1068
+ * \param eventGroup The event group
1069
+ *
1070
+ * \retval CUPTI_SUCCESS
1071
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1072
+ * \retval CUPTI_ERROR_HARDWARE
1073
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
1074
+ */
1075
+ CUptiResult CUPTIAPI cuptiEventGroupDisable(CUpti_EventGroup eventGroup);
1076
+
1077
+ /**
1078
+ * \brief Read the value for an event in an event group.
1079
+ *
1080
+ * Read the value for an event in an event group. The event value is
1081
+ * returned in the \p eventValueBuffer buffer. \p
1082
+ * eventValueBufferSizeBytes indicates the size of the \p
1083
+ * eventValueBuffer buffer. The buffer must be at least sizeof(uint64)
1084
+ * if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set
1085
+ * on the group containing the event. The buffer must be at least
1086
+ * (sizeof(uint64) * number of domain instances) if
1087
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is set on the
1088
+ * group.
1089
+ *
1090
+ * If any instance of an event counter overflows, the value returned
1091
+ * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
1092
+ *
1093
+ * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
1094
+ *
1095
+ * Reading an event from a disabled event group is not allowed. After
1096
+ * being read, an event's value is reset to zero.
1097
+ * \note \b Thread-safety: this function is thread safe but client
1098
+ * must guard against simultaneous destruction or modification of \p
1099
+ * eventGroup (for example, client must guard against simultaneous
1100
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1101
+ * etc.), and must guard against simultaneous destruction of the
1102
+ * context in which \p eventGroup was created (for example, client
1103
+ * must guard against simultaneous calls to cudaDeviceReset,
1104
+ * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
1105
+ * called simultaneously with this function, then returned event
1106
+ * values are undefined.
1107
+ *
1108
+ * \param eventGroup The event group
1109
+ * \param flags Flags controlling the reading mode
1110
+ * \param event The event to read
1111
+ * \param eventValueBufferSizeBytes The size of \p eventValueBuffer
1112
+ * in bytes, and returns the number of bytes written to \p
1113
+ * eventValueBuffer
1114
+ * \param eventValueBuffer Returns the event value(s)
1115
+ *
1116
+ * \retval CUPTI_SUCCESS
1117
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1118
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
1119
+ * \retval CUPTI_ERROR_HARDWARE
1120
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
1121
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
1122
+ * eventValueBufferSizeBytes or \p eventValueBuffer is NULL
1123
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
1124
+ * is not sufficient
1125
+ */
1126
+ CUptiResult CUPTIAPI cuptiEventGroupReadEvent(CUpti_EventGroup eventGroup,
1127
+ CUpti_ReadEventFlags flags,
1128
+ CUpti_EventID event,
1129
+ size_t *eventValueBufferSizeBytes,
1130
+ uint64_t *eventValueBuffer);
1131
+
1132
+ /**
1133
+ * \brief Read the values for all the events in an event group.
1134
+ *
1135
+ * Read the values for all the events in an event group. The event
1136
+ * values are returned in the \p eventValueBuffer buffer. \p
1137
+ * eventValueBufferSizeBytes indicates the size of \p
1138
+ * eventValueBuffer. The buffer must be at least (sizeof(uint64) *
1139
+ * number of events in group) if
1140
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set on
1141
+ * the group containing the events. The buffer must be at least
1142
+ * (sizeof(uint64) * number of domain instances * number of events in
1143
+ * group) if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is
1144
+ * set on the group.
1145
+ *
1146
+ * The data format returned in \p eventValueBuffer is:
1147
+ * - domain instance 0: event0 event1 ... eventN
1148
+ * - domain instance 1: event0 event1 ... eventN
1149
+ * - ...
1150
+ * - domain instance M: event0 event1 ... eventN
1151
+ *
1152
+ * The event order in \p eventValueBuffer is returned in \p
1153
+ * eventIdArray. The size of \p eventIdArray is specified in \p
1154
+ * eventIdArraySizeBytes. The size should be at least
1155
+ * (sizeof(CUpti_EventID) * number of events in group).
1156
+ *
1157
+ * If any instance of any event counter overflows, the value returned
1158
+ * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
1159
+ *
1160
+ * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
1161
+ *
1162
+ * Reading events from a disabled event group is not allowed. After
1163
+ * being read, an event's value is reset to zero.
1164
+ * \note \b Thread-safety: this function is thread safe but client
1165
+ * must guard against simultaneous destruction or modification of \p
1166
+ * eventGroup (for example, client must guard against simultaneous
1167
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1168
+ * etc.), and must guard against simultaneous destruction of the
1169
+ * context in which \p eventGroup was created (for example, client
1170
+ * must guard against simultaneous calls to cudaDeviceReset,
1171
+ * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
1172
+ * called simultaneously with this function, then returned event
1173
+ * values are undefined.
1174
+ *
1175
+ * \param eventGroup The event group
1176
+ * \param flags Flags controlling the reading mode
1177
+ * \param eventValueBufferSizeBytes The size of \p eventValueBuffer in
1178
+ * bytes, and returns the number of bytes written to \p
1179
+ * eventValueBuffer
1180
+ * \param eventValueBuffer Returns the event values
1181
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes,
1182
+ * and returns the number of bytes written to \p eventIdArray
1183
+ * \param eventIdArray Returns the IDs of the events in the same order
1184
+ * as the values return in eventValueBuffer.
1185
+ * \param numEventIdsRead Returns the number of event IDs returned
1186
+ * in \p eventIdArray
1187
+ *
1188
+ * \retval CUPTI_SUCCESS
1189
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1190
+ * \retval CUPTI_ERROR_HARDWARE
1191
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
1192
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
1193
+ * eventValueBufferSizeBytes, \p eventValueBuffer, \p
1194
+ * eventIdArraySizeBytes, \p eventIdArray or \p numEventIdsRead is
1195
+ * NULL
1196
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
1197
+ * or \p eventIdArray is not sufficient
1198
+ */
1199
+ CUptiResult CUPTIAPI cuptiEventGroupReadAllEvents(CUpti_EventGroup eventGroup,
1200
+ CUpti_ReadEventFlags flags,
1201
+ size_t *eventValueBufferSizeBytes,
1202
+ uint64_t *eventValueBuffer,
1203
+ size_t *eventIdArraySizeBytes,
1204
+ CUpti_EventID *eventIdArray,
1205
+ size_t *numEventIdsRead);
1206
+
1207
+ /**
1208
+ * \brief For a set of events, get the grouping that indicates the
1209
+ * number of passes and the event groups necessary to collect the
1210
+ * events.
1211
+ *
1212
+ * The number of events that can be collected simultaneously varies by
1213
+ * device and by the type of the events. When events can be collected
1214
+ * simultaneously, they may need to be grouped into multiple event
1215
+ * groups because they are from different event domains. This function
1216
+ * takes a set of events and determines how many passes are required
1217
+ * to collect all those events, and which events can be collected
1218
+ * simultaneously in each pass.
1219
+ *
1220
+ * The CUpti_EventGroupSets returned in \p eventGroupPasses indicates
1221
+ * how many passes are required to collect the events with the \p
1222
+ * numSets field. Within each event group set, the \p sets array
1223
+ * indicates the event groups that should be collected on each pass.
1224
+ * \note \b Thread-safety: this function is thread safe, but client
1225
+ * must guard against another thread simultaneously destroying \p
1226
+ * context.
1227
+ *
1228
+ * \param context The context for event collection
1229
+ * \param eventIdArraySizeBytes Size of \p eventIdArray in bytes
1230
+ * \param eventIdArray Array of event IDs that need to be grouped
1231
+ * \param eventGroupPasses Returns a CUpti_EventGroupSets object that
1232
+ * indicates the number of passes required to collect the events and
1233
+ * the events to collect on each pass
1234
+ *
1235
+ * \retval CUPTI_SUCCESS
1236
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1237
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
1238
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
1239
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArray or
1240
+ * \p eventGroupPasses is NULL
1241
+ */
1242
+ CUptiResult CUPTIAPI cuptiEventGroupSetsCreate(CUcontext context,
1243
+ size_t eventIdArraySizeBytes,
1244
+ CUpti_EventID *eventIdArray,
1245
+ CUpti_EventGroupSets **eventGroupPasses);
1246
+
1247
+ /**
1248
+ * \brief Destroy a event group sets object.
1249
+ *
1250
+ * Destroy a CUpti_EventGroupSets object.
1251
+ * \note \b Thread-safety: this function is thread safe.
1252
+ *
1253
+ * \param eventGroupSets The object to destroy
1254
+ *
1255
+ * \retval CUPTI_SUCCESS
1256
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1257
+ * \retval CUPTI_ERROR_INVALID_OPERATION if any of the event groups
1258
+ * contained in the sets is enabled
1259
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSets is NULL
1260
+ */
1261
+ CUptiResult CUPTIAPI cuptiEventGroupSetsDestroy(CUpti_EventGroupSets *eventGroupSets);
1262
+
1263
+
1264
+ /**
1265
+ * \brief Enable an event group set.
1266
+ *
1267
+ * Enable a set of event groups. Enabling a set of event groups zeros the value of
1268
+ * all the events in all the groups and then starts collection of those events.
1269
+ * \note \b Thread-safety: this function is thread safe.
1270
+ *
1271
+ * \param eventGroupSet The pointer to the event group set
1272
+ *
1273
+ * \retval CUPTI_SUCCESS
1274
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1275
+ * \retval CUPTI_ERROR_HARDWARE
1276
+ * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
1277
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
1278
+ * enabled due to other already enabled event groups
1279
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1280
+ * \retval CUPTI_ERROR_HARDWARE_BUSY if other client is profiling and hardware is
1281
+ * busy
1282
+ */
1283
+ CUptiResult CUPTIAPI cuptiEventGroupSetEnable(CUpti_EventGroupSet *eventGroupSet);
1284
+
1285
+ /**
1286
+ * \brief Disable an event group set.
1287
+ *
1288
+ * Disable a set of event groups. Disabling a set of event groups
1289
+ * stops collection of events contained in the groups.
1290
+ * \note \b Thread-safety: this function is thread safe.
1291
+ * \note \b If this call fails, some of the event groups in the set may be disabled
1292
+ * and other event groups may remain enabled.
1293
+ *
1294
+ * \param eventGroupSet The pointer to the event group set
1295
+ * \retval CUPTI_SUCCESS
1296
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1297
+ * \retval CUPTI_ERROR_HARDWARE
1298
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1299
+ */
1300
+ CUptiResult CUPTIAPI cuptiEventGroupSetDisable(CUpti_EventGroupSet *eventGroupSet);
1301
+
1302
+ /**
1303
+ * \brief Enable kernel replay mode.
1304
+ *
1305
+ * Set profiling mode for the context to replay mode. In this mode,
1306
+ * any number of events can be collected in one run of the kernel. The
1307
+ * event collection mode will automatically switch to
1308
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. In this mode, \ref
1309
+ * cuptiSetEventCollectionMode will return
1310
+ * CUPTI_ERROR_INVALID_OPERATION.
1311
+ * \note \b Kernels might take longer to run if many events are enabled.
1312
+ * \note \b Thread-safety: this function is thread safe.
1313
+ *
1314
+ * \param context The context
1315
+ * \retval CUPTI_SUCCESS
1316
+ */
1317
+ CUptiResult CUPTIAPI cuptiEnableKernelReplayMode(CUcontext context);
1318
+
1319
+ /**
1320
+ * \brief Disable kernel replay mode.
1321
+ *
1322
+ * Set profiling mode for the context to non-replay (default)
1323
+ * mode. Event collection mode will be set to
1324
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. All previously enabled
1325
+ * event groups and event group sets will be disabled.
1326
+ * \note \b Thread-safety: this function is thread safe.
1327
+ *
1328
+ * \param context The context
1329
+ * \retval CUPTI_SUCCESS
1330
+ */
1331
+ CUptiResult CUPTIAPI cuptiDisableKernelReplayMode(CUcontext context);
1332
+
1333
+ /**
1334
+ * \brief Function type for getting updates on kernel replay.
1335
+ *
1336
+ * \param kernelName The mangled kernel name
1337
+ * \param numReplaysDone Number of replays done so far
1338
+ * \param customData Pointer of any custom data passed in when subscribing
1339
+ */
1340
+ typedef void (CUPTIAPI *CUpti_KernelReplayUpdateFunc)(
1341
+ const char *kernelName,
1342
+ int numReplaysDone,
1343
+ void *customData);
1344
+
1345
+ /**
1346
+ * \brief Subscribe to kernel replay updates.
1347
+ *
1348
+ * When subscribed, the function pointer passed in will be called each time a
1349
+ * kernel run is finished during kernel replay. Previously subscribed function
1350
+ * pointer will be replaced. Pass in NULL as the function pointer unsubscribes
1351
+ * the update.
1352
+ *
1353
+ * \param updateFunc The update function pointer
1354
+ * \param customData Pointer to any custom data
1355
+ * \retval CUPTI_SUCCESS
1356
+ */
1357
+ CUptiResult CUPTIAPI cuptiKernelReplaySubscribeUpdate(CUpti_KernelReplayUpdateFunc updateFunc, void *customData);
1358
+
1359
+ /** @} */ /* END CUPTI_EVENT_API */
1360
+
1361
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
1362
+ #pragma GCC visibility pop
1363
+ #endif
1364
+
1365
+ #if defined(__cplusplus)
1366
+ }
1367
+ #endif
1368
+
1369
+ #endif /*_CUPTI_EVENTS_H_*/
1370
+
1371
+
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_nvtx_cbid.h ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2013-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
51
+ #pragma GCC visibility push(default)
52
+ #endif
53
+
54
+ typedef enum {
55
+ CUPTI_CBID_NVTX_INVALID = 0,
56
+ CUPTI_CBID_NVTX_nvtxMarkA = 1,
57
+ CUPTI_CBID_NVTX_nvtxMarkW = 2,
58
+ CUPTI_CBID_NVTX_nvtxMarkEx = 3,
59
+ CUPTI_CBID_NVTX_nvtxRangeStartA = 4,
60
+ CUPTI_CBID_NVTX_nvtxRangeStartW = 5,
61
+ CUPTI_CBID_NVTX_nvtxRangeStartEx = 6,
62
+ CUPTI_CBID_NVTX_nvtxRangeEnd = 7,
63
+ CUPTI_CBID_NVTX_nvtxRangePushA = 8,
64
+ CUPTI_CBID_NVTX_nvtxRangePushW = 9,
65
+ CUPTI_CBID_NVTX_nvtxRangePushEx = 10,
66
+ CUPTI_CBID_NVTX_nvtxRangePop = 11,
67
+ CUPTI_CBID_NVTX_nvtxNameCategoryA = 12,
68
+ CUPTI_CBID_NVTX_nvtxNameCategoryW = 13,
69
+ CUPTI_CBID_NVTX_nvtxNameOsThreadA = 14,
70
+ CUPTI_CBID_NVTX_nvtxNameOsThreadW = 15,
71
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceA = 16,
72
+ CUPTI_CBID_NVTX_nvtxNameCuDeviceW = 17,
73
+ CUPTI_CBID_NVTX_nvtxNameCuContextA = 18,
74
+ CUPTI_CBID_NVTX_nvtxNameCuContextW = 19,
75
+ CUPTI_CBID_NVTX_nvtxNameCuStreamA = 20,
76
+ CUPTI_CBID_NVTX_nvtxNameCuStreamW = 21,
77
+ CUPTI_CBID_NVTX_nvtxNameCuEventA = 22,
78
+ CUPTI_CBID_NVTX_nvtxNameCuEventW = 23,
79
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceA = 24,
80
+ CUPTI_CBID_NVTX_nvtxNameCudaDeviceW = 25,
81
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamA = 26,
82
+ CUPTI_CBID_NVTX_nvtxNameCudaStreamW = 27,
83
+ CUPTI_CBID_NVTX_nvtxNameCudaEventA = 28,
84
+ CUPTI_CBID_NVTX_nvtxNameCudaEventW = 29,
85
+ CUPTI_CBID_NVTX_nvtxDomainMarkEx = 30,
86
+ CUPTI_CBID_NVTX_nvtxDomainRangeStartEx = 31,
87
+ CUPTI_CBID_NVTX_nvtxDomainRangeEnd = 32,
88
+ CUPTI_CBID_NVTX_nvtxDomainRangePushEx = 33,
89
+ CUPTI_CBID_NVTX_nvtxDomainRangePop = 34,
90
+ CUPTI_CBID_NVTX_nvtxDomainResourceCreate = 35,
91
+ CUPTI_CBID_NVTX_nvtxDomainResourceDestroy = 36,
92
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryA = 37,
93
+ CUPTI_CBID_NVTX_nvtxDomainNameCategoryW = 38,
94
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringA = 39,
95
+ CUPTI_CBID_NVTX_nvtxDomainRegisterStringW = 40,
96
+ CUPTI_CBID_NVTX_nvtxDomainCreateA = 41,
97
+ CUPTI_CBID_NVTX_nvtxDomainCreateW = 42,
98
+ CUPTI_CBID_NVTX_nvtxDomainDestroy = 43,
99
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserCreate = 44,
100
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserDestroy = 45,
101
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireStart = 46,
102
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireFailed = 47,
103
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserAcquireSuccess = 48,
104
+ CUPTI_CBID_NVTX_nvtxDomainSyncUserReleasing = 49,
105
+ CUPTI_CBID_NVTX_SIZE,
106
+ CUPTI_CBID_NVTX_FORCE_INT = 0x7fffffff
107
+ } CUpti_nvtx_api_trace_cbid;
108
+
109
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
110
+ #pragma GCC visibility pop
111
+ #endif
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling.h ADDED
@@ -0,0 +1,950 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PCSAMPLING_H_)
51
+ #define _CUPTI_PCSAMPLING_H_
52
+
53
+ #include <cuda.h>
54
+ #include <stdint.h>
55
+ #include <stddef.h>
56
+ #include "cupti_result.h"
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #define ACTIVITY_RECORD_ALIGNMENT 8
67
+ #if defined(_WIN32) // Windows 32- and 64-bit
68
+ #define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding
69
+ #define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT))
70
+ #define END_PACKED_ALIGNMENT __pragma(pack(pop))
71
+ #elif defined(__GNUC__) // GCC
72
+ #define START_PACKED_ALIGNMENT
73
+ #define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT)))
74
+ #define END_PACKED_ALIGNMENT
75
+ #else // all other compilers
76
+ #define START_PACKED_ALIGNMENT
77
+ #define PACKED_ALIGNMENT
78
+ #define END_PACKED_ALIGNMENT
79
+ #endif
80
+
81
+ #if defined(__cplusplus)
82
+ extern "C" {
83
+ #endif
84
+
85
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
86
+ #pragma GCC visibility push(default)
87
+ #endif
88
+
89
+ /**
90
+ * \defgroup CUPTI_PCSAMPLING_API CUPTI PC Sampling API
91
+ * Functions, types, and enums that implement the CUPTI PC Sampling API.
92
+ * @{
93
+ */
94
+
95
+ #ifndef CUPTI_PCSAMPLING_STRUCT_SIZE
96
+ #define CUPTI_PCSAMPLING_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
97
+ #endif
98
+
99
+ #ifndef CUPTI_STALL_REASON_STRING_SIZE
100
+ #define CUPTI_STALL_REASON_STRING_SIZE 128
101
+ #endif
102
+
103
+ /**
104
+ * \brief PC Sampling collection mode
105
+ */
106
+ typedef enum
107
+ {
108
+ /**
109
+ * INVALID Value
110
+ */
111
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_INVALID = 0,
112
+ /**
113
+ * Continuous mode. Kernels are not serialized in this mode.
114
+ */
115
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS = 1,
116
+ /**
117
+ * Serialized mode. Kernels are serialized in this mode.
118
+ */
119
+ CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED = 2,
120
+ } CUpti_PCSamplingCollectionMode;
121
+
122
+ /**
123
+ * \brief PC Sampling stall reasons
124
+ */
125
+ typedef struct PACKED_ALIGNMENT
126
+ {
127
+ /**
128
+ * [r] Collected stall reason index
129
+ */
130
+ uint32_t pcSamplingStallReasonIndex;
131
+ /**
132
+ * [r] Number of times the PC was sampled with the stallReason.
133
+ */
134
+ uint32_t samples;
135
+ } CUpti_PCSamplingStallReason;
136
+
137
+ /**
138
+ * \brief PC Sampling data
139
+ */
140
+ typedef struct PACKED_ALIGNMENT
141
+ {
142
+ /**
143
+ * [w] Size of the data structure.
144
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
145
+ * available in the structure. Used to preserve backward compatibility.
146
+ */
147
+ size_t size;
148
+ /**
149
+ * [r] Unique cubin id
150
+ */
151
+ uint64_t cubinCrc;
152
+ /**
153
+ * [r] PC offset
154
+ */
155
+ uint64_t pcOffset;
156
+ /**
157
+ * The function's unique symbol index in the module.
158
+ */
159
+ uint32_t functionIndex;
160
+ /**
161
+ * Padding
162
+ */
163
+ uint32_t pad;
164
+ /**
165
+ * [r] The function name. This name string might be shared across all the records
166
+ * including records from activity APIs representing the same function, and so it should not be
167
+ * modified or freed until post processing of all the records is done. Once done, it is user’s responsibility to
168
+ * free the memory using free() function.
169
+ */
170
+ char* functionName;
171
+ /**
172
+ * [r] Collected stall reason count
173
+ */
174
+ size_t stallReasonCount;
175
+ /**
176
+ * [r] Stall reason id
177
+ * Total samples
178
+ */
179
+ CUpti_PCSamplingStallReason *stallReason;
180
+ } CUpti_PCSamplingPCData;
181
+
182
+ /**
183
+ * \brief PC Sampling output data format
184
+ */
185
+ typedef enum
186
+ {
187
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_INVALID = 0,
188
+ /**
189
+ * HW buffer data will be parsed during collection of data
190
+ */
191
+ CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED = 1,
192
+ } CUpti_PCSamplingOutputDataFormat;
193
+
194
+ /**
195
+ * \brief Collected PC Sampling data
196
+ *
197
+ */
198
+ typedef struct PACKED_ALIGNMENT
199
+ {
200
+ /**
201
+ * [w] Size of the data structure.
202
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
203
+ * available in the structure. Used to preserve backward compatibility.
204
+ */
205
+ size_t size;
206
+ /**
207
+ * [w] Number of PCs to be collected
208
+ */
209
+ size_t collectNumPcs;
210
+ /**
211
+ * [r] Number of samples collected across all PCs.
212
+ * It includes samples for user modules, samples for non-user kernels and dropped samples.
213
+ * It includes counts for all non selected stall reasons.
214
+ * CUPTI does not provide PC records for non-user kernels.
215
+ * CUPTI does not provide PC records for instructions for which all selected stall reason metrics counts are zero.
216
+ */
217
+ uint64_t totalSamples;
218
+ /**
219
+ * [r] Number of samples that were dropped by hardware due to backpressure/overflow.
220
+ */
221
+ uint64_t droppedSamples;
222
+ /**
223
+ * [r] Number of PCs collected
224
+ */
225
+ size_t totalNumPcs;
226
+ /**
227
+ * [r] Number of PCs available for collection
228
+ */
229
+ size_t remainingNumPcs;
230
+ /**
231
+ * [r] Unique identifier for each range.
232
+ * Data collected across multiple ranges in multiple buffers can be identified using range id.
233
+ */
234
+ uint64_t rangeId;
235
+ /**
236
+ * [r] Profiled PC data
237
+ * This data struct should have enough memory to collect number of PCs mentioned in \brief collectNumPcs
238
+ */
239
+ CUpti_PCSamplingPCData *pPcData;
240
+ /**
241
+ * [r] Number of samples collected across all non user kernels PCs.
242
+ * It includes samples for non-user kernels.
243
+ * It includes counts for all non selected stall reasons as well.
244
+ * CUPTI does not provide PC records for non-user kernels.
245
+ */
246
+ uint64_t nonUsrKernelsTotalSamples;
247
+
248
+ /**
249
+ * [r] Status of the hardware buffer.
250
+ * CUPTI returns the error code CUPTI_ERROR_OUT_OF_MEMORY when hardware buffer is full.
251
+ * When hardware buffer is full, user will get pc data as 0. To mitigate this issue, one or more of the below options can be tried:
252
+ * 1. Increase the hardware buffer size using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
253
+ * 2. Decrease the thread sleep span using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
254
+ * 3. Decrease the sampling frequency using the attribute CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
255
+ */
256
+ uint8_t hardwareBufferFull;
257
+ } CUpti_PCSamplingData;
258
+
259
+ /**
260
+ * \brief PC Sampling configuration attributes
261
+ *
262
+ * PC Sampling configuration attribute types. These attributes can be read
263
+ * using \ref cuptiPCSamplingGetConfigurationAttribute and can be written
264
+ * using \ref cuptiPCSamplingSetConfigurationAttribute. Attributes marked
265
+ * [r] can only be read using \ref cuptiPCSamplingGetConfigurationAttribute
266
+ * [w] can only be written using \ref cuptiPCSamplingSetConfigurationAttribute
267
+ * [rw] can be read using \ref cuptiPCSamplingGetConfigurationAttribute and
268
+ * written using \ref cuptiPCSamplingSetConfigurationAttribute
269
+ */
270
+ typedef enum
271
+ {
272
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_INVALID = 0,
273
+ /**
274
+ * [rw] Sampling period for PC Sampling.
275
+ * DEFAULT - CUPTI defined value based on number of SMs
276
+ * Valid values for the sampling
277
+ * periods are between 5 to 31 both inclusive. This will set the
278
+ * sampling period to (2^samplingPeriod) cycles.
279
+ * For e.g. for sampling period = 5 to 31, cycles = 32, 64, 128,..., 2^31
280
+ * Value is a uint32_t
281
+ */
282
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD = 1,
283
+ /**
284
+ * [w] Number of stall reasons to collect.
285
+ * DEFAULT - All stall reasons will be collected
286
+ * Value is a size_t
287
+ * [w] Stall reasons to collect
288
+ * DEFAULT - All stall reasons will be collected
289
+ * Input value should be a pointer pointing to array of stall reason indexes
290
+ * containing all the stall reason indexes to collect.
291
+ */
292
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON = 2,
293
+ /**
294
+ * [rw] Size of SW buffer for raw PC counter data downloaded from HW buffer
295
+ * DEFAULT - 1 MB, which can accommodate approximately 5500 PCs
296
+ * with all stall reasons
297
+ * Approximately it takes 16 Bytes (and some fixed size memory)
298
+ * to accommodate one PC with one stall reason
299
+ * For e.g. 1 PC with 1 stall reason = 32 Bytes
300
+ * 1 PC with 2 stall reason = 48 Bytes
301
+ * 1 PC with 4 stall reason = 96 Bytes
302
+ * Value is a size_t
303
+ */
304
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE = 3,
305
+ /**
306
+ * [rw] Size of HW buffer in bytes
307
+ * DEFAULT - 512 MB
308
+ * If sampling period is too less, HW buffer can overflow
309
+ * and drop PC data
310
+ * Value is a size_t
311
+ */
312
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE = 4,
313
+ /**
314
+ * [rw] PC Sampling collection mode
315
+ * DEFAULT - CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS
316
+ * Input value should be of type \ref CUpti_PCSamplingCollectionMode.
317
+ */
318
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE = 5,
319
+ /**
320
+ * [rw] Control over PC Sampling data collection range
321
+ * Default - 0
322
+ * 1 - Allows user to start and stop PC Sampling using APIs -
323
+ * \ref cuptiPCSamplingStart() - Start PC Sampling
324
+ * \ref cuptiPCSamplingStop() - Stop PC Sampling
325
+ * Value is a uint32_t
326
+ */
327
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL = 6,
328
+ /**
329
+ * [w] Value for output data format
330
+ * Default - CUPTI_PC_SAMPLING_OUTPUT_DATA_FORMAT_PARSED
331
+ * Input value should be of type \ref CUpti_PCSamplingOutputDataFormat.
332
+ */
333
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT = 7,
334
+ /**
335
+ * [w] Data buffer to hold collected PC Sampling data PARSED_DATA
336
+ * Default - none.
337
+ * Buffer type is void * which can point to PARSED_DATA
338
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
339
+ */
340
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER = 8,
341
+ /**
342
+ * [rw] Control sleep time of the worker threads created by CUPTI for various PC sampling operations.
343
+ * CUPTI creates multiple worker threads to offload certain operations to these threads. This includes decoding of HW data to
344
+ * the CUPTI PC sampling data and correlating PC data to SASS instructions. CUPTI wakes up these threads periodically.
345
+ * Default - 100 milliseconds.
346
+ * Value is a uint32_t
347
+ */
348
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN = 9,
349
+ CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_FORCE_INT = 0x7fffffff,
350
+ } CUpti_PCSamplingConfigurationAttributeType;
351
+
352
+ /**
353
+ * \brief PC sampling configuration information structure
354
+ *
355
+ * This structure provides \ref CUpti_PCSamplingConfigurationAttributeType which can be configured
356
+ * or queried for PC sampling configuration
357
+ */
358
+ typedef struct
359
+ {
360
+ /**
361
+ * Refer \ref CUpti_PCSamplingConfigurationAttributeType for all supported attribute types
362
+ */
363
+ CUpti_PCSamplingConfigurationAttributeType attributeType;
364
+ /*
365
+ * Configure or query status for \p attributeType
366
+ * CUPTI_SUCCESS for valid \p attributeType and \p attributeData
367
+ * CUPTI_ERROR_INVALID_OPERATION if \p attributeData is not valid
368
+ * CUPTI_ERROR_INVALID_PARAMETER if \p attributeType is not valid
369
+ */
370
+ CUptiResult attributeStatus;
371
+ union
372
+ {
373
+ /**
374
+ * Invalid Value
375
+ */
376
+ struct
377
+ {
378
+ uint64_t data[3];
379
+ } invalidData;
380
+ /**
381
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_PERIOD
382
+ */
383
+ struct
384
+ {
385
+ uint32_t samplingPeriod;
386
+ } samplingPeriodData;
387
+ /**
388
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON
389
+ */
390
+ struct
391
+ {
392
+ size_t stallReasonCount;
393
+ uint32_t *pStallReasonIndex;
394
+ } stallReasonData;
395
+ /**
396
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SCRATCH_BUFFER_SIZE
397
+ */
398
+ struct
399
+ {
400
+ size_t scratchBufferSize;
401
+ } scratchBufferSizeData;
402
+ /**
403
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_HARDWARE_BUFFER_SIZE
404
+ */
405
+ struct
406
+ {
407
+ size_t hardwareBufferSize;
408
+ } hardwareBufferSizeData;
409
+ /**
410
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_COLLECTION_MODE
411
+ */
412
+ struct
413
+ {
414
+ CUpti_PCSamplingCollectionMode collectionMode;
415
+ } collectionModeData;
416
+ /**
417
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
418
+ */
419
+ struct
420
+ {
421
+ uint32_t enableStartStopControl;
422
+ } enableStartStopControlData;
423
+ /**
424
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_OUTPUT_DATA_FORMAT
425
+ */
426
+ struct
427
+ {
428
+ CUpti_PCSamplingOutputDataFormat outputDataFormat;
429
+ } outputDataFormatData;
430
+ /**
431
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_SAMPLING_DATA_BUFFER
432
+ */
433
+ struct
434
+ {
435
+ void *samplingDataBuffer;
436
+ } samplingDataBufferData;
437
+ /**
438
+ * Refer \ref CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_WORKER_THREAD_PERIODIC_SLEEP_SPAN
439
+ */
440
+ struct
441
+ {
442
+ uint32_t workerThreadPeriodicSleepSpan;
443
+ } workerThreadPeriodicSleepSpanData;
444
+
445
+ } attributeData;
446
+ } CUpti_PCSamplingConfigurationInfo;
447
+
448
+ /**
449
+ * \brief PC sampling configuration structure
450
+ *
451
+ * This structure configures PC sampling using \ref cuptiPCSamplingSetConfigurationAttribute
452
+ * and queries PC sampling default configuration using \ref cuptiPCSamplingGetConfigurationAttribute
453
+ */
454
+ typedef struct
455
+ {
456
+ /**
457
+ * [w] Size of the data structure i.e. CUpti_PCSamplingConfigurationInfoParamsSize
458
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
459
+ * available in the structure. Used to preserve backward compatibility.
460
+ */
461
+ size_t size;
462
+ /**
463
+ * [w] Assign to NULL
464
+ */
465
+ void* pPriv;
466
+ /**
467
+ * [w] CUcontext
468
+ */
469
+ CUcontext ctx;
470
+ /**
471
+ * [w] Number of attributes to configure using \ref cuptiPCSamplingSetConfigurationAttribute or query
472
+ * using \ref cuptiPCSamplingGetConfigurationAttribute
473
+ */
474
+ size_t numAttributes;
475
+ /**
476
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
477
+ */
478
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
479
+ } CUpti_PCSamplingConfigurationInfoParams;
480
+ #define CUpti_PCSamplingConfigurationInfoParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingConfigurationInfoParams,pPCSamplingConfigurationInfo)
481
+
482
+ /**
483
+ * \brief Write PC Sampling configuration attribute.
484
+ *
485
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
486
+ * containing PC sampling configuration.
487
+ *
488
+ * \retval CUPTI_SUCCESS
489
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
490
+ * some invalid \p attrib.
491
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if attribute \p value is not valid
492
+ * or any \p pParams is not valid
493
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
494
+ * does not support the API
495
+ */
496
+ CUptiResult CUPTIAPI cuptiPCSamplingSetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
497
+
498
+ /**
499
+ * \brief Read PC Sampling configuration attribute.
500
+ *
501
+ * \param pParams A pointer to \ref CUpti_PCSamplingConfigurationInfoParams
502
+ * containing PC sampling configuration.
503
+ *
504
+ * \retval CUPTI_SUCCESS
505
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
506
+ * some invalid attribute.
507
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p attrib is not valid
508
+ * or any \p pParams is not valid
509
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT indicates that
510
+ * the \p value buffer is too small to hold the attribute value
511
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
512
+ * does not support the API
513
+ */
514
+ CUptiResult CUPTIAPI cuptiPCSamplingGetConfigurationAttribute(CUpti_PCSamplingConfigurationInfoParams *pParams);
515
+
516
+ /**
517
+ * \brief Params for cuptiPCSamplingEnable
518
+ */
519
+ typedef struct
520
+ {
521
+ /**
522
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetDataParamsSize
523
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
524
+ * available in the structure. Used to preserve backward compatibility.
525
+ */
526
+ size_t size;
527
+ /**
528
+ * [w] Assign to NULL
529
+ */
530
+ void* pPriv;
531
+ /**
532
+ * [w] CUcontext
533
+ */
534
+ CUcontext ctx;
535
+ /**
536
+ * \param pcSamplingData Data buffer to hold collected PC Sampling data PARSED_DATA
537
+ * Buffer type is void * which can point to PARSED_DATA
538
+ * Refer \ref CUpti_PCSamplingData for buffer format for PARSED_DATA
539
+ */
540
+ void *pcSamplingData;
541
+ } CUpti_PCSamplingGetDataParams;
542
+ #define CUpti_PCSamplingGetDataParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetDataParams, pcSamplingData)
543
+ /**
544
+ * \brief Flush GPU PC sampling data periodically.
545
+ *
546
+ * Flushing of GPU PC Sampling data is required at following point to maintain uniqueness of PCs:
547
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, after every module load-unload-load
548
+ * For \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_KERNEL_SERIALIZED, after every kernel ends
549
+ * If configuration option \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL
550
+ * is enabled, then after every range end i.e. \brief cuptiPCSamplingStop()
551
+ *
552
+ * If application is profiled in \brief CUPTI_PC_SAMPLING_COLLECTION_MODE_CONTINUOUS, with disabled
553
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL, and there is no module unload,
554
+ * user can collect data in two ways:
555
+ * Use \brief cuptiPCSamplingGetData() API periodically
556
+ * Use \brief cuptiPCSamplingDisable() on application exit and read GPU PC sampling data from sampling
557
+ * data buffer passed during configuration.
558
+ * Note: In case, \brief cuptiPCSamplingGetData() API is not called periodically, then sampling data buffer
559
+ * passed during configuration should be large enough to hold all PCs data.
560
+ * \brief cuptiPCSamplingGetData() API never does device synchronization.
561
+ * It is possible that when the API is called there is some unconsumed data from the HW buffer. In this case
562
+ * CUPTI provides only the data available with it at that moment.
563
+ *
564
+ * \param Refer \ref CUpti_PCSamplingGetDataParams
565
+ *
566
+ * \retval CUPTI_SUCCESS
567
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called without
568
+ * enabling PC sampling.
569
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
570
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
571
+ * \retval CUPTI_ERROR_OUT_OF_MEMORY indicates that the HW buffer is full
572
+ * does not support the API
573
+ */
574
+ CUptiResult CUPTIAPI cuptiPCSamplingGetData(CUpti_PCSamplingGetDataParams *pParams);
575
+
576
+ /**
577
+ * \brief Params for cuptiPCSamplingEnable
578
+ */
579
+ typedef struct
580
+ {
581
+ /**
582
+ * [w] Size of the data structure i.e. CUpti_PCSamplingEnableParamsSize
583
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
584
+ * available in the structure. Used to preserve backward compatibility.
585
+ */
586
+ size_t size;
587
+ /**
588
+ * [w] Assign to NULL
589
+ */
590
+ void* pPriv;
591
+ /**
592
+ * [w] CUcontext
593
+ */
594
+ CUcontext ctx;
595
+ } CUpti_PCSamplingEnableParams;
596
+ #define CUpti_PCSamplingEnableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingEnableParams, ctx)
597
+
598
+ /**
599
+ * \brief Enable PC sampling.
600
+ *
601
+ * \param Refer \ref CUpti_PCSamplingEnableParams
602
+ *
603
+ * \retval CUPTI_SUCCESS
604
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
605
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
606
+ * does not support the API
607
+ */
608
+ CUptiResult CUPTIAPI cuptiPCSamplingEnable(CUpti_PCSamplingEnableParams *pParams);
609
+
610
+ /**
611
+ * \brief Params for cuptiPCSamplingDisable
612
+ */
613
+ typedef struct
614
+ {
615
+ /**
616
+ * [w] Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
617
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
618
+ * available in the structure. Used to preserve backward compatibility.
619
+ */
620
+ size_t size;
621
+ /**
622
+ * [w] Assign to NULL
623
+ */
624
+ void* pPriv;
625
+ /**
626
+ * [w] CUcontext
627
+ */
628
+ CUcontext ctx;
629
+ } CUpti_PCSamplingDisableParams;
630
+ #define CUpti_PCSamplingDisableParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingDisableParams, ctx)
631
+
632
+ /**
633
+ * \brief Disable PC sampling.
634
+ *
635
+ * For application which doesn't destroy the CUDA context explicitly,
636
+ * this API does the PC Sampling tear-down, joins threads and copies PC records in the buffer provided
637
+ * during the PC sampling configuration. PC records which can't be accommodated in the buffer are discarded.
638
+ *
639
+ * \param Refer \ref CUpti_PCSamplingDisableParams
640
+ *
641
+ * \retval CUPTI_SUCCESS
642
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
643
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
644
+ * does not support the API
645
+ */
646
+ CUptiResult CUPTIAPI cuptiPCSamplingDisable(CUpti_PCSamplingDisableParams *pParams);
647
+
648
+ /**
649
+ * \brief Params for cuptiPCSamplingStart
650
+ */
651
+ typedef struct
652
+ {
653
+ /**
654
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStartParamsSize
655
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
656
+ * available in the structure. Used to preserve backward compatibility.
657
+ */
658
+ size_t size;
659
+ /**
660
+ * [w] Assign to NULL
661
+ */
662
+ void* pPriv;
663
+ /**
664
+ * [w] CUcontext
665
+ */
666
+ CUcontext ctx;
667
+ } CUpti_PCSamplingStartParams;
668
+ #define CUpti_PCSamplingStartParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStartParams, ctx)
669
+
670
+ /**
671
+ * \brief Start PC sampling.
672
+ *
673
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
674
+ * This API can be used to mark starting of range. Set configuration option
675
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
676
+ *
677
+ * \param Refer \ref CUpti_PCSamplingStartParams
678
+ *
679
+ * \retval CUPTI_SUCCESS
680
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
681
+ * incorrect PC Sampling configuration.
682
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
683
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
684
+ * does not support the API
685
+ */
686
+ CUptiResult CUPTIAPI cuptiPCSamplingStart(CUpti_PCSamplingStartParams *pParams);
687
+
688
+ /**
689
+ * \brief Params for cuptiPCSamplingStop
690
+ */
691
+ typedef struct
692
+ {
693
+ /**
694
+ * [w] Size of the data structure i.e. CUpti_PCSamplingStopParamsSize
695
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
696
+ * available in the structure. Used to preserve backward compatibility.
697
+ */
698
+ size_t size;
699
+ /**
700
+ * [w] Assign to NULL
701
+ */
702
+ void* pPriv;
703
+ /**
704
+ * [w] CUcontext
705
+ */
706
+ CUcontext ctx;
707
+ } CUpti_PCSamplingStopParams;
708
+ #define CUpti_PCSamplingStopParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingStopParams, ctx)
709
+
710
+ /**
711
+ * \brief Stop PC sampling.
712
+ *
713
+ * User can collect PC Sampling data for user-defined range specified by Start/Stop APIs.
714
+ * This API can be used to mark end of range. Set configuration option
715
+ * \brief CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_ENABLE_START_STOP_CONTROL to use this API.
716
+ *
717
+ * \param Refer \ref CUpti_PCSamplingStopParams
718
+ *
719
+ * \retval CUPTI_SUCCESS
720
+ * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called with
721
+ * incorrect PC Sampling configuration.
722
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
723
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
724
+ * does not support the API
725
+ */
726
+ CUptiResult CUPTIAPI cuptiPCSamplingStop(CUpti_PCSamplingStopParams *pParams);
727
+
728
+ /**
729
+ * \brief Params for cuptiPCSamplingGetNumStallReasons
730
+ */
731
+ typedef struct
732
+ {
733
+ /**
734
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetNumStallReasonsParamsSize
735
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
736
+ * available in the structure. Used to preserve backward compatibility.
737
+ */
738
+ size_t size;
739
+ /**
740
+ * [w] Assign to NULL
741
+ */
742
+ void* pPriv;
743
+ /**
744
+ * [w] CUcontext
745
+ */
746
+ CUcontext ctx;
747
+ /**
748
+ * [r] Number of stall reasons
749
+ */
750
+ size_t *numStallReasons;
751
+ } CUpti_PCSamplingGetNumStallReasonsParams;
752
+ #define CUpti_PCSamplingGetNumStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetNumStallReasonsParams, numStallReasons)
753
+
754
+ /**
755
+ * \brief Get PC sampling stall reason count.
756
+ *
757
+ * \param Refer \ref CUpti_PCSamplingGetNumStallReasonsParams
758
+ *
759
+ * \retval CUPTI_SUCCESS
760
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
761
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
762
+ * does not support the API
763
+ */
764
+ CUptiResult CUPTIAPI cuptiPCSamplingGetNumStallReasons(CUpti_PCSamplingGetNumStallReasonsParams *pParams);
765
+
766
+ /**
767
+ * \brief Params for cuptiPCSamplingGetStallReasons
768
+ */
769
+ typedef struct
770
+ {
771
+ /**
772
+ * [w] Size of the data structure i.e. CUpti_PCSamplingGetStallReasonsParamsSize
773
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
774
+ * available in the structure. Used to preserve backward compatibility.
775
+ */
776
+ size_t size;
777
+ /**
778
+ * [w] Assign to NULL
779
+ */
780
+ void* pPriv;
781
+ /**
782
+ * [w] CUcontext
783
+ */
784
+ CUcontext ctx;
785
+ /**
786
+ * [w] Number of stall reasons
787
+ */
788
+ size_t numStallReasons;
789
+ /**
790
+ * [r] Stall reason index
791
+ */
792
+ uint32_t *stallReasonIndex;
793
+ /**
794
+ * [r] Stall reasons name
795
+ */
796
+ char **stallReasons;
797
+ } CUpti_PCSamplingGetStallReasonsParams;
798
+ #define CUpti_PCSamplingGetStallReasonsParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_PCSamplingGetStallReasonsParams, stallReasons)
799
+
800
+ /**
801
+ * \brief Get PC sampling stall reasons.
802
+ *
803
+ * \param Refer \ref CUpti_PCSamplingGetStallReasonsParams
804
+ *
805
+ * \retval CUPTI_SUCCESS
806
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid
807
+ * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device
808
+ * does not support the API
809
+ */
810
+ CUptiResult CUPTIAPI cuptiPCSamplingGetStallReasons(CUpti_PCSamplingGetStallReasonsParams *pParams);
811
+
812
+ /**
813
+ * \brief Params for cuptiGetSassToSourceCorrelation
814
+ */
815
+ typedef struct {
816
+ /**
817
+ * [w] Size of the data structure i.e. CUpti_GetSassToSourceCorrelationParamsSize
818
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
819
+ * available in the structure. Used to preserve backward compatibility.
820
+ */
821
+ size_t size;
822
+ /**
823
+ * [w] Pointer to cubin binary where function belongs.
824
+ */
825
+ const void* cubin;
826
+ /**
827
+ * [w] Function name to which PC belongs.
828
+ */
829
+ const char *functionName;
830
+ /**
831
+ * [w] Size of cubin binary.
832
+ */
833
+ size_t cubinSize;
834
+ /**
835
+ * [r] Line number in the source code.
836
+ */
837
+ uint32_t lineNumber;
838
+ /**
839
+ * [w] PC offset
840
+ */
841
+ uint64_t pcOffset;
842
+ /**
843
+ * [r] Path for the source file.
844
+ */
845
+ char *fileName;
846
+ /**
847
+ * [r] Path for the directory of source file.
848
+ */
849
+ char *dirName;
850
+ } CUpti_GetSassToSourceCorrelationParams;
851
+ #define CUpti_GetSassToSourceCorrelationParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetSassToSourceCorrelationParams, dirName)
852
+
853
+ /**
854
+ * \brief SASS to Source correlation.
855
+ *
856
+ * \param Refer \ref CUpti_GetSassToSourceCorrelationParams
857
+ *
858
+ * It is expected from user to free allocated memory for fileName and dirName after use.
859
+ *
860
+ * \retval CUPTI_SUCCESS
861
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if either of the parameters cubin or functionName
862
+ * is NULL or cubinSize is zero or size field is not set correctly.
863
+ * \retval CUPTI_ERROR_INVALID_MODULE provided cubin is invalid.
864
+ * \retval CUPTI_ERROR_UNKNOWN an internal error occurred.
865
+ * This error code is also used for cases when the function is not present in the module.
866
+ * A better error code will be returned in the future release.
867
+ */
868
+ CUptiResult CUPTIAPI cuptiGetSassToSourceCorrelation(CUpti_GetSassToSourceCorrelationParams *pParams);
869
+
870
+ /**
871
+ * \brief Params for cuptiGetCubinCrc
872
+ */
873
+ typedef struct {
874
+ /**
875
+ * [w] Size of configuration structure.
876
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
877
+ * available in the structure. Used to preserve backward compatibility.
878
+ */
879
+ size_t size;
880
+ /**
881
+ * [w] Size of cubin binary.
882
+ */
883
+ size_t cubinSize;
884
+ /**
885
+ * [w] Pointer to cubin binary
886
+ */
887
+ const void* cubin;
888
+ /**
889
+ * [r] Computed CRC will be stored in it.
890
+ */
891
+ uint64_t cubinCrc;
892
+ } CUpti_GetCubinCrcParams;
893
+ #define CUpti_GetCubinCrcParamsSize CUPTI_PCSAMPLING_STRUCT_SIZE(CUpti_GetCubinCrcParams, cubinCrc)
894
+
895
+ /**
896
+ * \brief Get the CRC of cubin.
897
+ *
898
+ * This function returns the CRC of provided cubin binary.
899
+ *
900
+ * \param Refer \ref CUpti_GetCubinCrcParams
901
+ *
902
+ * \retval CUPTI_SUCCESS
903
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if parameter cubin is NULL or
904
+ * provided cubinSize is zero or size field is not set.
905
+ */
906
+ CUptiResult CUPTIAPI cuptiGetCubinCrc(CUpti_GetCubinCrcParams *pParams);
907
+
908
+ /**
909
+ * \brief Function type for callback used by CUPTI to request crc of
910
+ * loaded module.
911
+ *
912
+ * This callback function ask for crc of provided module in function.
913
+ * The provided crc will be stored in PC sampling records i.e. in the field 'cubinCrc' of the PC sampling
914
+ * struct CUpti_PCSamplingPCData. The CRC is uses during the offline source correlation to uniquely identify the module.
915
+ *
916
+ * \param cubin The pointer to cubin binary
917
+ * \param cubinSize The size of cubin binary.
918
+ * \param cubinCrc Returns the computed crc of cubin.
919
+ */
920
+ typedef void (CUPTIAPI *CUpti_ComputeCrcCallbackFunc)(
921
+ const void* cubin,
922
+ size_t cubinSize,
923
+ uint64_t *cubinCrc);
924
+
925
+ /**
926
+ * \brief Register callback function with CUPTI to use
927
+ * your own algorithm to compute cubin crc.
928
+ *
929
+ * This function registers a callback function and it gets called
930
+ * from CUPTI when a CUDA module is loaded.
931
+ *
932
+ * \param funcComputeCubinCrc callback is invoked when a CUDA module
933
+ * is loaded.
934
+ *
935
+ * \retval CUPTI_SUCCESS
936
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p funcComputeCubinCrc is NULL.
937
+ */
938
+ CUptiResult CUPTIAPI cuptiRegisterComputeCrcCallback(CUpti_ComputeCrcCallbackFunc funcComputeCubinCrc);
939
+
940
+ /** @} */ /* END CUPTI_PCSAMPLING_API */
941
+
942
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
943
+ #pragma GCC visibility pop
944
+ #endif
945
+
946
+ #if defined(__cplusplus)
947
+ }
948
+ #endif
949
+
950
+ #endif /*_CUPTI_PCSAMPLING_H_*/
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_pcsampling_util.h ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #if !defined(_CUPTI_PCSAMPLING_UTIL_H_)
2
+ #define _CUPTI_PCSAMPLING_UTIL_H_
3
+
4
+ #include <cupti_pcsampling.h>
5
+ #include <fstream>
6
+
7
+ #ifndef CUPTIUTILAPI
8
+ #ifdef _WIN32
9
+ #define CUPTIUTILAPI __stdcall
10
+ #else
11
+ #define CUPTIUTILAPI
12
+ #endif
13
+ #endif
14
+
15
+ #define ACTIVITY_RECORD_ALIGNMENT 8
16
+ #if defined(_WIN32) // Windows 32- and 64-bit
17
+ #define START_PACKED_ALIGNMENT __pragma(pack(push,1)) // exact fit - no padding
18
+ #define PACKED_ALIGNMENT __declspec(align(ACTIVITY_RECORD_ALIGNMENT))
19
+ #define END_PACKED_ALIGNMENT __pragma(pack(pop))
20
+ #elif defined(__GNUC__) // GCC
21
+ #define START_PACKED_ALIGNMENT
22
+ #define PACKED_ALIGNMENT __attribute__ ((__packed__)) __attribute__ ((aligned (ACTIVITY_RECORD_ALIGNMENT)))
23
+ #define END_PACKED_ALIGNMENT
24
+ #else // all other compilers
25
+ #define START_PACKED_ALIGNMENT
26
+ #define PACKED_ALIGNMENT
27
+ #define END_PACKED_ALIGNMENT
28
+ #endif
29
+
30
+ #ifndef CUPTI_UTIL_STRUCT_SIZE
31
+ #define CUPTI_UTIL_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
32
+ #endif
33
+
34
+ #ifndef CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS
35
+ #define CHECK_PC_SAMPLING_STRUCT_FIELD_EXISTS(type, member, structSize) \
36
+ (offsetof(type, member) < structSize)
37
+ #endif
38
+
39
+ #if defined(__cplusplus)
40
+ extern "C" {
41
+ #endif
42
+
43
+ #if defined(__GNUC__)
44
+ #pragma GCC visibility push(default)
45
+ #endif
46
+
47
+ namespace CUPTI { namespace PcSamplingUtil {
48
+
49
+ /**
50
+ * \defgroup CUPTI_PCSAMPLING_UTILITY CUPTI PC Sampling Utility API
51
+ * Functions, types, and enums that implement the CUPTI PC Sampling Utility API.
52
+ * @{
53
+ */
54
+
55
+ /**
56
+ * \brief Header info will be stored in file.
57
+ */
58
+ typedef struct PACKED_ALIGNMENT {
59
+ /**
60
+ * Version of file format.
61
+ */
62
+ uint32_t version;
63
+ /**
64
+ * Total number of buffers present in the file.
65
+ */
66
+ uint32_t totalBuffers;
67
+ } Header;
68
+
69
+ /**
70
+ * \brief BufferInfo will be stored in the file for every buffer
71
+ * i.e for every call of UtilDumpPcSamplingBufferInFile() API.
72
+ */
73
+ typedef struct PACKED_ALIGNMENT {
74
+ /**
75
+ * Total number of PC records.
76
+ */
77
+ uint64_t recordCount;
78
+ /**
79
+ * Count of all stall reasons supported on the GPU
80
+ */
81
+ size_t numStallReasons;
82
+ /**
83
+ * Total number of stall reasons in single record.
84
+ */
85
+ uint64_t numSelectedStallReasons;
86
+ /**
87
+ * Buffer size in Bytes.
88
+ */
89
+ uint64_t bufferByteSize;
90
+ } BufferInfo;
91
+
92
+ /**
93
+ * \brief All available stall reasons name and respective indexes
94
+ * will be stored in it.
95
+ */
96
+ typedef struct PACKED_ALIGNMENT {
97
+ /**
98
+ * Number of all available stall reasons
99
+ */
100
+ size_t numStallReasons;
101
+ /**
102
+ * Stall reasons names of all available stall reasons
103
+ */
104
+ char **stallReasons;
105
+ /**
106
+ * Stall reason index of all available stall reasons
107
+ */
108
+ uint32_t *stallReasonIndex;
109
+ } PcSamplingStallReasons;
110
+
111
+ typedef enum {
112
+ /**
113
+ * Invalid buffer type.
114
+ */
115
+ PC_SAMPLING_BUFFER_INVALID = 0,
116
+ /**
117
+ * Refers to CUpti_PCSamplingData buffer.
118
+ */
119
+ PC_SAMPLING_BUFFER_PC_TO_COUNTER_DATA = 1
120
+ } PcSamplingBufferType;
121
+
122
+ /**
123
+ * \brief CUPTI PC sampling utility API result codes.
124
+ *
125
+ * Error and result codes returned by CUPTI PC sampling utility API.
126
+ */
127
+ typedef enum {
128
+ /**
129
+ * No error
130
+ */
131
+ CUPTI_UTIL_SUCCESS = 0,
132
+ /**
133
+ * One or more of the parameters are invalid.
134
+ */
135
+ CUPTI_UTIL_ERROR_INVALID_PARAMETER = 1,
136
+ /**
137
+ * Unable to create a new file
138
+ */
139
+ CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE = 2,
140
+ /**
141
+ * Unable to open a file
142
+ */
143
+ CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE = 3,
144
+ /**
145
+ * Read or write operation failed
146
+ */
147
+ CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED = 4,
148
+ /**
149
+ * Provided file handle is corrupted.
150
+ */
151
+ CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED = 5,
152
+ /**
153
+ * seek operation failed.
154
+ */
155
+ CUPTI_UTIL_ERROR_SEEK_OPERATION_FAILED = 6,
156
+ /**
157
+ * Unable to allocate enough memory to perform the requested
158
+ * operation.
159
+ */
160
+ CUPTI_UTIL_ERROR_OUT_OF_MEMORY = 7,
161
+ /**
162
+ * An unknown internal error has occurred.
163
+ */
164
+ CUPTI_UTIL_ERROR_UNKNOWN = 999,
165
+ CUPTI_UTIL_ERROR_FORCE_INT = 0x7fffffff
166
+ } CUptiUtilResult;
167
+
168
+ /**
169
+ * \brief Params for \ref CuptiUtilPutPcSampData
170
+ */
171
+ typedef struct {
172
+ /**
173
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
174
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
175
+ * available in the structure. Used to preserve backward compatibility.
176
+ */
177
+ size_t size;
178
+ /**
179
+ * Type of buffer to store in file
180
+ */
181
+ PcSamplingBufferType bufferType;
182
+ /**
183
+ * PC sampling buffer.
184
+ */
185
+ void *pSamplingData;
186
+ /**
187
+ * Number of configured attributes
188
+ */
189
+ size_t numAttributes;
190
+ /**
191
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
192
+ * It is expected to provide configuration details of at least
193
+ * CUPTI_PC_SAMPLING_CONFIGURATION_ATTR_TYPE_STALL_REASON attribute.
194
+ */
195
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
196
+ /**
197
+ * Refer \ref PcSamplingStallReasons.
198
+ */
199
+ PcSamplingStallReasons *pPcSamplingStallReasons;
200
+ /**
201
+ * File name to store buffer into it.
202
+ */
203
+ const char* fileName;
204
+ } CUptiUtil_PutPcSampDataParams;
205
+ #define CUptiUtil_PutPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_PutPcSampDataParams, fileName)
206
+
207
+ /**
208
+ * \brief Dump PC sampling data into the file.
209
+ *
210
+ * This API can be called multiple times.
211
+ * It will append buffer in the file.
212
+ * For every buffer it will store BufferInfo
213
+ * so that before retrieving data it will help to allocate buffer
214
+ * to store retrieved data.
215
+ * This API creates file if file does not present.
216
+ * If stallReasonIndex or stallReasons pointer of \ref CUptiUtil_PutPcSampDataParams is NULL
217
+ * then stall reasons data will not be stored in file.
218
+ * It is expected to store all available stall reason data at least once to refer it during
219
+ * offline correlation.
220
+ *
221
+ * \retval CUPTI_UTIL_SUCCESS
222
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
223
+ * or if either of pSamplingData, pParams pointer is NULL or stall reason configuration details not provided
224
+ * or filename is empty.
225
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_CREATE_FILE
226
+ * \retval CUPTI_UTIL_ERROR_UNABLE_TO_OPEN_FILE
227
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
228
+ */
229
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilPutPcSampData(CUptiUtil_PutPcSampDataParams *pParams);
230
+
231
+ /**
232
+ * \brief Params for \ref CuptiUtilGetHeaderData
233
+ */
234
+ typedef struct {
235
+ /**
236
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
237
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
238
+ * available in the structure. Used to preserve backward compatibility.
239
+ */
240
+ size_t size;
241
+ /**
242
+ * File handle.
243
+ */
244
+ std::ifstream *fileHandler;
245
+ /**
246
+ * Header Info.
247
+ */
248
+ Header headerInfo;
249
+
250
+ } CUptiUtil_GetHeaderDataParams;
251
+ #define CUptiUtil_GetHeaderDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetHeaderDataParams, headerInfo)
252
+
253
+ /**
254
+ * \brief Get header data of file.
255
+ *
256
+ * This API must be called once initially while retrieving data from file.
257
+ * \ref Header structure, it gives info about total number
258
+ * of buffers present in the file.
259
+ *
260
+ * \retval CUPTI_UTIL_SUCCESS
261
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
262
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file
263
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
264
+ */
265
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetHeaderData(CUptiUtil_GetHeaderDataParams *pParams);
266
+
267
+ /**
268
+ * \brief Params for \ref CuptiUtilGetBufferInfo
269
+ */
270
+ typedef struct {
271
+ /**
272
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
273
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
274
+ * available in the structure. Used to preserve backward compatibility.
275
+ */
276
+ size_t size;
277
+ /**
278
+ * File handle.
279
+ */
280
+ std::ifstream *fileHandler;
281
+ /**
282
+ * Buffer Info.
283
+ */
284
+ BufferInfo bufferInfoData;
285
+ } CUptiUtil_GetBufferInfoParams;
286
+ #define CUptiUtil_GetBufferInfoParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetBufferInfoParams, bufferInfoData)
287
+
288
+ /**
289
+ * \brief Get buffer info data of file.
290
+ *
291
+ * This API must be called every time before calling CuptiUtilGetPcSampData API.
292
+ * \ref BufferInfo structure, it gives info about recordCount and stallReasonCount
293
+ * of every record in the buffer. This will help to allocate exact buffer to retrieve data into it.
294
+ *
295
+ * \retval CUPTI_UTIL_SUCCESS
296
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if either of pParam or fileHandle is NULL or param struct size is incorrect.
297
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
298
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED failed to read data from file.
299
+ */
300
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetBufferInfo(CUptiUtil_GetBufferInfoParams *pParams);
301
+
302
+ /**
303
+ * \brief Params for \ref CuptiUtilGetPcSampData
304
+ */
305
+ typedef struct {
306
+ /**
307
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
308
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
309
+ * available in the structure. Used to preserve backward compatibility.
310
+ */
311
+ size_t size;
312
+ /**
313
+ * File handle.
314
+ */
315
+ std::ifstream *fileHandler;
316
+ /**
317
+ * Type of buffer to store in file
318
+ */
319
+ PcSamplingBufferType bufferType;
320
+ /**
321
+ * Pointer to collected buffer info using \ref CuptiUtilGetBufferInfo
322
+ */
323
+ BufferInfo *pBufferInfoData;
324
+ /**
325
+ * Pointer to allocated memory to store retrieved data from file.
326
+ */
327
+ void *pSamplingData;
328
+ /**
329
+ * Number of configuration attributes
330
+ */
331
+ size_t numAttributes;
332
+ /**
333
+ * Refer \ref CUpti_PCSamplingConfigurationInfo
334
+ */
335
+ CUpti_PCSamplingConfigurationInfo *pPCSamplingConfigurationInfo;
336
+ /**
337
+ * Refer \ref PcSamplingStallReasons.
338
+ * For stallReasons field of \ref PcSamplingStallReasons it is expected to
339
+ * allocate memory for each string element of array.
340
+ */
341
+ PcSamplingStallReasons *pPcSamplingStallReasons;
342
+ } CUptiUtil_GetPcSampDataParams;
343
+ #define CUptiUtil_GetPcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_GetPcSampDataParams, pPcSamplingStallReasons)
344
+
345
+ /**
346
+ * \brief Retrieve PC sampling data from file into allocated buffer.
347
+ *
348
+ * This API must be called after CuptiUtilGetBufferInfo API.
349
+ * It will retrieve data from file into allocated buffer.
350
+ *
351
+ * \retval CUPTI_UTIL_SUCCESS
352
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if buffer type is invalid
353
+ * or if either of pSampData, pParams is NULL. If pPcSamplingStallReasons is not NULL then
354
+ * error out if either of stallReasonIndex, stallReasons or stallReasons array element pointer is NULL.
355
+ * or filename is empty.
356
+ * \retval CUPTI_UTIL_ERROR_READ_WRITE_OPERATION_FAILED
357
+ * \retval CUPTI_UTIL_ERROR_FILE_HANDLE_CORRUPTED file handle is not in good state to read data from file.
358
+ */
359
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilGetPcSampData(CUptiUtil_GetPcSampDataParams *pParams);
360
+
361
+ /**
362
+ * \brief Params for \ref CuptiUtilMergePcSampData
363
+ */
364
+ typedef struct
365
+ {
366
+ /**
367
+ * Size of the data structure i.e. CUpti_PCSamplingDisableParamsSize
368
+ * CUPTI client should set the size of the structure. It will be used in CUPTI to check what fields are
369
+ * available in the structure. Used to preserve backward compatibility.
370
+ */
371
+ size_t size;
372
+ /**
373
+ * Number of buffers to merge.
374
+ */
375
+ size_t numberOfBuffers;
376
+ /**
377
+ * Pointer to array of buffers to merge
378
+ */
379
+ CUpti_PCSamplingData *PcSampDataBuffer;
380
+ /**
381
+ * Pointer to array of merged buffers as per the range id.
382
+ */
383
+ CUpti_PCSamplingData **MergedPcSampDataBuffers;
384
+ /**
385
+ * Number of merged buffers.
386
+ */
387
+ size_t *numMergedBuffer;
388
+ } CUptiUtil_MergePcSampDataParams;
389
+ #define CUptiUtil_MergePcSampDataParamsSize CUPTI_UTIL_STRUCT_SIZE(CUptiUtil_MergePcSampDataParams, numMergedBuffer)
390
+
391
+ /**
392
+ * \brief Merge PC sampling data range id wise.
393
+ *
394
+ * This API merge PC sampling data range id wise.
395
+ * It allocates memory for merged data and fill data in it
396
+ * and provide buffer pointer in MergedPcSampDataBuffers field.
397
+ * It is expected from user to free merge data buffers after use.
398
+ *
399
+ * \retval CUPTI_UTIL_SUCCESS
400
+ * \retval CUPTI_UTIL_ERROR_INVALID_PARAMETER error out if param struct size is invalid
401
+ * or count of buffers to merge is invalid i.e less than 1
402
+ * or either of PcSampDataBuffer, MergedPcSampDataBuffers, numMergedBuffer is NULL
403
+ * \retval CUPTI_UTIL_ERROR_OUT_OF_MEMORY Unable to allocate memory for merged buffer.
404
+ */
405
+ CUptiUtilResult CUPTIUTILAPI CuptiUtilMergePcSampData(CUptiUtil_MergePcSampDataParams *pParams);
406
+
407
+ /** @} */ /* END CUPTI_PCSAMPLING_UTILITY */
408
+
409
+ } }
410
+
411
+ #if defined(__GNUC__)
412
+ #pragma GCC visibility pop
413
+ #endif
414
+
415
+ #if defined(__cplusplus)
416
+ }
417
+ #endif
418
+
419
+ #endif
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h ADDED
@@ -0,0 +1,589 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2011-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PROFILER_TARGET_H_)
51
+ #define _CUPTI_PROFILER_TARGET_H_
52
+
53
+ #include <cuda.h>
54
+ #include <cupti_result.h>
55
+ #include <stddef.h>
56
+ #include <stdint.h>
57
+
58
+ #ifdef __cplusplus
59
+ extern "C" {
60
+ #endif
61
+
62
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
63
+ #pragma GCC visibility push(default)
64
+ #endif
65
+
66
+ /**
67
+ * \defgroup CUPTI_PROFILER_API CUPTI Profiling API
68
+ * Functions, types, and enums that implement the CUPTI Profiling API.
69
+ * @{
70
+ */
71
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
72
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
73
+ #endif
74
+
75
+ /**
76
+ * \brief Profiler range attribute
77
+ *
78
+ * A metric enabled in the session's configuration is collected separately per unique range-stack in the pass.
79
+ * This is an attribute to collect metrics around each kernel in a profiling session or in an user defined range.
80
+ */
81
+ typedef enum
82
+ {
83
+ /**
84
+ * Invalid value
85
+ */
86
+ CUPTI_Range_INVALID,
87
+ /**
88
+ * Ranges are auto defined around each kernel in a profiling session
89
+ */
90
+ CUPTI_AutoRange,
91
+ /**
92
+ * A range in which metric data to be collected is defined by the user
93
+ */
94
+ CUPTI_UserRange,
95
+ /**
96
+ * Range count
97
+ */
98
+ CUPTI_Range_COUNT,
99
+ } CUpti_ProfilerRange;
100
+
101
+ /**
102
+ * \brief Profiler replay attribute
103
+ *
104
+ * For metrics which require multipass collection, a replay of the GPU kernel(s) is required.
105
+ * This is an attribute which specify how the replay of the kernel(s) to be measured is done.
106
+ */
107
+ typedef enum
108
+ {
109
+ /**
110
+ * Invalid Value
111
+ */
112
+ CUPTI_Replay_INVALID,
113
+ /**
114
+ * Replay is done by CUPTI user around the process
115
+ */
116
+ CUPTI_ApplicationReplay,
117
+ /**
118
+ * Replay is done around kernel implicitly by CUPTI
119
+ */
120
+ CUPTI_KernelReplay,
121
+ /**
122
+ * Replay is done by CUPTI user within a process
123
+ */
124
+ CUPTI_UserReplay,
125
+ /**
126
+ * Replay count
127
+ */
128
+ CUPTI_Replay_COUNT,
129
+ } CUpti_ProfilerReplayMode;
130
+
131
+ /**
132
+ * \brief Default parameter for cuptiProfilerInitialize
133
+ */
134
+ typedef struct CUpti_Profiler_Initialize_Params
135
+ {
136
+ size_t structSize; //!< [in] CUpti_Profiler_Initialize_Params_STRUCT_SIZE
137
+ void* pPriv; //!< [in] assign to NULL
138
+
139
+ } CUpti_Profiler_Initialize_Params;
140
+ #define CUpti_Profiler_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Initialize_Params, pPriv)
141
+
142
+ /**
143
+ * \brief Default parameter for cuptiProfilerDeInitialize
144
+ */
145
+ typedef struct CUpti_Profiler_DeInitialize_Params
146
+ {
147
+ size_t structSize; //!< [in] CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE
148
+ void* pPriv; //!< [in] assign to NULL
149
+
150
+ } CUpti_Profiler_DeInitialize_Params;
151
+ #define CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeInitialize_Params, pPriv)
152
+
153
+ /**
154
+ * \brief Initializes the profiler interface
155
+ *
156
+ * Loads the required libraries in the process address space.
157
+ * Sets up the hooks with the CUDA driver.
158
+ */
159
+ CUptiResult CUPTIAPI cuptiProfilerInitialize(CUpti_Profiler_Initialize_Params *pParams);
160
+
161
+ /**
162
+ * \brief DeInitializes the profiler interface
163
+ */
164
+ CUptiResult CUPTIAPI cuptiProfilerDeInitialize(CUpti_Profiler_DeInitialize_Params *pParams);
165
+
166
+ /**
167
+ * \brief Input parameter to define the counterDataImage
168
+ */
169
+ typedef struct CUpti_Profiler_CounterDataImageOptions
170
+ {
171
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImageOptions_Params_STRUCT_SIZE
172
+ void* pPriv; //!< [in] assign to NULL
173
+
174
+ const uint8_t* pCounterDataPrefix; /**< [in] Address of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix().
175
+ Must be align(8).*/
176
+ size_t counterDataPrefixSize; //!< [in] Size of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix().
177
+ uint32_t maxNumRanges; //!< [in] Maximum number of ranges that can be profiled
178
+ uint32_t maxNumRangeTreeNodes; //!< [in] Maximum number of RangeTree nodes; must be >= maxNumRanges
179
+ uint32_t maxRangeNameLength; //!< [in] Maximum string length of each RangeName, including the trailing NULL character
180
+ } CUpti_Profiler_CounterDataImageOptions;
181
+ #define CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImageOptions, maxRangeNameLength)
182
+
183
+ /**
184
+ * \brief Params for cuptiProfilerCounterDataImageCalculateSize
185
+ */
186
+ typedef struct CUpti_Profiler_CounterDataImage_CalculateSize_Params
187
+ {
188
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE
189
+ void* pPriv; //!< [in] assign to NULL
190
+
191
+ size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE
192
+ const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options
193
+ size_t counterDataImageSize; //!< [out]
194
+ } CUpti_Profiler_CounterDataImage_CalculateSize_Params;
195
+ #define CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateSize_Params, counterDataImageSize)
196
+
197
+ /**
198
+ * \brief Params for cuptiProfilerCounterDataImageInitialize
199
+ */
200
+ typedef struct CUpti_Profiler_CounterDataImage_Initialize_Params
201
+ {
202
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE
203
+ void* pPriv; //!< [in] assign to NULL
204
+
205
+ size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE
206
+ const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options
207
+ size_t counterDataImageSize; //!< [in] Size calculated from cuptiProfilerCounterDataImageCalculateSize
208
+ uint8_t* pCounterDataImage; //!< [in] The buffer to be initialized.
209
+ } CUpti_Profiler_CounterDataImage_Initialize_Params;
210
+ #define CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_Initialize_Params, pCounterDataImage)
211
+
212
+ /**
213
+ * \brief A CounterData image allocates space for values for each counter for each range.
214
+ *
215
+ * User borne the resposibility of managing the counterDataImage allocations.
216
+ * CounterDataPrefix contains meta data about the metrics that will be stored in counterDataImage.
217
+ * Use these APIs to calculate the allocation size and initialize counterData image.
218
+ */
219
+ CUptiResult cuptiProfilerCounterDataImageCalculateSize(CUpti_Profiler_CounterDataImage_CalculateSize_Params* pParams);
220
+ CUptiResult cuptiProfilerCounterDataImageInitialize(CUpti_Profiler_CounterDataImage_Initialize_Params* pParams);
221
+
222
+ /**
223
+ * \brief Params for cuptiProfilerCounterDataImageCalculateScratchBufferSize
224
+ */
225
+ typedef struct CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params
226
+ {
227
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE
228
+ void* pPriv; //!< [in] assign to NULL
229
+
230
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
231
+ uint8_t* pCounterDataImage; //!< [in]
232
+ size_t counterDataScratchBufferSize; //!< [out]
233
+ } CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params;
234
+ #define CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params, counterDataScratchBufferSize)
235
+
236
+ /**
237
+ * \brief Params for cuptiProfilerCounterDataImageInitializeScratchBuffer
238
+ */
239
+ typedef struct CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params
240
+ {
241
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE
242
+ void* pPriv; //!< [in] assign to NULL
243
+
244
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
245
+ uint8_t* pCounterDataImage; //!< [in]
246
+ size_t counterDataScratchBufferSize; //!< [in] size calculated using cuptiProfilerCounterDataImageCalculateScratchBufferSize
247
+ uint8_t* pCounterDataScratchBuffer; //!< [in] the scratch buffer to be initialized.
248
+ } CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params;
249
+ #define CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params, pCounterDataScratchBuffer)
250
+
251
+ /**
252
+ * \brief A temporary storage for CounterData image needed for internal operations
253
+ *
254
+ * Use these APIs to calculate the allocation size and initialize counterData image scratch buffer.
255
+ */
256
+ CUptiResult cuptiProfilerCounterDataImageCalculateScratchBufferSize(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params* pParams);
257
+ CUptiResult cuptiProfilerCounterDataImageInitializeScratchBuffer(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params* pParams);
258
+
259
+ /**
260
+ * \brief Params for cuptiProfilerBeginSession
261
+ */
262
+ typedef struct CUpti_Profiler_BeginSession_Params
263
+ {
264
+ size_t structSize; //!< [in] CUpti_Profiler_BeginSession_Params_STRUCT_SIZE
265
+ void* pPriv; //!< [in] assign to NULL
266
+
267
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
268
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
269
+ uint8_t* pCounterDataImage; //!< [in] address of CounterDataImage
270
+ size_t counterDataScratchBufferSize; //!< [in] size calculated from cuptiProfilerCounterDataImageInitializeScratchBuffer
271
+ uint8_t* pCounterDataScratchBuffer; //!< [in] address of CounterDataImage scratch buffer
272
+ uint8_t bDumpCounterDataInFile; //!< [in] [optional]
273
+ const char* pCounterDataFilePath; //!< [in] [optional]
274
+ CUpti_ProfilerRange range; //!< [in] CUpti_ProfilerRange
275
+ CUpti_ProfilerReplayMode replayMode; //!< [in] CUpti_ProfilerReplayMode
276
+ /* Replay options, required when replay is done by cupti user */
277
+ size_t maxRangesPerPass; //!< [in] Maximum number of ranges that can be recorded in a single pass.
278
+ size_t maxLaunchesPerPass; //!< [in] Maximum number of kernel launches that can be recorded in a single pass; must be >= maxRangesPerPass.
279
+
280
+ } CUpti_Profiler_BeginSession_Params;
281
+ #define CUpti_Profiler_BeginSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginSession_Params, maxLaunchesPerPass)
282
+ /**
283
+ * \brief Params for cuptiProfilerEndSession
284
+ */
285
+ typedef struct CUpti_Profiler_EndSession_Params
286
+ {
287
+ size_t structSize; //!< [in] CUpti_Profiler_EndSession_Params_STRUCT_SIZE
288
+ void* pPriv; //!< [in] assign to NULL
289
+
290
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
291
+ } CUpti_Profiler_EndSession_Params;
292
+ #define CUpti_Profiler_EndSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndSession_Params, ctx)
293
+
294
+ /**
295
+ * \brief Begin profiling session sets up the profiling on the device
296
+ *
297
+ * Although, it doesn't start the profiling but GPU resources needed for profiling are allocated.
298
+ * Outside of a session, the GPU will return to its normal operating state.
299
+ */
300
+ CUptiResult CUPTIAPI cuptiProfilerBeginSession(CUpti_Profiler_BeginSession_Params* pParams);
301
+ /**
302
+ * \brief Ends profiling session
303
+ *
304
+ * Frees up the GPU resources acquired for profiling.
305
+ * Outside of a session, the GPU will return to it's normal operating state.
306
+ */
307
+ CUptiResult CUPTIAPI cuptiProfilerEndSession(CUpti_Profiler_EndSession_Params* pParams);
308
+
309
+ /**
310
+ * \brief Params for cuptiProfilerSetConfig
311
+ */
312
+ typedef struct CUpti_Profiler_SetConfig_Params
313
+ {
314
+ size_t structSize; //!< [in] CUpti_Profiler_SetConfig_Params_STRUCT_SIZE
315
+ void* pPriv; //!< [in] assign to NULL
316
+
317
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
318
+ const uint8_t* pConfig; //!< [in] Config created by NVPW_RawMetricsConfig_GetConfigImage(). Must be align(8).
319
+ size_t configSize; //!< [in] size of config
320
+ uint16_t minNestingLevel; //!< [in] the lowest nesting level to be profiled; must be >= 1
321
+ uint16_t numNestingLevels; //!< [in] the number of nesting levels to profile; must be >= 1
322
+ size_t passIndex; //!< [in] Set this to zero for in-app replay; set this to the output of EndPass() for application replay
323
+ uint16_t targetNestingLevel; //!< [in] Set this to minNestingLevel for in-app replay; set this to the output of EndPass() for application
324
+ } CUpti_Profiler_SetConfig_Params;
325
+
326
+ #define CUpti_Profiler_SetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_SetConfig_Params, targetNestingLevel)
327
+
328
+ /**
329
+ * \brief Params for cuptiProfilerUnsetConfig
330
+ */
331
+ typedef struct CUpti_Profiler_UnsetConfig_Params
332
+ {
333
+ size_t structSize; //!< [in] CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE
334
+ void* pPriv; //!< [in] assign to NULL
335
+
336
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
337
+ } CUpti_Profiler_UnsetConfig_Params;
338
+ #define CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_UnsetConfig_Params, ctx)
339
+
340
+ /**
341
+ * \brief Set metrics configuration to be profiled
342
+ *
343
+ * Use these APIs to set the config to profile in a session. It can be used for advanced cases such as where multiple
344
+ * configurations are collected into a single CounterData Image on the need basis, without restarting the session.
345
+ */
346
+ CUptiResult CUPTIAPI cuptiProfilerSetConfig(CUpti_Profiler_SetConfig_Params* pParams);
347
+ /**
348
+ * \brief Unset metrics configuration profiled
349
+ *
350
+ */
351
+ CUptiResult CUPTIAPI cuptiProfilerUnsetConfig(CUpti_Profiler_UnsetConfig_Params* pParams);
352
+
353
+ /**
354
+ * \brief Params for cuptiProfilerBeginPass
355
+ */
356
+ typedef struct CUpti_Profiler_BeginPass_Params
357
+ {
358
+ size_t structSize; //!< [in] CUpti_Profiler_BeginPass_Params_STRUCT_SIZE
359
+ void* pPriv; //!< [in] assign to NULL
360
+
361
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
362
+ } CUpti_Profiler_BeginPass_Params;
363
+ #define CUpti_Profiler_BeginPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginPass_Params, ctx)
364
+
365
+ /**
366
+ * \brief Params for cuptiProfilerEndPass
367
+ */
368
+ typedef struct CUpti_Profiler_EndPass_Params
369
+ {
370
+ size_t structSize; //!< [in] CUpti_Profiler_EndPass_Params_STRUCT_SIZE
371
+ void* pPriv; //!< [in] assign to NULL
372
+
373
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
374
+ uint16_t targetNestingLevel; //! [out] The targetNestingLevel that will be collected by the *next* BeginPass.
375
+ size_t passIndex; //!< [out] The passIndex that will be collected by the *next* BeginPass
376
+ uint8_t allPassesSubmitted; //!< [out] becomes true when the last pass has been queued to the GPU
377
+ } CUpti_Profiler_EndPass_Params;
378
+ #define CUpti_Profiler_EndPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndPass_Params, allPassesSubmitted)
379
+
380
+ /**
381
+ * \brief Replay API: used for multipass collection.
382
+
383
+ * These APIs are used if user chooses to replay by itself \ref CUPTI_UserReplay or \ref CUPTI_ApplicationReplay
384
+ * for multipass collection of the metrics configurations.
385
+ * It's a no-op in case of \ref CUPTI_KernelReplay.
386
+ */
387
+ CUptiResult cuptiProfilerBeginPass(CUpti_Profiler_BeginPass_Params* pParams);
388
+
389
+ /**
390
+ * \brief Replay API: used for multipass collection.
391
+
392
+ * These APIs are used if user chooses to replay by itself \ref CUPTI_UserReplay or \ref CUPTI_ApplicationReplay
393
+ * for multipass collection of the metrics configurations.
394
+ * Its a no-op in case of \ref CUPTI_KernelReplay.
395
+ * Returns information for next pass.
396
+ */
397
+ CUptiResult cuptiProfilerEndPass(CUpti_Profiler_EndPass_Params* pParams);
398
+
399
+ /**
400
+ * \brief Params for cuptiProfilerEnableProfiling
401
+ */
402
+ typedef struct CUpti_Profiler_EnableProfiling_Params
403
+ {
404
+ size_t structSize; //!< [in] CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE
405
+ void* pPriv; //!< [in] assign to NULL
406
+
407
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
408
+ } CUpti_Profiler_EnableProfiling_Params;
409
+ #define CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EnableProfiling_Params, ctx)
410
+
411
+ /**
412
+ * \brief Params for cuptiProfilerDisableProfiling
413
+ */
414
+ typedef struct CUpti_Profiler_DisableProfiling_Params
415
+ {
416
+ size_t structSize; //!< [in] CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE
417
+ void* pPriv; //!< [in] assign to NULL
418
+
419
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
420
+ } CUpti_Profiler_DisableProfiling_Params;
421
+ #define CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DisableProfiling_Params, ctx)
422
+
423
+ /**
424
+ * \brief Enables Profiling
425
+ *
426
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
427
+ * a profiling session.
428
+ */
429
+ CUptiResult CUPTIAPI cuptiProfilerEnableProfiling(CUpti_Profiler_EnableProfiling_Params* pParams);
430
+
431
+ /**
432
+ * \brief Disable Profiling
433
+ *
434
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
435
+ * a profiling session.
436
+ */
437
+ CUptiResult CUPTIAPI cuptiProfilerDisableProfiling(CUpti_Profiler_DisableProfiling_Params* pParams);
438
+
439
+ /**
440
+ * \brief Params for cuptiProfilerIsPassCollected
441
+ */
442
+ typedef struct CUpti_Profiler_IsPassCollected_Params
443
+ {
444
+ size_t structSize; //!< [in] CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE
445
+ void* pPriv; //!< [in] assign to NULL
446
+
447
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
448
+ size_t numRangesDropped; //!< [out] number of ranges whose data was dropped in the processed pass
449
+ size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
450
+ uint8_t onePassCollected; //!< [out] true if a pass was successfully decoded
451
+ uint8_t allPassesCollected; //!< [out] becomes true when the last pass has been decoded
452
+ } CUpti_Profiler_IsPassCollected_Params;
453
+ #define CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_IsPassCollected_Params, allPassesCollected)
454
+
455
+ /**
456
+ * \brief Asynchronous call to query if the submitted pass to GPU is collected
457
+ *
458
+ */
459
+ CUptiResult CUPTIAPI cuptiProfilerIsPassCollected(CUpti_Profiler_IsPassCollected_Params* pParams);
460
+
461
+ /**
462
+ * \brief Params for cuptiProfilerFlushCounterData
463
+ */
464
+ typedef struct CUpti_Profiler_FlushCounterData_Params
465
+ {
466
+ size_t structSize; //!< [in] CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE
467
+ void* pPriv; //!< [in] assign to NULL
468
+
469
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
470
+ size_t numRangesDropped; //!< [out] number of ranges whose data was dropped in the processed passes
471
+ size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
472
+ } CUpti_Profiler_FlushCounterData_Params;
473
+ #define CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_FlushCounterData_Params, numTraceBytesDropped)
474
+
475
+ /**
476
+ * \brief Decode all the submitted passes
477
+ *
478
+ * Flush Counter data API to ensure every pass is decoded into the counterDataImage passed at beginSession.
479
+ * This will cause the CPU/GPU sync to collect all the undecoded pass.
480
+ */
481
+ CUptiResult CUPTIAPI cuptiProfilerFlushCounterData(CUpti_Profiler_FlushCounterData_Params* pParams);
482
+
483
+ typedef struct CUpti_Profiler_PushRange_Params
484
+ {
485
+ size_t structSize; //!< [in] CUpti_Profiler_PushRange_Params_STRUCT_SIZE
486
+ void* pPriv; //!< [in] assign to NULL
487
+
488
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
489
+ const char* pRangeName; //!< [in] specifies the range for subsequent launches; must not be NULL
490
+ size_t rangeNameLength; //!< [in] assign to strlen(pRangeName) if known; if set to zero, the library will call strlen()
491
+ } CUpti_Profiler_PushRange_Params;
492
+ #define CUpti_Profiler_PushRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PushRange_Params, rangeNameLength)
493
+
494
+ typedef struct CUpti_Profiler_PopRange_Params
495
+ {
496
+ size_t structSize; //!< [in] CUpti_Profiler_PopRange_Params_STRUCT_SIZE
497
+ void* pPriv; //!< [in] assign to NULL
498
+
499
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
500
+ } CUpti_Profiler_PopRange_Params;
501
+ #define CUpti_Profiler_PopRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PopRange_Params, ctx)
502
+
503
+
504
+ /**
505
+ * \brief Range API's : Push user range
506
+ *
507
+ * Counter data is collected per unique range-stack. Identified by a string label passsed by the user.
508
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
509
+ */
510
+ CUptiResult CUPTIAPI cuptiProfilerPushRange(CUpti_Profiler_PushRange_Params *pParams);
511
+
512
+ /**
513
+ * \brief Range API's : Pop user range
514
+ *
515
+ * Counter data is collected per unique range-stack. Identified by a string label passsed by the user.
516
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
517
+ */
518
+ CUptiResult CUPTIAPI cuptiProfilerPopRange(CUpti_Profiler_PopRange_Params *pParams);
519
+
520
+ /**
521
+ * \brief Params for cuptiProfilerGetCounterAvailability
522
+ */
523
+ typedef struct CUpti_Profiler_GetCounterAvailability_Params
524
+ {
525
+ size_t structSize; //!< [in] CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE
526
+ void* pPriv; //!< [in] assign to NULL
527
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
528
+ size_t counterAvailabilityImageSize; //!< [in/out] If `pCounterAvailabilityImage` is NULL, then the required size is returned in
529
+ //!< `counterAvailabilityImageSize`, otherwise `counterAvailabilityImageSize` should be set to the size of
530
+ //!< `pCounterAvailabilityImage`, and on return it would be overwritten with number of actual bytes copied
531
+ uint8_t* pCounterAvailabilityImage; //!< [in] buffer receiving counter availability image, may be NULL
532
+ } CUpti_Profiler_GetCounterAvailability_Params;
533
+ #define CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_GetCounterAvailability_Params, pCounterAvailabilityImage)
534
+
535
+ /**
536
+ * \brief Query counter availibility
537
+ *
538
+ * Use this API to query counter availability information in a buffer which can be used to filter unavailable raw metrics on host.
539
+ * Note: This API may fail, if any profiling or sampling session is active on the specified context or its device.
540
+ */
541
+ CUptiResult CUPTIAPI cuptiProfilerGetCounterAvailability(CUpti_Profiler_GetCounterAvailability_Params *pParams);
542
+
543
+ /// Generic support level enum for CUPTI
544
+ typedef enum
545
+ {
546
+ CUPTI_PROFILER_CONFIGURATION_UNKNOWN = 0, //!< Configuration support level unknown - either detection code errored out before setting this value, or unable to determine it
547
+ CUPTI_PROFILER_CONFIGURATION_UNSUPPORTED, //!< Profiling is unavailable. For specific feature fields, this means that the current configuration of this feature does not work with profiling. For instance, SLI-enabled devices do not support profiling, and this value would be returned for SLI on an SLI-enabled device.
548
+ CUPTI_PROFILER_CONFIGURATION_DISABLED, //!< Profiling would be available for this configuration, but was disabled by the system
549
+ CUPTI_PROFILER_CONFIGURATION_SUPPORTED //!< Profiling is supported. For specific feature fields, this means that the current configuration of this feature works with profiling. For instance, SLI-enabled devices do not support profiling, and this value would only be returned for devices which are not SLI-enabled.
550
+ } CUpti_Profiler_Support_Level;
551
+
552
+ /**
553
+ * \brief Params for cuptiProfilerDeviceSupported
554
+ */
555
+ typedef struct
556
+ {
557
+ size_t structSize; //!< [in] Must be CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE
558
+ void *pPriv; //!< [in] assign to NULL
559
+ CUdevice cuDevice; //!< [in] if NULL, the current CUcontext is used
560
+
561
+ CUpti_Profiler_Support_Level isSupported; //!< [out] overall SUPPORTED / UNSUPPORTED flag representing whether Profiling and PC Sampling APIs work on the given device and configuration. SUPPORTED if all following flags are SUPPORTED, UNSUPPORTED otherwise.
562
+
563
+ CUpti_Profiler_Support_Level architecture; //!< [out] SUPPORTED if the device architecture level supports the Profiling API (Compute Capability >= 7.0), UNSUPPORTED otherwise
564
+ CUpti_Profiler_Support_Level sli; //!< [out] SUPPORTED if SLI is not enabled, UNSUPPORTED otherwise
565
+ CUpti_Profiler_Support_Level vGpu; //!< [out] SUPPORTED if vGPU is supported and profiling is enabled, DISABLED if profiling is supported but not enabled, UNSUPPORTED otherwise
566
+ CUpti_Profiler_Support_Level confidentialCompute; //!< [out] SUPPORTED if confidential compute is not enabled, UNSUPPORTED otherwise
567
+ CUpti_Profiler_Support_Level cmp; //!< [out] SUPPORTED if not NVIDIA Crypto Mining Processors (CMP), UNSUPPORTED otherwise
568
+ CUpti_Profiler_Support_Level wsl; //!< [out] SUPPORTED if WSL supported, UNSUPPORTED otherwise
569
+ } CUpti_Profiler_DeviceSupported_Params;
570
+ #define CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeviceSupported_Params, confidentialCompute)
571
+
572
+ /**
573
+ * \brief Query device compatibility with Profiling API
574
+ *
575
+ * Use this call to determine whether a compute device and configuration are compatible with the Profiling API.
576
+ * If the configuration does not support profiling, one of several flags will indicate why.
577
+ */
578
+ CUptiResult CUPTIAPI cuptiProfilerDeviceSupported(CUpti_Profiler_DeviceSupported_Params *pParams);
579
+
580
+ /** @} */ /* END CUPTI_METRIC_API */
581
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
582
+ #pragma GCC visibility pop
583
+ #endif
584
+
585
+ #ifdef __cplusplus
586
+ } /* extern "C" */
587
+ #endif
588
+
589
+ #endif /*_CUPTI_PROFILER_TARGET_H_*/
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_result.h ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_RESULT_H_)
51
+ #define _CUPTI_RESULT_H_
52
+
53
+ #ifndef CUPTIAPI
54
+ #ifdef _WIN32
55
+ #define CUPTIAPI __stdcall
56
+ #else
57
+ #define CUPTIAPI
58
+ #endif
59
+ #endif
60
+
61
+ #if defined(__cplusplus)
62
+ extern "C" {
63
+ #endif
64
+
65
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
66
+ #pragma GCC visibility push(default)
67
+ #endif
68
+
69
+ /**
70
+ * \defgroup CUPTI_RESULT_API CUPTI Result Codes
71
+ * Error and result codes returned by CUPTI functions.
72
+ * @{
73
+ */
74
+
75
+ /**
76
+ * \brief CUPTI result codes.
77
+ *
78
+ * Error and result codes returned by CUPTI functions.
79
+ */
80
+ typedef enum {
81
+ /**
82
+ * No error.
83
+ */
84
+ CUPTI_SUCCESS = 0,
85
+ /**
86
+ * One or more of the parameters is invalid.
87
+ */
88
+ CUPTI_ERROR_INVALID_PARAMETER = 1,
89
+ /**
90
+ * The device does not correspond to a valid CUDA device.
91
+ */
92
+ CUPTI_ERROR_INVALID_DEVICE = 2,
93
+ /**
94
+ * The context is NULL or not valid.
95
+ */
96
+ CUPTI_ERROR_INVALID_CONTEXT = 3,
97
+ /**
98
+ * The event domain id is invalid.
99
+ */
100
+ CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID = 4,
101
+ /**
102
+ * The event id is invalid.
103
+ */
104
+ CUPTI_ERROR_INVALID_EVENT_ID = 5,
105
+ /**
106
+ * The event name is invalid.
107
+ */
108
+ CUPTI_ERROR_INVALID_EVENT_NAME = 6,
109
+ /**
110
+ * The current operation cannot be performed due to dependency on
111
+ * other factors.
112
+ */
113
+ CUPTI_ERROR_INVALID_OPERATION = 7,
114
+ /**
115
+ * Unable to allocate enough memory to perform the requested
116
+ * operation.
117
+ */
118
+ CUPTI_ERROR_OUT_OF_MEMORY = 8,
119
+ /**
120
+ * An error occurred on the performance monitoring hardware.
121
+ */
122
+ CUPTI_ERROR_HARDWARE = 9,
123
+ /**
124
+ * The output buffer size is not sufficient to return all
125
+ * requested data.
126
+ */
127
+ CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT = 10,
128
+ /**
129
+ * API is not implemented.
130
+ */
131
+ CUPTI_ERROR_API_NOT_IMPLEMENTED = 11,
132
+ /**
133
+ * The maximum limit is reached.
134
+ */
135
+ CUPTI_ERROR_MAX_LIMIT_REACHED = 12,
136
+ /**
137
+ * The object is not yet ready to perform the requested operation.
138
+ */
139
+ CUPTI_ERROR_NOT_READY = 13,
140
+ /**
141
+ * The current operation is not compatible with the current state
142
+ * of the object
143
+ */
144
+ CUPTI_ERROR_NOT_COMPATIBLE = 14,
145
+ /**
146
+ * CUPTI is unable to initialize its connection to the CUDA
147
+ * driver.
148
+ */
149
+ CUPTI_ERROR_NOT_INITIALIZED = 15,
150
+ /**
151
+ * The metric id is invalid.
152
+ */
153
+ CUPTI_ERROR_INVALID_METRIC_ID = 16,
154
+ /**
155
+ * The metric name is invalid.
156
+ */
157
+ CUPTI_ERROR_INVALID_METRIC_NAME = 17,
158
+ /**
159
+ * The queue is empty.
160
+ */
161
+ CUPTI_ERROR_QUEUE_EMPTY = 18,
162
+ /**
163
+ * Invalid handle (internal?).
164
+ */
165
+ CUPTI_ERROR_INVALID_HANDLE = 19,
166
+ /**
167
+ * Invalid stream.
168
+ */
169
+ CUPTI_ERROR_INVALID_STREAM = 20,
170
+ /**
171
+ * Invalid kind.
172
+ */
173
+ CUPTI_ERROR_INVALID_KIND = 21,
174
+ /**
175
+ * Invalid event value.
176
+ */
177
+ CUPTI_ERROR_INVALID_EVENT_VALUE = 22,
178
+ /**
179
+ * CUPTI is disabled due to conflicts with other enabled profilers
180
+ */
181
+ CUPTI_ERROR_DISABLED = 23,
182
+ /**
183
+ * Invalid module.
184
+ */
185
+ CUPTI_ERROR_INVALID_MODULE = 24,
186
+ /**
187
+ * Invalid metric value.
188
+ */
189
+ CUPTI_ERROR_INVALID_METRIC_VALUE = 25,
190
+ /**
191
+ * The performance monitoring hardware is in use by other client.
192
+ */
193
+ CUPTI_ERROR_HARDWARE_BUSY = 26,
194
+ /**
195
+ * The attempted operation is not supported on the current
196
+ * system or device.
197
+ */
198
+ CUPTI_ERROR_NOT_SUPPORTED = 27,
199
+ /**
200
+ * Unified memory profiling is not supported on the system.
201
+ * Potential reason could be unsupported OS or architecture.
202
+ */
203
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED = 28,
204
+ /**
205
+ * Unified memory profiling is not supported on the device
206
+ */
207
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE = 29,
208
+ /**
209
+ * Unified memory profiling is not supported on a multi-GPU
210
+ * configuration without P2P support between any pair of devices
211
+ */
212
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES = 30,
213
+ /**
214
+ * Unified memory profiling is not supported under the
215
+ * Multi-Process Service (MPS) environment. CUDA 7.5 removes this
216
+ * restriction.
217
+ */
218
+ CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_WITH_MPS = 31,
219
+ /**
220
+ * In CUDA 9.0, devices with compute capability 7.0 don't
221
+ * support CDP tracing
222
+ */
223
+ CUPTI_ERROR_CDP_TRACING_NOT_SUPPORTED = 32,
224
+ /**
225
+ * Profiling on virtualized GPU is not supported.
226
+ */
227
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 33,
228
+ /**
229
+ * Profiling results might be incorrect for CUDA applications
230
+ * compiled with nvcc version older than 9.0 for devices with
231
+ * compute capability 6.0 and 6.1.
232
+ * Profiling session will continue and CUPTI will notify it using this error code.
233
+ * User is advised to recompile the application code with nvcc version 9.0 or later.
234
+ * Ignore this warning if code is already compiled with the recommended nvcc version.
235
+ */
236
+ CUPTI_ERROR_CUDA_COMPILER_NOT_COMPATIBLE = 34,
237
+ /**
238
+ * User doesn't have sufficient privileges which are required to
239
+ * start the profiling session.
240
+ * One possible reason for this may be that the NVIDIA driver or your system
241
+ * administrator may have restricted access to the NVIDIA GPU performance counters.
242
+ * To learn how to resolve this issue and find more information, please visit
243
+ * https://developer.nvidia.com/CUPTI_ERROR_INSUFFICIENT_PRIVILEGES
244
+ */
245
+ CUPTI_ERROR_INSUFFICIENT_PRIVILEGES = 35,
246
+ /**
247
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
248
+ * metric API from the header cupti_metrics.h are not compatible with the
249
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
250
+ * in the headers nvperf_host.h and nvperf_target.h.
251
+ */
252
+ CUPTI_ERROR_OLD_PROFILER_API_INITIALIZED = 36,
253
+ /**
254
+ * Missing definition of the OpenACC API routine in the linked OpenACC library.
255
+ *
256
+ * One possible reason is that OpenACC library is linked statically in the
257
+ * user application, which might not have the definition of all the OpenACC
258
+ * API routines needed for the OpenACC profiling, as compiler might ignore
259
+ * definitions for the functions not used in the application. This issue
260
+ * can be mitigated by linking the OpenACC library dynamically.
261
+ */
262
+ CUPTI_ERROR_OPENACC_UNDEFINED_ROUTINE = 37,
263
+ /**
264
+ * Legacy CUPTI Profiling API i.e. event API from the header cupti_events.h and
265
+ * metric API from the header cupti_metrics.h are not supported on devices with
266
+ * compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
267
+ * These API will be deprecated in a future CUDA release. These are replaced by
268
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
269
+ * in the headers nvperf_host.h and nvperf_target.h.
270
+ */
271
+ CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED = 38,
272
+ /**
273
+ * CUPTI doesn't allow multiple callback subscribers. Only a single subscriber
274
+ * can be registered at a time.
275
+ * Same error code is used when application is launched using NVIDIA tools
276
+ * like nvprof, Visual Profiler, Nsight Systems, Nsight Compute, cuda-gdb and
277
+ * cuda-memcheck.
278
+ */
279
+ CUPTI_ERROR_MULTIPLE_SUBSCRIBERS_NOT_SUPPORTED = 39,
280
+ /**
281
+ * Profiling on virtualized GPU is not allowed by hypervisor.
282
+ */
283
+ CUPTI_ERROR_VIRTUALIZED_DEVICE_INSUFFICIENT_PRIVILEGES = 40,
284
+ /**
285
+ * Profiling and tracing are not allowed when confidential computing mode
286
+ * is enabled.
287
+ */
288
+ CUPTI_ERROR_CONFIDENTIAL_COMPUTING_NOT_SUPPORTED = 41,
289
+ /**
290
+ * CUPTI does not support NVIDIA Crypto Mining Processors (CMP).
291
+ * For more information, please visit https://developer.nvidia.com/ERR_NVCMPGPU
292
+ */
293
+ CUPTI_ERROR_CMP_DEVICE_NOT_SUPPORTED = 42,
294
+ /**
295
+ * An unknown internal error has occurred.
296
+ */
297
+ CUPTI_ERROR_UNKNOWN = 999,
298
+ CUPTI_ERROR_FORCE_INT = 0x7fffffff
299
+ } CUptiResult;
300
+
301
+ /**
302
+ * \brief Get the descriptive string for a CUptiResult.
303
+ *
304
+ * Return the descriptive string for a CUptiResult in \p *str.
305
+ * \note \b Thread-safety: this function is thread safe.
306
+ *
307
+ * \param result The result to get the string for
308
+ * \param str Returns the string
309
+ *
310
+ * \retval CUPTI_SUCCESS on success
311
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p str is NULL or \p
312
+ * result is not a valid CUptiResult
313
+ */
314
+ CUptiResult CUPTIAPI cuptiGetResultString(CUptiResult result, const char **str);
315
+
316
+ /** @} */ /* END CUPTI_RESULT_API */
317
+
318
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
319
+ #pragma GCC visibility pop
320
+ #endif
321
+
322
+ #if defined(__cplusplus)
323
+ }
324
+ #endif
325
+
326
+ #endif /*_CUPTI_RESULT_H_*/
327
+
328
+
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_target.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #if !defined(_CUPTI_TARGET_H_)
2
+ #define _CUPTI_TARGET_H_
3
+
4
+ /*
5
+ CUPTI profiler target API's
6
+ This file contains the CUPTI profiling API's.
7
+ */
8
+ #include <cupti_result.h>
9
+ #include <stddef.h>
10
+ #include <stdint.h>
11
+
12
+ #ifdef __cplusplus
13
+ extern "C" {
14
+ #endif
15
+
16
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
17
+ #pragma GCC visibility push(default)
18
+ #endif
19
+
20
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
21
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
22
+ #endif
23
+
24
+ typedef struct CUpti_Device_GetChipName_Params
25
+ {
26
+ size_t structSize; //!< [in]
27
+ void* pPriv; //!< [in] assign to NULL
28
+
29
+ size_t deviceIndex; //!< [in]
30
+ const char* pChipName; //!< [out]
31
+ } CUpti_Device_GetChipName_Params;
32
+
33
+ #define CUpti_Device_GetChipName_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Device_GetChipName_Params, pChipName)
34
+ CUptiResult CUPTIAPI cuptiDeviceGetChipName(CUpti_Device_GetChipName_Params *pParams);
35
+
36
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
37
+ #pragma GCC visibility pop
38
+ #endif
39
+
40
+ #ifdef __cplusplus
41
+ } /* extern "C" */
42
+ #endif
43
+ #endif
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_version.h ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_VERSION_H_)
51
+ #define _CUPTI_VERSION_H_
52
+
53
+ #include <cuda_stdint.h>
54
+ #include <cupti_result.h>
55
+
56
+ #ifndef CUPTIAPI
57
+ #ifdef _WIN32
58
+ #define CUPTIAPI __stdcall
59
+ #else
60
+ #define CUPTIAPI
61
+ #endif
62
+ #endif
63
+
64
+ #if defined(__cplusplus)
65
+ extern "C" {
66
+ #endif
67
+
68
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
69
+ #pragma GCC visibility push(default)
70
+ #endif
71
+
72
+ /**
73
+ * \defgroup CUPTI_VERSION_API CUPTI Version
74
+ * Function and macro to determine the CUPTI version.
75
+ * @{
76
+ */
77
+
78
+ /**
79
+ * \brief The API version for this implementation of CUPTI.
80
+ *
81
+ * The API version for this implementation of CUPTI. This define along
82
+ * with \ref cuptiGetVersion can be used to dynamically detect if the
83
+ * version of CUPTI compiled against matches the version of the loaded
84
+ * CUPTI library.
85
+ *
86
+ * v1 : CUDAToolsSDK 4.0
87
+ * v2 : CUDAToolsSDK 4.1
88
+ * v3 : CUDA Toolkit 5.0
89
+ * v4 : CUDA Toolkit 5.5
90
+ * v5 : CUDA Toolkit 6.0
91
+ * v6 : CUDA Toolkit 6.5
92
+ * v7 : CUDA Toolkit 6.5(with sm_52 support)
93
+ * v8 : CUDA Toolkit 7.0
94
+ * v9 : CUDA Toolkit 8.0
95
+ * v10 : CUDA Toolkit 9.0
96
+ * v11 : CUDA Toolkit 9.1
97
+ * v12 : CUDA Toolkit 10.0, 10.1 and 10.2
98
+ * v13 : CUDA Toolkit 11.0
99
+ * v14 : CUDA Toolkit 11.1
100
+ * v15 : CUDA Toolkit 11.2, 11.3 and 11.4
101
+ * v16 : CUDA Toolkit 11.5
102
+ * v17 : CUDA Toolkit 11.6
103
+ * v18 : CUDA Toolkit 11.8
104
+ * v19 : CUDA Toolkit 12.0
105
+ */
106
+ #define CUPTI_API_VERSION 18
107
+
108
+ /**
109
+ * \brief Get the CUPTI API version.
110
+ *
111
+ * Return the API version in \p *version.
112
+ *
113
+ * \param version Returns the version
114
+ *
115
+ * \retval CUPTI_SUCCESS on success
116
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p version is NULL
117
+ * \sa CUPTI_API_VERSION
118
+ */
119
+ CUptiResult CUPTIAPI cuptiGetVersion(uint32_t *version);
120
+
121
+ /** @} */ /* END CUPTI_VERSION_API */
122
+
123
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
124
+ #pragma GCC visibility pop
125
+ #endif
126
+
127
+ #if defined(__cplusplus)
128
+ }
129
+ #endif
130
+
131
+ #endif /*_CUPTI_VERSION_H_*/
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // Dependent includes
4
+ #ifdef __APPLE__
5
+ #include <OpenGL/gl.h>
6
+ #else
7
+ #include <GL/gl.h>
8
+ #endif
9
+
10
+ // CUDA public interface, for type definitions and cu* function prototypes
11
+ #include "cudaGL.h"
12
+
13
+
14
+ // *************************************************************************
15
+ // Definitions of structs to hold parameters for each function
16
+ // *************************************************************************
17
+
18
+ typedef struct cuGraphicsGLRegisterBuffer_params_st {
19
+ CUgraphicsResource *pCudaResource;
20
+ GLuint buffer;
21
+ unsigned int Flags;
22
+ } cuGraphicsGLRegisterBuffer_params;
23
+
24
+ typedef struct cuGraphicsGLRegisterImage_params_st {
25
+ CUgraphicsResource *pCudaResource;
26
+ GLuint image;
27
+ GLenum target;
28
+ unsigned int Flags;
29
+ } cuGraphicsGLRegisterImage_params;
30
+
31
+ typedef struct cuGLGetDevices_v2_params_st {
32
+ unsigned int *pCudaDeviceCount;
33
+ CUdevice *pCudaDevices;
34
+ unsigned int cudaDeviceCount;
35
+ CUGLDeviceList deviceList;
36
+ } cuGLGetDevices_v2_params;
37
+
38
+ typedef struct cuGLCtxCreate_v2_params_st {
39
+ CUcontext *pCtx;
40
+ unsigned int Flags;
41
+ CUdevice device;
42
+ } cuGLCtxCreate_v2_params;
43
+
44
+ typedef struct cuGLRegisterBufferObject_params_st {
45
+ GLuint buffer;
46
+ } cuGLRegisterBufferObject_params;
47
+
48
+ typedef struct cuGLMapBufferObject_v2_ptds_params_st {
49
+ CUdeviceptr *dptr;
50
+ size_t *size;
51
+ GLuint buffer;
52
+ } cuGLMapBufferObject_v2_ptds_params;
53
+
54
+ typedef struct cuGLUnmapBufferObject_params_st {
55
+ GLuint buffer;
56
+ } cuGLUnmapBufferObject_params;
57
+
58
+ typedef struct cuGLUnregisterBufferObject_params_st {
59
+ GLuint buffer;
60
+ } cuGLUnregisterBufferObject_params;
61
+
62
+ typedef struct cuGLSetBufferObjectMapFlags_params_st {
63
+ GLuint buffer;
64
+ unsigned int Flags;
65
+ } cuGLSetBufferObjectMapFlags_params;
66
+
67
+ typedef struct cuGLMapBufferObjectAsync_v2_ptsz_params_st {
68
+ CUdeviceptr *dptr;
69
+ size_t *size;
70
+ GLuint buffer;
71
+ CUstream hStream;
72
+ } cuGLMapBufferObjectAsync_v2_ptsz_params;
73
+
74
+ typedef struct cuGLUnmapBufferObjectAsync_params_st {
75
+ GLuint buffer;
76
+ CUstream hStream;
77
+ } cuGLUnmapBufferObjectAsync_params;
78
+
79
+ typedef struct cuGLGetDevices_params_st {
80
+ unsigned int *pCudaDeviceCount;
81
+ CUdevice *pCudaDevices;
82
+ unsigned int cudaDeviceCount;
83
+ CUGLDeviceList deviceList;
84
+ } cuGLGetDevices_params;
85
+
86
+ typedef struct cuGLMapBufferObject_v2_params_st {
87
+ CUdeviceptr *dptr;
88
+ size_t *size;
89
+ GLuint buffer;
90
+ } cuGLMapBufferObject_v2_params;
91
+
92
+ typedef struct cuGLMapBufferObjectAsync_v2_params_st {
93
+ CUdeviceptr *dptr;
94
+ size_t *size;
95
+ GLuint buffer;
96
+ CUstream hStream;
97
+ } cuGLMapBufferObjectAsync_v2_params;
98
+
99
+ typedef struct cuGLCtxCreate_params_st {
100
+ CUcontext *pCtx;
101
+ unsigned int Flags;
102
+ CUdevice device;
103
+ } cuGLCtxCreate_params;
104
+
105
+ typedef struct cuGLMapBufferObject_params_st {
106
+ CUdeviceptr_v1 *dptr;
107
+ unsigned int *size;
108
+ GLuint buffer;
109
+ } cuGLMapBufferObject_params;
110
+
111
+ typedef struct cuGLMapBufferObjectAsync_params_st {
112
+ CUdeviceptr_v1 *dptr;
113
+ unsigned int *size;
114
+ GLuint buffer;
115
+ CUstream hStream;
116
+ } cuGLMapBufferObjectAsync_params;
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // Dependent includes
4
+ #include <vdpau/vdpau.h>
5
+
6
+ // CUDA public interface, for type definitions and cu* function prototypes
7
+ #include "cudaVDPAU.h"
8
+
9
+
10
+ // *************************************************************************
11
+ // Definitions of structs to hold parameters for each function
12
+ // *************************************************************************
13
+
14
+ typedef struct cuVDPAUGetDevice_params_st {
15
+ CUdevice *pDevice;
16
+ VdpDevice vdpDevice;
17
+ VdpGetProcAddress *vdpGetProcAddress;
18
+ } cuVDPAUGetDevice_params;
19
+
20
+ typedef struct cuVDPAUCtxCreate_v2_params_st {
21
+ CUcontext *pCtx;
22
+ unsigned int flags;
23
+ CUdevice device;
24
+ VdpDevice vdpDevice;
25
+ VdpGetProcAddress *vdpGetProcAddress;
26
+ } cuVDPAUCtxCreate_v2_params;
27
+
28
+ typedef struct cuGraphicsVDPAURegisterVideoSurface_params_st {
29
+ CUgraphicsResource *pCudaResource;
30
+ VdpVideoSurface vdpSurface;
31
+ unsigned int flags;
32
+ } cuGraphicsVDPAURegisterVideoSurface_params;
33
+
34
+ typedef struct cuGraphicsVDPAURegisterOutputSurface_params_st {
35
+ CUgraphicsResource *pCudaResource;
36
+ VdpOutputSurface vdpSurface;
37
+ unsigned int flags;
38
+ } cuGraphicsVDPAURegisterOutputSurface_params;
39
+
40
+ typedef struct cuVDPAUCtxCreate_params_st {
41
+ CUcontext *pCtx;
42
+ unsigned int flags;
43
+ CUdevice device;
44
+ VdpDevice vdpDevice;
45
+ VdpGetProcAddress *vdpGetProcAddress;
46
+ } cuVDPAUCtxCreate_params;
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_gl_interop.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaGLGetDevices_v4010_params_st {
12
+ unsigned int *pCudaDeviceCount;
13
+ int *pCudaDevices;
14
+ unsigned int cudaDeviceCount;
15
+ enum cudaGLDeviceList deviceList;
16
+ } cudaGLGetDevices_v4010_params;
17
+
18
+ typedef struct cudaGraphicsGLRegisterImage_v3020_params_st {
19
+ struct cudaGraphicsResource **resource;
20
+ GLuint image;
21
+ GLenum target;
22
+ unsigned int flags;
23
+ } cudaGraphicsGLRegisterImage_v3020_params;
24
+
25
+ typedef struct cudaGraphicsGLRegisterBuffer_v3020_params_st {
26
+ struct cudaGraphicsResource **resource;
27
+ GLuint buffer;
28
+ unsigned int flags;
29
+ } cudaGraphicsGLRegisterBuffer_v3020_params;
30
+
31
+ typedef struct cudaGLSetGLDevice_v3020_params_st {
32
+ int device;
33
+ } cudaGLSetGLDevice_v3020_params;
34
+
35
+ typedef struct cudaGLRegisterBufferObject_v3020_params_st {
36
+ GLuint bufObj;
37
+ } cudaGLRegisterBufferObject_v3020_params;
38
+
39
+ typedef struct cudaGLMapBufferObject_v3020_params_st {
40
+ void **devPtr;
41
+ GLuint bufObj;
42
+ } cudaGLMapBufferObject_v3020_params;
43
+
44
+ typedef struct cudaGLUnmapBufferObject_v3020_params_st {
45
+ GLuint bufObj;
46
+ } cudaGLUnmapBufferObject_v3020_params;
47
+
48
+ typedef struct cudaGLUnregisterBufferObject_v3020_params_st {
49
+ GLuint bufObj;
50
+ } cudaGLUnregisterBufferObject_v3020_params;
51
+
52
+ typedef struct cudaGLSetBufferObjectMapFlags_v3020_params_st {
53
+ GLuint bufObj;
54
+ unsigned int flags;
55
+ } cudaGLSetBufferObjectMapFlags_v3020_params;
56
+
57
+ typedef struct cudaGLMapBufferObjectAsync_v3020_params_st {
58
+ void **devPtr;
59
+ GLuint bufObj;
60
+ cudaStream_t stream;
61
+ } cudaGLMapBufferObjectAsync_v3020_params;
62
+
63
+ typedef struct cudaGLUnmapBufferObjectAsync_v3020_params_st {
64
+ GLuint bufObj;
65
+ cudaStream_t stream;
66
+ } cudaGLUnmapBufferObjectAsync_v3020_params;
67
+
68
+ // Parameter trace structures for removed functions
69
+
70
+
71
+ // End of parameter trace structures
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h ADDED
@@ -0,0 +1,3293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // No dependent includes
4
+
5
+ // CUDA public interface, for type definitions and cu* function prototypes
6
+ #include "cuda.h"
7
+
8
+
9
+ // *************************************************************************
10
+ // Definitions of structs to hold parameters for each function
11
+ // *************************************************************************
12
+
13
+ typedef struct cuGetErrorString_params_st {
14
+ CUresult error;
15
+ const char **pStr;
16
+ } cuGetErrorString_params;
17
+
18
+ typedef struct cuGetErrorName_params_st {
19
+ CUresult error;
20
+ const char **pStr;
21
+ } cuGetErrorName_params;
22
+
23
+ typedef struct cuInit_params_st {
24
+ unsigned int Flags;
25
+ } cuInit_params;
26
+
27
+ typedef struct cuDriverGetVersion_params_st {
28
+ int *driverVersion;
29
+ } cuDriverGetVersion_params;
30
+
31
+ typedef struct cuDeviceGet_params_st {
32
+ CUdevice *device;
33
+ int ordinal;
34
+ } cuDeviceGet_params;
35
+
36
+ typedef struct cuDeviceGetCount_params_st {
37
+ int *count;
38
+ } cuDeviceGetCount_params;
39
+
40
+ typedef struct cuDeviceGetName_params_st {
41
+ char *name;
42
+ int len;
43
+ CUdevice dev;
44
+ } cuDeviceGetName_params;
45
+
46
+ typedef struct cuDeviceGetUuid_params_st {
47
+ CUuuid *uuid;
48
+ CUdevice dev;
49
+ } cuDeviceGetUuid_params;
50
+
51
+ typedef struct cuDeviceGetUuid_v2_params_st {
52
+ CUuuid *uuid;
53
+ CUdevice dev;
54
+ } cuDeviceGetUuid_v2_params;
55
+
56
+ typedef struct cuDeviceGetLuid_params_st {
57
+ char *luid;
58
+ unsigned int *deviceNodeMask;
59
+ CUdevice dev;
60
+ } cuDeviceGetLuid_params;
61
+
62
+ typedef struct cuDeviceTotalMem_v2_params_st {
63
+ size_t *bytes;
64
+ CUdevice dev;
65
+ } cuDeviceTotalMem_v2_params;
66
+
67
+ typedef struct cuDeviceGetTexture1DLinearMaxWidth_params_st {
68
+ size_t *maxWidthInElements;
69
+ CUarray_format format;
70
+ unsigned numChannels;
71
+ CUdevice dev;
72
+ } cuDeviceGetTexture1DLinearMaxWidth_params;
73
+
74
+ typedef struct cuDeviceGetAttribute_params_st {
75
+ int *pi;
76
+ CUdevice_attribute attrib;
77
+ CUdevice dev;
78
+ } cuDeviceGetAttribute_params;
79
+
80
+ typedef struct cuDeviceGetNvSciSyncAttributes_params_st {
81
+ void *nvSciSyncAttrList;
82
+ CUdevice dev;
83
+ int flags;
84
+ } cuDeviceGetNvSciSyncAttributes_params;
85
+
86
+ typedef struct cuDeviceSetMemPool_params_st {
87
+ CUdevice dev;
88
+ CUmemoryPool pool;
89
+ } cuDeviceSetMemPool_params;
90
+
91
+ typedef struct cuDeviceGetMemPool_params_st {
92
+ CUmemoryPool *pool;
93
+ CUdevice dev;
94
+ } cuDeviceGetMemPool_params;
95
+
96
+ typedef struct cuDeviceGetDefaultMemPool_params_st {
97
+ CUmemoryPool *pool_out;
98
+ CUdevice dev;
99
+ } cuDeviceGetDefaultMemPool_params;
100
+
101
+ typedef struct cuDeviceGetExecAffinitySupport_params_st {
102
+ int *pi;
103
+ CUexecAffinityType type;
104
+ CUdevice dev;
105
+ } cuDeviceGetExecAffinitySupport_params;
106
+
107
+ typedef struct cuFlushGPUDirectRDMAWrites_params_st {
108
+ CUflushGPUDirectRDMAWritesTarget target;
109
+ CUflushGPUDirectRDMAWritesScope scope;
110
+ } cuFlushGPUDirectRDMAWrites_params;
111
+
112
+ typedef struct cuDeviceGetProperties_params_st {
113
+ CUdevprop *prop;
114
+ CUdevice dev;
115
+ } cuDeviceGetProperties_params;
116
+
117
+ typedef struct cuDeviceComputeCapability_params_st {
118
+ int *major;
119
+ int *minor;
120
+ CUdevice dev;
121
+ } cuDeviceComputeCapability_params;
122
+
123
+ typedef struct cuDevicePrimaryCtxRetain_params_st {
124
+ CUcontext *pctx;
125
+ CUdevice dev;
126
+ } cuDevicePrimaryCtxRetain_params;
127
+
128
+ typedef struct cuDevicePrimaryCtxRelease_v2_params_st {
129
+ CUdevice dev;
130
+ } cuDevicePrimaryCtxRelease_v2_params;
131
+
132
+ typedef struct cuDevicePrimaryCtxSetFlags_v2_params_st {
133
+ CUdevice dev;
134
+ unsigned int flags;
135
+ } cuDevicePrimaryCtxSetFlags_v2_params;
136
+
137
+ typedef struct cuDevicePrimaryCtxGetState_params_st {
138
+ CUdevice dev;
139
+ unsigned int *flags;
140
+ int *active;
141
+ } cuDevicePrimaryCtxGetState_params;
142
+
143
+ typedef struct cuDevicePrimaryCtxReset_v2_params_st {
144
+ CUdevice dev;
145
+ } cuDevicePrimaryCtxReset_v2_params;
146
+
147
+ typedef struct cuCtxCreate_v2_params_st {
148
+ CUcontext *pctx;
149
+ unsigned int flags;
150
+ CUdevice dev;
151
+ } cuCtxCreate_v2_params;
152
+
153
+ typedef struct cuCtxCreate_v3_params_st {
154
+ CUcontext *pctx;
155
+ CUexecAffinityParam *paramsArray;
156
+ int numParams;
157
+ unsigned int flags;
158
+ CUdevice dev;
159
+ } cuCtxCreate_v3_params;
160
+
161
+ typedef struct cuCtxDestroy_v2_params_st {
162
+ CUcontext ctx;
163
+ } cuCtxDestroy_v2_params;
164
+
165
+ typedef struct cuCtxPushCurrent_v2_params_st {
166
+ CUcontext ctx;
167
+ } cuCtxPushCurrent_v2_params;
168
+
169
+ typedef struct cuCtxPopCurrent_v2_params_st {
170
+ CUcontext *pctx;
171
+ } cuCtxPopCurrent_v2_params;
172
+
173
+ typedef struct cuCtxSetCurrent_params_st {
174
+ CUcontext ctx;
175
+ } cuCtxSetCurrent_params;
176
+
177
+ typedef struct cuCtxGetCurrent_params_st {
178
+ CUcontext *pctx;
179
+ } cuCtxGetCurrent_params;
180
+
181
+ typedef struct cuCtxGetDevice_params_st {
182
+ CUdevice *device;
183
+ } cuCtxGetDevice_params;
184
+
185
+ typedef struct cuCtxGetFlags_params_st {
186
+ unsigned int *flags;
187
+ } cuCtxGetFlags_params;
188
+
189
+ typedef struct cuCtxSetFlags_params_st {
190
+ unsigned int flags;
191
+ } cuCtxSetFlags_params;
192
+
193
+ typedef struct cuCtxGetId_params_st {
194
+ CUcontext ctx;
195
+ unsigned long long *ctxId;
196
+ } cuCtxGetId_params;
197
+
198
+ typedef struct cuCtxSetLimit_params_st {
199
+ CUlimit limit;
200
+ size_t value;
201
+ } cuCtxSetLimit_params;
202
+
203
+ typedef struct cuCtxGetLimit_params_st {
204
+ size_t *pvalue;
205
+ CUlimit limit;
206
+ } cuCtxGetLimit_params;
207
+
208
+ typedef struct cuCtxGetCacheConfig_params_st {
209
+ CUfunc_cache *pconfig;
210
+ } cuCtxGetCacheConfig_params;
211
+
212
+ typedef struct cuCtxSetCacheConfig_params_st {
213
+ CUfunc_cache config;
214
+ } cuCtxSetCacheConfig_params;
215
+
216
+ typedef struct cuCtxGetSharedMemConfig_params_st {
217
+ CUsharedconfig *pConfig;
218
+ } cuCtxGetSharedMemConfig_params;
219
+
220
+ typedef struct cuCtxSetSharedMemConfig_params_st {
221
+ CUsharedconfig config;
222
+ } cuCtxSetSharedMemConfig_params;
223
+
224
+ typedef struct cuCtxGetApiVersion_params_st {
225
+ CUcontext ctx;
226
+ unsigned int *version;
227
+ } cuCtxGetApiVersion_params;
228
+
229
+ typedef struct cuCtxGetStreamPriorityRange_params_st {
230
+ int *leastPriority;
231
+ int *greatestPriority;
232
+ } cuCtxGetStreamPriorityRange_params;
233
+
234
+ typedef struct cuCtxGetExecAffinity_params_st {
235
+ CUexecAffinityParam *pExecAffinity;
236
+ CUexecAffinityType type;
237
+ } cuCtxGetExecAffinity_params;
238
+
239
+ typedef struct cuCtxAttach_params_st {
240
+ CUcontext *pctx;
241
+ unsigned int flags;
242
+ } cuCtxAttach_params;
243
+
244
+ typedef struct cuCtxDetach_params_st {
245
+ CUcontext ctx;
246
+ } cuCtxDetach_params;
247
+
248
+ typedef struct cuModuleLoad_params_st {
249
+ CUmodule *module;
250
+ const char *fname;
251
+ } cuModuleLoad_params;
252
+
253
+ typedef struct cuModuleLoadData_params_st {
254
+ CUmodule *module;
255
+ const void *image;
256
+ } cuModuleLoadData_params;
257
+
258
+ typedef struct cuModuleLoadDataEx_params_st {
259
+ CUmodule *module;
260
+ const void *image;
261
+ unsigned int numOptions;
262
+ CUjit_option *options;
263
+ void **optionValues;
264
+ } cuModuleLoadDataEx_params;
265
+
266
+ typedef struct cuModuleLoadFatBinary_params_st {
267
+ CUmodule *module;
268
+ const void *fatCubin;
269
+ } cuModuleLoadFatBinary_params;
270
+
271
+ typedef struct cuModuleUnload_params_st {
272
+ CUmodule hmod;
273
+ } cuModuleUnload_params;
274
+
275
+ typedef struct cuModuleGetLoadingMode_params_st {
276
+ CUmoduleLoadingMode *mode;
277
+ } cuModuleGetLoadingMode_params;
278
+
279
+ typedef struct cuModuleGetFunction_params_st {
280
+ CUfunction *hfunc;
281
+ CUmodule hmod;
282
+ const char *name;
283
+ } cuModuleGetFunction_params;
284
+
285
+ typedef struct cuModuleGetGlobal_v2_params_st {
286
+ CUdeviceptr *dptr;
287
+ size_t *bytes;
288
+ CUmodule hmod;
289
+ const char *name;
290
+ } cuModuleGetGlobal_v2_params;
291
+
292
+ typedef struct cuLinkCreate_v2_params_st {
293
+ unsigned int numOptions;
294
+ CUjit_option *options;
295
+ void **optionValues;
296
+ CUlinkState *stateOut;
297
+ } cuLinkCreate_v2_params;
298
+
299
+ typedef struct cuLinkAddData_v2_params_st {
300
+ CUlinkState state;
301
+ CUjitInputType type;
302
+ void *data;
303
+ size_t size;
304
+ const char *name;
305
+ unsigned int numOptions;
306
+ CUjit_option *options;
307
+ void **optionValues;
308
+ } cuLinkAddData_v2_params;
309
+
310
+ typedef struct cuLinkAddFile_v2_params_st {
311
+ CUlinkState state;
312
+ CUjitInputType type;
313
+ const char *path;
314
+ unsigned int numOptions;
315
+ CUjit_option *options;
316
+ void **optionValues;
317
+ } cuLinkAddFile_v2_params;
318
+
319
+ typedef struct cuLinkComplete_params_st {
320
+ CUlinkState state;
321
+ void **cubinOut;
322
+ size_t *sizeOut;
323
+ } cuLinkComplete_params;
324
+
325
+ typedef struct cuLinkDestroy_params_st {
326
+ CUlinkState state;
327
+ } cuLinkDestroy_params;
328
+
329
+ typedef struct cuModuleGetTexRef_params_st {
330
+ CUtexref *pTexRef;
331
+ CUmodule hmod;
332
+ const char *name;
333
+ } cuModuleGetTexRef_params;
334
+
335
+ typedef struct cuModuleGetSurfRef_params_st {
336
+ CUsurfref *pSurfRef;
337
+ CUmodule hmod;
338
+ const char *name;
339
+ } cuModuleGetSurfRef_params;
340
+
341
+ typedef struct cuLibraryLoadData_params_st {
342
+ CUlibrary *library;
343
+ const void *code;
344
+ CUjit_option *jitOptions;
345
+ void **jitOptionsValues;
346
+ unsigned int numJitOptions;
347
+ CUlibraryOption *libraryOptions;
348
+ void **libraryOptionValues;
349
+ unsigned int numLibraryOptions;
350
+ } cuLibraryLoadData_params;
351
+
352
+ typedef struct cuLibraryLoadFromFile_params_st {
353
+ CUlibrary *library;
354
+ const char *fileName;
355
+ CUjit_option *jitOptions;
356
+ void **jitOptionsValues;
357
+ unsigned int numJitOptions;
358
+ CUlibraryOption *libraryOptions;
359
+ void **libraryOptionValues;
360
+ unsigned int numLibraryOptions;
361
+ } cuLibraryLoadFromFile_params;
362
+
363
+ typedef struct cuLibraryUnload_params_st {
364
+ CUlibrary library;
365
+ } cuLibraryUnload_params;
366
+
367
+ typedef struct cuLibraryGetKernel_params_st {
368
+ CUkernel *pKernel;
369
+ CUlibrary library;
370
+ const char *name;
371
+ } cuLibraryGetKernel_params;
372
+
373
+ typedef struct cuLibraryGetModule_params_st {
374
+ CUmodule *pMod;
375
+ CUlibrary library;
376
+ } cuLibraryGetModule_params;
377
+
378
+ typedef struct cuKernelGetFunction_params_st {
379
+ CUfunction *pFunc;
380
+ CUkernel kernel;
381
+ } cuKernelGetFunction_params;
382
+
383
+ typedef struct cuLibraryGetGlobal_params_st {
384
+ CUdeviceptr *dptr;
385
+ size_t *bytes;
386
+ CUlibrary library;
387
+ const char *name;
388
+ } cuLibraryGetGlobal_params;
389
+
390
+ typedef struct cuLibraryGetManaged_params_st {
391
+ CUdeviceptr *dptr;
392
+ size_t *bytes;
393
+ CUlibrary library;
394
+ const char *name;
395
+ } cuLibraryGetManaged_params;
396
+
397
+ typedef struct cuLibraryGetUnifiedFunction_params_st {
398
+ void **fptr;
399
+ CUlibrary library;
400
+ const char *symbol;
401
+ } cuLibraryGetUnifiedFunction_params;
402
+
403
+ typedef struct cuKernelGetAttribute_params_st {
404
+ int *pi;
405
+ CUfunction_attribute attrib;
406
+ CUkernel kernel;
407
+ CUdevice dev;
408
+ } cuKernelGetAttribute_params;
409
+
410
+ typedef struct cuKernelSetAttribute_params_st {
411
+ CUfunction_attribute attrib;
412
+ int val;
413
+ CUkernel kernel;
414
+ CUdevice dev;
415
+ } cuKernelSetAttribute_params;
416
+
417
+ typedef struct cuKernelSetCacheConfig_params_st {
418
+ CUkernel kernel;
419
+ CUfunc_cache config;
420
+ CUdevice dev;
421
+ } cuKernelSetCacheConfig_params;
422
+
423
+ typedef struct cuMemGetInfo_v2_params_st {
424
+ size_t *free;
425
+ size_t *total;
426
+ } cuMemGetInfo_v2_params;
427
+
428
+ typedef struct cuMemAlloc_v2_params_st {
429
+ CUdeviceptr *dptr;
430
+ size_t bytesize;
431
+ } cuMemAlloc_v2_params;
432
+
433
+ typedef struct cuMemAllocPitch_v2_params_st {
434
+ CUdeviceptr *dptr;
435
+ size_t *pPitch;
436
+ size_t WidthInBytes;
437
+ size_t Height;
438
+ unsigned int ElementSizeBytes;
439
+ } cuMemAllocPitch_v2_params;
440
+
441
+ typedef struct cuMemFree_v2_params_st {
442
+ CUdeviceptr dptr;
443
+ } cuMemFree_v2_params;
444
+
445
+ typedef struct cuMemGetAddressRange_v2_params_st {
446
+ CUdeviceptr *pbase;
447
+ size_t *psize;
448
+ CUdeviceptr dptr;
449
+ } cuMemGetAddressRange_v2_params;
450
+
451
+ typedef struct cuMemAllocHost_v2_params_st {
452
+ void **pp;
453
+ size_t bytesize;
454
+ } cuMemAllocHost_v2_params;
455
+
456
+ typedef struct cuMemFreeHost_params_st {
457
+ void *p;
458
+ } cuMemFreeHost_params;
459
+
460
+ typedef struct cuMemHostAlloc_params_st {
461
+ void **pp;
462
+ size_t bytesize;
463
+ unsigned int Flags;
464
+ } cuMemHostAlloc_params;
465
+
466
+ typedef struct cuMemHostGetDevicePointer_v2_params_st {
467
+ CUdeviceptr *pdptr;
468
+ void *p;
469
+ unsigned int Flags;
470
+ } cuMemHostGetDevicePointer_v2_params;
471
+
472
+ typedef struct cuMemHostGetFlags_params_st {
473
+ unsigned int *pFlags;
474
+ void *p;
475
+ } cuMemHostGetFlags_params;
476
+
477
+ typedef struct cuMemAllocManaged_params_st {
478
+ CUdeviceptr *dptr;
479
+ size_t bytesize;
480
+ unsigned int flags;
481
+ } cuMemAllocManaged_params;
482
+
483
+ typedef struct cuDeviceGetByPCIBusId_params_st {
484
+ CUdevice *dev;
485
+ const char *pciBusId;
486
+ } cuDeviceGetByPCIBusId_params;
487
+
488
+ typedef struct cuDeviceGetPCIBusId_params_st {
489
+ char *pciBusId;
490
+ int len;
491
+ CUdevice dev;
492
+ } cuDeviceGetPCIBusId_params;
493
+
494
+ typedef struct cuIpcGetEventHandle_params_st {
495
+ CUipcEventHandle *pHandle;
496
+ CUevent event;
497
+ } cuIpcGetEventHandle_params;
498
+
499
+ typedef struct cuIpcOpenEventHandle_params_st {
500
+ CUevent *phEvent;
501
+ CUipcEventHandle handle;
502
+ } cuIpcOpenEventHandle_params;
503
+
504
+ typedef struct cuIpcGetMemHandle_params_st {
505
+ CUipcMemHandle *pHandle;
506
+ CUdeviceptr dptr;
507
+ } cuIpcGetMemHandle_params;
508
+
509
+ typedef struct cuIpcOpenMemHandle_v2_params_st {
510
+ CUdeviceptr *pdptr;
511
+ CUipcMemHandle handle;
512
+ unsigned int Flags;
513
+ } cuIpcOpenMemHandle_v2_params;
514
+
515
+ typedef struct cuIpcCloseMemHandle_params_st {
516
+ CUdeviceptr dptr;
517
+ } cuIpcCloseMemHandle_params;
518
+
519
+ typedef struct cuMemHostRegister_v2_params_st {
520
+ void *p;
521
+ size_t bytesize;
522
+ unsigned int Flags;
523
+ } cuMemHostRegister_v2_params;
524
+
525
+ typedef struct cuMemHostUnregister_params_st {
526
+ void *p;
527
+ } cuMemHostUnregister_params;
528
+
529
+ typedef struct cuMemcpy_ptds_params_st {
530
+ CUdeviceptr dst;
531
+ CUdeviceptr src;
532
+ size_t ByteCount;
533
+ } cuMemcpy_ptds_params;
534
+
535
+ typedef struct cuMemcpyPeer_ptds_params_st {
536
+ CUdeviceptr dstDevice;
537
+ CUcontext dstContext;
538
+ CUdeviceptr srcDevice;
539
+ CUcontext srcContext;
540
+ size_t ByteCount;
541
+ } cuMemcpyPeer_ptds_params;
542
+
543
+ typedef struct cuMemcpyHtoD_v2_ptds_params_st {
544
+ CUdeviceptr dstDevice;
545
+ const void *srcHost;
546
+ size_t ByteCount;
547
+ } cuMemcpyHtoD_v2_ptds_params;
548
+
549
+ typedef struct cuMemcpyDtoH_v2_ptds_params_st {
550
+ void *dstHost;
551
+ CUdeviceptr srcDevice;
552
+ size_t ByteCount;
553
+ } cuMemcpyDtoH_v2_ptds_params;
554
+
555
+ typedef struct cuMemcpyDtoD_v2_ptds_params_st {
556
+ CUdeviceptr dstDevice;
557
+ CUdeviceptr srcDevice;
558
+ size_t ByteCount;
559
+ } cuMemcpyDtoD_v2_ptds_params;
560
+
561
+ typedef struct cuMemcpyDtoA_v2_ptds_params_st {
562
+ CUarray dstArray;
563
+ size_t dstOffset;
564
+ CUdeviceptr srcDevice;
565
+ size_t ByteCount;
566
+ } cuMemcpyDtoA_v2_ptds_params;
567
+
568
+ typedef struct cuMemcpyAtoD_v2_ptds_params_st {
569
+ CUdeviceptr dstDevice;
570
+ CUarray srcArray;
571
+ size_t srcOffset;
572
+ size_t ByteCount;
573
+ } cuMemcpyAtoD_v2_ptds_params;
574
+
575
+ typedef struct cuMemcpyHtoA_v2_ptds_params_st {
576
+ CUarray dstArray;
577
+ size_t dstOffset;
578
+ const void *srcHost;
579
+ size_t ByteCount;
580
+ } cuMemcpyHtoA_v2_ptds_params;
581
+
582
+ typedef struct cuMemcpyAtoH_v2_ptds_params_st {
583
+ void *dstHost;
584
+ CUarray srcArray;
585
+ size_t srcOffset;
586
+ size_t ByteCount;
587
+ } cuMemcpyAtoH_v2_ptds_params;
588
+
589
+ typedef struct cuMemcpyAtoA_v2_ptds_params_st {
590
+ CUarray dstArray;
591
+ size_t dstOffset;
592
+ CUarray srcArray;
593
+ size_t srcOffset;
594
+ size_t ByteCount;
595
+ } cuMemcpyAtoA_v2_ptds_params;
596
+
597
+ typedef struct cuMemcpy2D_v2_ptds_params_st {
598
+ const CUDA_MEMCPY2D *pCopy;
599
+ } cuMemcpy2D_v2_ptds_params;
600
+
601
+ typedef struct cuMemcpy2DUnaligned_v2_ptds_params_st {
602
+ const CUDA_MEMCPY2D *pCopy;
603
+ } cuMemcpy2DUnaligned_v2_ptds_params;
604
+
605
+ typedef struct cuMemcpy3D_v2_ptds_params_st {
606
+ const CUDA_MEMCPY3D *pCopy;
607
+ } cuMemcpy3D_v2_ptds_params;
608
+
609
+ typedef struct cuMemcpy3DPeer_ptds_params_st {
610
+ const CUDA_MEMCPY3D_PEER *pCopy;
611
+ } cuMemcpy3DPeer_ptds_params;
612
+
613
+ typedef struct cuMemcpyAsync_ptsz_params_st {
614
+ CUdeviceptr dst;
615
+ CUdeviceptr src;
616
+ size_t ByteCount;
617
+ CUstream hStream;
618
+ } cuMemcpyAsync_ptsz_params;
619
+
620
+ typedef struct cuMemcpyPeerAsync_ptsz_params_st {
621
+ CUdeviceptr dstDevice;
622
+ CUcontext dstContext;
623
+ CUdeviceptr srcDevice;
624
+ CUcontext srcContext;
625
+ size_t ByteCount;
626
+ CUstream hStream;
627
+ } cuMemcpyPeerAsync_ptsz_params;
628
+
629
+ typedef struct cuMemcpyHtoDAsync_v2_ptsz_params_st {
630
+ CUdeviceptr dstDevice;
631
+ const void *srcHost;
632
+ size_t ByteCount;
633
+ CUstream hStream;
634
+ } cuMemcpyHtoDAsync_v2_ptsz_params;
635
+
636
+ typedef struct cuMemcpyDtoHAsync_v2_ptsz_params_st {
637
+ void *dstHost;
638
+ CUdeviceptr srcDevice;
639
+ size_t ByteCount;
640
+ CUstream hStream;
641
+ } cuMemcpyDtoHAsync_v2_ptsz_params;
642
+
643
+ typedef struct cuMemcpyDtoDAsync_v2_ptsz_params_st {
644
+ CUdeviceptr dstDevice;
645
+ CUdeviceptr srcDevice;
646
+ size_t ByteCount;
647
+ CUstream hStream;
648
+ } cuMemcpyDtoDAsync_v2_ptsz_params;
649
+
650
+ typedef struct cuMemcpyHtoAAsync_v2_ptsz_params_st {
651
+ CUarray dstArray;
652
+ size_t dstOffset;
653
+ const void *srcHost;
654
+ size_t ByteCount;
655
+ CUstream hStream;
656
+ } cuMemcpyHtoAAsync_v2_ptsz_params;
657
+
658
+ typedef struct cuMemcpyAtoHAsync_v2_ptsz_params_st {
659
+ void *dstHost;
660
+ CUarray srcArray;
661
+ size_t srcOffset;
662
+ size_t ByteCount;
663
+ CUstream hStream;
664
+ } cuMemcpyAtoHAsync_v2_ptsz_params;
665
+
666
+ typedef struct cuMemcpy2DAsync_v2_ptsz_params_st {
667
+ const CUDA_MEMCPY2D *pCopy;
668
+ CUstream hStream;
669
+ } cuMemcpy2DAsync_v2_ptsz_params;
670
+
671
+ typedef struct cuMemcpy3DAsync_v2_ptsz_params_st {
672
+ const CUDA_MEMCPY3D *pCopy;
673
+ CUstream hStream;
674
+ } cuMemcpy3DAsync_v2_ptsz_params;
675
+
676
+ typedef struct cuMemcpy3DPeerAsync_ptsz_params_st {
677
+ const CUDA_MEMCPY3D_PEER *pCopy;
678
+ CUstream hStream;
679
+ } cuMemcpy3DPeerAsync_ptsz_params;
680
+
681
+ typedef struct cuMemsetD8_v2_ptds_params_st {
682
+ CUdeviceptr dstDevice;
683
+ unsigned char uc;
684
+ size_t N;
685
+ } cuMemsetD8_v2_ptds_params;
686
+
687
+ typedef struct cuMemsetD16_v2_ptds_params_st {
688
+ CUdeviceptr dstDevice;
689
+ unsigned short us;
690
+ size_t N;
691
+ } cuMemsetD16_v2_ptds_params;
692
+
693
+ typedef struct cuMemsetD32_v2_ptds_params_st {
694
+ CUdeviceptr dstDevice;
695
+ unsigned int ui;
696
+ size_t N;
697
+ } cuMemsetD32_v2_ptds_params;
698
+
699
+ typedef struct cuMemsetD2D8_v2_ptds_params_st {
700
+ CUdeviceptr dstDevice;
701
+ size_t dstPitch;
702
+ unsigned char uc;
703
+ size_t Width;
704
+ size_t Height;
705
+ } cuMemsetD2D8_v2_ptds_params;
706
+
707
+ typedef struct cuMemsetD2D16_v2_ptds_params_st {
708
+ CUdeviceptr dstDevice;
709
+ size_t dstPitch;
710
+ unsigned short us;
711
+ size_t Width;
712
+ size_t Height;
713
+ } cuMemsetD2D16_v2_ptds_params;
714
+
715
+ typedef struct cuMemsetD2D32_v2_ptds_params_st {
716
+ CUdeviceptr dstDevice;
717
+ size_t dstPitch;
718
+ unsigned int ui;
719
+ size_t Width;
720
+ size_t Height;
721
+ } cuMemsetD2D32_v2_ptds_params;
722
+
723
+ typedef struct cuMemsetD8Async_ptsz_params_st {
724
+ CUdeviceptr dstDevice;
725
+ unsigned char uc;
726
+ size_t N;
727
+ CUstream hStream;
728
+ } cuMemsetD8Async_ptsz_params;
729
+
730
+ typedef struct cuMemsetD16Async_ptsz_params_st {
731
+ CUdeviceptr dstDevice;
732
+ unsigned short us;
733
+ size_t N;
734
+ CUstream hStream;
735
+ } cuMemsetD16Async_ptsz_params;
736
+
737
+ typedef struct cuMemsetD32Async_ptsz_params_st {
738
+ CUdeviceptr dstDevice;
739
+ unsigned int ui;
740
+ size_t N;
741
+ CUstream hStream;
742
+ } cuMemsetD32Async_ptsz_params;
743
+
744
+ typedef struct cuMemsetD2D8Async_ptsz_params_st {
745
+ CUdeviceptr dstDevice;
746
+ size_t dstPitch;
747
+ unsigned char uc;
748
+ size_t Width;
749
+ size_t Height;
750
+ CUstream hStream;
751
+ } cuMemsetD2D8Async_ptsz_params;
752
+
753
+ typedef struct cuMemsetD2D16Async_ptsz_params_st {
754
+ CUdeviceptr dstDevice;
755
+ size_t dstPitch;
756
+ unsigned short us;
757
+ size_t Width;
758
+ size_t Height;
759
+ CUstream hStream;
760
+ } cuMemsetD2D16Async_ptsz_params;
761
+
762
+ typedef struct cuMemsetD2D32Async_ptsz_params_st {
763
+ CUdeviceptr dstDevice;
764
+ size_t dstPitch;
765
+ unsigned int ui;
766
+ size_t Width;
767
+ size_t Height;
768
+ CUstream hStream;
769
+ } cuMemsetD2D32Async_ptsz_params;
770
+
771
+ typedef struct cuArrayCreate_v2_params_st {
772
+ CUarray *pHandle;
773
+ const CUDA_ARRAY_DESCRIPTOR *pAllocateArray;
774
+ } cuArrayCreate_v2_params;
775
+
776
+ typedef struct cuArrayGetDescriptor_v2_params_st {
777
+ CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor;
778
+ CUarray hArray;
779
+ } cuArrayGetDescriptor_v2_params;
780
+
781
+ typedef struct cuArrayGetSparseProperties_params_st {
782
+ CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties;
783
+ CUarray array;
784
+ } cuArrayGetSparseProperties_params;
785
+
786
+ typedef struct cuMipmappedArrayGetSparseProperties_params_st {
787
+ CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties;
788
+ CUmipmappedArray mipmap;
789
+ } cuMipmappedArrayGetSparseProperties_params;
790
+
791
+ typedef struct cuArrayGetMemoryRequirements_params_st {
792
+ CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements;
793
+ CUarray array;
794
+ CUdevice device;
795
+ } cuArrayGetMemoryRequirements_params;
796
+
797
+ typedef struct cuMipmappedArrayGetMemoryRequirements_params_st {
798
+ CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements;
799
+ CUmipmappedArray mipmap;
800
+ CUdevice device;
801
+ } cuMipmappedArrayGetMemoryRequirements_params;
802
+
803
+ typedef struct cuArrayGetPlane_params_st {
804
+ CUarray *pPlaneArray;
805
+ CUarray hArray;
806
+ unsigned int planeIdx;
807
+ } cuArrayGetPlane_params;
808
+
809
+ typedef struct cuArrayDestroy_params_st {
810
+ CUarray hArray;
811
+ } cuArrayDestroy_params;
812
+
813
+ typedef struct cuArray3DCreate_v2_params_st {
814
+ CUarray *pHandle;
815
+ const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray;
816
+ } cuArray3DCreate_v2_params;
817
+
818
+ typedef struct cuArray3DGetDescriptor_v2_params_st {
819
+ CUDA_ARRAY3D_DESCRIPTOR *pArrayDescriptor;
820
+ CUarray hArray;
821
+ } cuArray3DGetDescriptor_v2_params;
822
+
823
+ typedef struct cuMipmappedArrayCreate_params_st {
824
+ CUmipmappedArray *pHandle;
825
+ const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc;
826
+ unsigned int numMipmapLevels;
827
+ } cuMipmappedArrayCreate_params;
828
+
829
+ typedef struct cuMipmappedArrayGetLevel_params_st {
830
+ CUarray *pLevelArray;
831
+ CUmipmappedArray hMipmappedArray;
832
+ unsigned int level;
833
+ } cuMipmappedArrayGetLevel_params;
834
+
835
+ typedef struct cuMipmappedArrayDestroy_params_st {
836
+ CUmipmappedArray hMipmappedArray;
837
+ } cuMipmappedArrayDestroy_params;
838
+
839
+ typedef struct cuMemGetHandleForAddressRange_params_st {
840
+ void *handle;
841
+ CUdeviceptr dptr;
842
+ size_t size;
843
+ CUmemRangeHandleType handleType;
844
+ unsigned long long flags;
845
+ } cuMemGetHandleForAddressRange_params;
846
+
847
+ typedef struct cuMemAddressReserve_params_st {
848
+ CUdeviceptr *ptr;
849
+ size_t size;
850
+ size_t alignment;
851
+ CUdeviceptr addr;
852
+ unsigned long long flags;
853
+ } cuMemAddressReserve_params;
854
+
855
+ typedef struct cuMemAddressFree_params_st {
856
+ CUdeviceptr ptr;
857
+ size_t size;
858
+ } cuMemAddressFree_params;
859
+
860
+ typedef struct cuMemCreate_params_st {
861
+ CUmemGenericAllocationHandle *handle;
862
+ size_t size;
863
+ const CUmemAllocationProp *prop;
864
+ unsigned long long flags;
865
+ } cuMemCreate_params;
866
+
867
+ typedef struct cuMemRelease_params_st {
868
+ CUmemGenericAllocationHandle handle;
869
+ } cuMemRelease_params;
870
+
871
+ typedef struct cuMemMap_params_st {
872
+ CUdeviceptr ptr;
873
+ size_t size;
874
+ size_t offset;
875
+ CUmemGenericAllocationHandle handle;
876
+ unsigned long long flags;
877
+ } cuMemMap_params;
878
+
879
+ typedef struct cuMemMapArrayAsync_ptsz_params_st {
880
+ CUarrayMapInfo *mapInfoList;
881
+ unsigned int count;
882
+ CUstream hStream;
883
+ } cuMemMapArrayAsync_ptsz_params;
884
+
885
+ typedef struct cuMemUnmap_params_st {
886
+ CUdeviceptr ptr;
887
+ size_t size;
888
+ } cuMemUnmap_params;
889
+
890
+ typedef struct cuMemSetAccess_params_st {
891
+ CUdeviceptr ptr;
892
+ size_t size;
893
+ const CUmemAccessDesc *desc;
894
+ size_t count;
895
+ } cuMemSetAccess_params;
896
+
897
+ typedef struct cuMemGetAccess_params_st {
898
+ unsigned long long *flags;
899
+ const CUmemLocation *location;
900
+ CUdeviceptr ptr;
901
+ } cuMemGetAccess_params;
902
+
903
+ typedef struct cuMemExportToShareableHandle_params_st {
904
+ void *shareableHandle;
905
+ CUmemGenericAllocationHandle handle;
906
+ CUmemAllocationHandleType handleType;
907
+ unsigned long long flags;
908
+ } cuMemExportToShareableHandle_params;
909
+
910
+ typedef struct cuMemImportFromShareableHandle_params_st {
911
+ CUmemGenericAllocationHandle *handle;
912
+ void *osHandle;
913
+ CUmemAllocationHandleType shHandleType;
914
+ } cuMemImportFromShareableHandle_params;
915
+
916
+ typedef struct cuMemGetAllocationGranularity_params_st {
917
+ size_t *granularity;
918
+ const CUmemAllocationProp *prop;
919
+ CUmemAllocationGranularity_flags option;
920
+ } cuMemGetAllocationGranularity_params;
921
+
922
+ typedef struct cuMemGetAllocationPropertiesFromHandle_params_st {
923
+ CUmemAllocationProp *prop;
924
+ CUmemGenericAllocationHandle handle;
925
+ } cuMemGetAllocationPropertiesFromHandle_params;
926
+
927
+ typedef struct cuMemRetainAllocationHandle_params_st {
928
+ CUmemGenericAllocationHandle *handle;
929
+ void *addr;
930
+ } cuMemRetainAllocationHandle_params;
931
+
932
+ typedef struct cuMemFreeAsync_ptsz_params_st {
933
+ CUdeviceptr dptr;
934
+ CUstream hStream;
935
+ } cuMemFreeAsync_ptsz_params;
936
+
937
+ typedef struct cuMemAllocAsync_ptsz_params_st {
938
+ CUdeviceptr *dptr;
939
+ size_t bytesize;
940
+ CUstream hStream;
941
+ } cuMemAllocAsync_ptsz_params;
942
+
943
+ typedef struct cuMemPoolTrimTo_params_st {
944
+ CUmemoryPool pool;
945
+ size_t minBytesToKeep;
946
+ } cuMemPoolTrimTo_params;
947
+
948
+ typedef struct cuMemPoolSetAttribute_params_st {
949
+ CUmemoryPool pool;
950
+ CUmemPool_attribute attr;
951
+ void *value;
952
+ } cuMemPoolSetAttribute_params;
953
+
954
+ typedef struct cuMemPoolGetAttribute_params_st {
955
+ CUmemoryPool pool;
956
+ CUmemPool_attribute attr;
957
+ void *value;
958
+ } cuMemPoolGetAttribute_params;
959
+
960
+ typedef struct cuMemPoolSetAccess_params_st {
961
+ CUmemoryPool pool;
962
+ const CUmemAccessDesc *map;
963
+ size_t count;
964
+ } cuMemPoolSetAccess_params;
965
+
966
+ typedef struct cuMemPoolGetAccess_params_st {
967
+ CUmemAccess_flags *flags;
968
+ CUmemoryPool memPool;
969
+ CUmemLocation *location;
970
+ } cuMemPoolGetAccess_params;
971
+
972
+ typedef struct cuMemPoolCreate_params_st {
973
+ CUmemoryPool *pool;
974
+ const CUmemPoolProps *poolProps;
975
+ } cuMemPoolCreate_params;
976
+
977
+ typedef struct cuMemPoolDestroy_params_st {
978
+ CUmemoryPool pool;
979
+ } cuMemPoolDestroy_params;
980
+
981
+ typedef struct cuMemAllocFromPoolAsync_ptsz_params_st {
982
+ CUdeviceptr *dptr;
983
+ size_t bytesize;
984
+ CUmemoryPool pool;
985
+ CUstream hStream;
986
+ } cuMemAllocFromPoolAsync_ptsz_params;
987
+
988
+ typedef struct cuMemPoolExportToShareableHandle_params_st {
989
+ void *handle_out;
990
+ CUmemoryPool pool;
991
+ CUmemAllocationHandleType handleType;
992
+ unsigned long long flags;
993
+ } cuMemPoolExportToShareableHandle_params;
994
+
995
+ typedef struct cuMemPoolImportFromShareableHandle_params_st {
996
+ CUmemoryPool *pool_out;
997
+ void *handle;
998
+ CUmemAllocationHandleType handleType;
999
+ unsigned long long flags;
1000
+ } cuMemPoolImportFromShareableHandle_params;
1001
+
1002
+ typedef struct cuMemPoolExportPointer_params_st {
1003
+ CUmemPoolPtrExportData *shareData_out;
1004
+ CUdeviceptr ptr;
1005
+ } cuMemPoolExportPointer_params;
1006
+
1007
+ typedef struct cuMemPoolImportPointer_params_st {
1008
+ CUdeviceptr *ptr_out;
1009
+ CUmemoryPool pool;
1010
+ CUmemPoolPtrExportData *shareData;
1011
+ } cuMemPoolImportPointer_params;
1012
+
1013
+ typedef struct cuMulticastCreate_params_st {
1014
+ CUmemGenericAllocationHandle *mcHandle;
1015
+ const CUmulticastObjectProp *prop;
1016
+ } cuMulticastCreate_params;
1017
+
1018
+ typedef struct cuMulticastAddDevice_params_st {
1019
+ CUmemGenericAllocationHandle mcHandle;
1020
+ CUdevice dev;
1021
+ } cuMulticastAddDevice_params;
1022
+
1023
+ typedef struct cuMulticastBindMem_params_st {
1024
+ CUmemGenericAllocationHandle mcHandle;
1025
+ size_t mcOffset;
1026
+ CUmemGenericAllocationHandle memHandle;
1027
+ size_t memOffset;
1028
+ size_t size;
1029
+ unsigned long long flags;
1030
+ } cuMulticastBindMem_params;
1031
+
1032
+ typedef struct cuMulticastBindAddr_params_st {
1033
+ CUmemGenericAllocationHandle mcHandle;
1034
+ size_t mcOffset;
1035
+ CUdeviceptr memptr;
1036
+ size_t size;
1037
+ unsigned long long flags;
1038
+ } cuMulticastBindAddr_params;
1039
+
1040
+ typedef struct cuMulticastUnbind_params_st {
1041
+ CUmemGenericAllocationHandle mcHandle;
1042
+ CUdevice dev;
1043
+ size_t mcOffset;
1044
+ size_t size;
1045
+ } cuMulticastUnbind_params;
1046
+
1047
+ typedef struct cuMulticastGetGranularity_params_st {
1048
+ size_t *granularity;
1049
+ const CUmulticastObjectProp *prop;
1050
+ CUmulticastGranularity_flags option;
1051
+ } cuMulticastGetGranularity_params;
1052
+
1053
+ typedef struct cuPointerGetAttribute_params_st {
1054
+ void *data;
1055
+ CUpointer_attribute attribute;
1056
+ CUdeviceptr ptr;
1057
+ } cuPointerGetAttribute_params;
1058
+
1059
+ typedef struct cuMemPrefetchAsync_ptsz_params_st {
1060
+ CUdeviceptr devPtr;
1061
+ size_t count;
1062
+ CUdevice dstDevice;
1063
+ CUstream hStream;
1064
+ } cuMemPrefetchAsync_ptsz_params;
1065
+
1066
+ typedef struct cuMemAdvise_params_st {
1067
+ CUdeviceptr devPtr;
1068
+ size_t count;
1069
+ CUmem_advise advice;
1070
+ CUdevice device;
1071
+ } cuMemAdvise_params;
1072
+
1073
+ typedef struct cuMemRangeGetAttribute_params_st {
1074
+ void *data;
1075
+ size_t dataSize;
1076
+ CUmem_range_attribute attribute;
1077
+ CUdeviceptr devPtr;
1078
+ size_t count;
1079
+ } cuMemRangeGetAttribute_params;
1080
+
1081
+ typedef struct cuMemRangeGetAttributes_params_st {
1082
+ void **data;
1083
+ size_t *dataSizes;
1084
+ CUmem_range_attribute *attributes;
1085
+ size_t numAttributes;
1086
+ CUdeviceptr devPtr;
1087
+ size_t count;
1088
+ } cuMemRangeGetAttributes_params;
1089
+
1090
+ typedef struct cuPointerSetAttribute_params_st {
1091
+ const void *value;
1092
+ CUpointer_attribute attribute;
1093
+ CUdeviceptr ptr;
1094
+ } cuPointerSetAttribute_params;
1095
+
1096
+ typedef struct cuPointerGetAttributes_params_st {
1097
+ unsigned int numAttributes;
1098
+ CUpointer_attribute *attributes;
1099
+ void **data;
1100
+ CUdeviceptr ptr;
1101
+ } cuPointerGetAttributes_params;
1102
+
1103
+ typedef struct cuStreamCreate_params_st {
1104
+ CUstream *phStream;
1105
+ unsigned int Flags;
1106
+ } cuStreamCreate_params;
1107
+
1108
+ typedef struct cuStreamCreateWithPriority_params_st {
1109
+ CUstream *phStream;
1110
+ unsigned int flags;
1111
+ int priority;
1112
+ } cuStreamCreateWithPriority_params;
1113
+
1114
+ typedef struct cuStreamGetPriority_ptsz_params_st {
1115
+ CUstream hStream;
1116
+ int *priority;
1117
+ } cuStreamGetPriority_ptsz_params;
1118
+
1119
+ typedef struct cuStreamGetFlags_ptsz_params_st {
1120
+ CUstream hStream;
1121
+ unsigned int *flags;
1122
+ } cuStreamGetFlags_ptsz_params;
1123
+
1124
+ typedef struct cuStreamGetId_ptsz_params_st {
1125
+ CUstream hStream;
1126
+ unsigned long long *streamId;
1127
+ } cuStreamGetId_ptsz_params;
1128
+
1129
+ typedef struct cuStreamGetCtx_ptsz_params_st {
1130
+ CUstream hStream;
1131
+ CUcontext *pctx;
1132
+ } cuStreamGetCtx_ptsz_params;
1133
+
1134
+ typedef struct cuStreamWaitEvent_ptsz_params_st {
1135
+ CUstream hStream;
1136
+ CUevent hEvent;
1137
+ unsigned int Flags;
1138
+ } cuStreamWaitEvent_ptsz_params;
1139
+
1140
+ typedef struct cuStreamAddCallback_ptsz_params_st {
1141
+ CUstream hStream;
1142
+ CUstreamCallback callback;
1143
+ void *userData;
1144
+ unsigned int flags;
1145
+ } cuStreamAddCallback_ptsz_params;
1146
+
1147
+ typedef struct cuStreamBeginCapture_v2_ptsz_params_st {
1148
+ CUstream hStream;
1149
+ CUstreamCaptureMode mode;
1150
+ } cuStreamBeginCapture_v2_ptsz_params;
1151
+
1152
+ typedef struct cuThreadExchangeStreamCaptureMode_params_st {
1153
+ CUstreamCaptureMode *mode;
1154
+ } cuThreadExchangeStreamCaptureMode_params;
1155
+
1156
+ typedef struct cuStreamEndCapture_ptsz_params_st {
1157
+ CUstream hStream;
1158
+ CUgraph *phGraph;
1159
+ } cuStreamEndCapture_ptsz_params;
1160
+
1161
+ typedef struct cuStreamIsCapturing_ptsz_params_st {
1162
+ CUstream hStream;
1163
+ CUstreamCaptureStatus *captureStatus;
1164
+ } cuStreamIsCapturing_ptsz_params;
1165
+
1166
+ typedef struct cuStreamGetCaptureInfo_v2_ptsz_params_st {
1167
+ CUstream hStream;
1168
+ CUstreamCaptureStatus *captureStatus_out;
1169
+ cuuint64_t *id_out;
1170
+ CUgraph *graph_out;
1171
+ const CUgraphNode **dependencies_out;
1172
+ size_t *numDependencies_out;
1173
+ } cuStreamGetCaptureInfo_v2_ptsz_params;
1174
+
1175
+ typedef struct cuStreamUpdateCaptureDependencies_ptsz_params_st {
1176
+ CUstream hStream;
1177
+ CUgraphNode *dependencies;
1178
+ size_t numDependencies;
1179
+ unsigned int flags;
1180
+ } cuStreamUpdateCaptureDependencies_ptsz_params;
1181
+
1182
+ typedef struct cuStreamAttachMemAsync_ptsz_params_st {
1183
+ CUstream hStream;
1184
+ CUdeviceptr dptr;
1185
+ size_t length;
1186
+ unsigned int flags;
1187
+ } cuStreamAttachMemAsync_ptsz_params;
1188
+
1189
+ typedef struct cuStreamQuery_ptsz_params_st {
1190
+ CUstream hStream;
1191
+ } cuStreamQuery_ptsz_params;
1192
+
1193
+ typedef struct cuStreamSynchronize_ptsz_params_st {
1194
+ CUstream hStream;
1195
+ } cuStreamSynchronize_ptsz_params;
1196
+
1197
+ typedef struct cuStreamDestroy_v2_params_st {
1198
+ CUstream hStream;
1199
+ } cuStreamDestroy_v2_params;
1200
+
1201
+ typedef struct cuStreamCopyAttributes_ptsz_params_st {
1202
+ CUstream dst;
1203
+ CUstream src;
1204
+ } cuStreamCopyAttributes_ptsz_params;
1205
+
1206
+ typedef struct cuStreamGetAttribute_ptsz_params_st {
1207
+ CUstream hStream;
1208
+ CUstreamAttrID attr;
1209
+ CUstreamAttrValue *value_out;
1210
+ } cuStreamGetAttribute_ptsz_params;
1211
+
1212
+ typedef struct cuStreamSetAttribute_ptsz_params_st {
1213
+ CUstream hStream;
1214
+ CUstreamAttrID attr;
1215
+ const CUstreamAttrValue *value;
1216
+ } cuStreamSetAttribute_ptsz_params;
1217
+
1218
+ typedef struct cuEventCreate_params_st {
1219
+ CUevent *phEvent;
1220
+ unsigned int Flags;
1221
+ } cuEventCreate_params;
1222
+
1223
+ typedef struct cuEventRecord_ptsz_params_st {
1224
+ CUevent hEvent;
1225
+ CUstream hStream;
1226
+ } cuEventRecord_ptsz_params;
1227
+
1228
+ typedef struct cuEventRecordWithFlags_ptsz_params_st {
1229
+ CUevent hEvent;
1230
+ CUstream hStream;
1231
+ unsigned int flags;
1232
+ } cuEventRecordWithFlags_ptsz_params;
1233
+
1234
+ typedef struct cuEventQuery_params_st {
1235
+ CUevent hEvent;
1236
+ } cuEventQuery_params;
1237
+
1238
+ typedef struct cuEventSynchronize_params_st {
1239
+ CUevent hEvent;
1240
+ } cuEventSynchronize_params;
1241
+
1242
+ typedef struct cuEventDestroy_v2_params_st {
1243
+ CUevent hEvent;
1244
+ } cuEventDestroy_v2_params;
1245
+
1246
+ typedef struct cuEventElapsedTime_params_st {
1247
+ float *pMilliseconds;
1248
+ CUevent hStart;
1249
+ CUevent hEnd;
1250
+ } cuEventElapsedTime_params;
1251
+
1252
+ typedef struct cuImportExternalMemory_params_st {
1253
+ CUexternalMemory *extMem_out;
1254
+ const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc;
1255
+ } cuImportExternalMemory_params;
1256
+
1257
+ typedef struct cuExternalMemoryGetMappedBuffer_params_st {
1258
+ CUdeviceptr *devPtr;
1259
+ CUexternalMemory extMem;
1260
+ const CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc;
1261
+ } cuExternalMemoryGetMappedBuffer_params;
1262
+
1263
+ typedef struct cuExternalMemoryGetMappedMipmappedArray_params_st {
1264
+ CUmipmappedArray *mipmap;
1265
+ CUexternalMemory extMem;
1266
+ const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC *mipmapDesc;
1267
+ } cuExternalMemoryGetMappedMipmappedArray_params;
1268
+
1269
+ typedef struct cuDestroyExternalMemory_params_st {
1270
+ CUexternalMemory extMem;
1271
+ } cuDestroyExternalMemory_params;
1272
+
1273
+ typedef struct cuImportExternalSemaphore_params_st {
1274
+ CUexternalSemaphore *extSem_out;
1275
+ const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc;
1276
+ } cuImportExternalSemaphore_params;
1277
+
1278
+ typedef struct cuSignalExternalSemaphoresAsync_ptsz_params_st {
1279
+ const CUexternalSemaphore *extSemArray;
1280
+ const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray;
1281
+ unsigned int numExtSems;
1282
+ CUstream stream;
1283
+ } cuSignalExternalSemaphoresAsync_ptsz_params;
1284
+
1285
+ typedef struct cuWaitExternalSemaphoresAsync_ptsz_params_st {
1286
+ const CUexternalSemaphore *extSemArray;
1287
+ const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray;
1288
+ unsigned int numExtSems;
1289
+ CUstream stream;
1290
+ } cuWaitExternalSemaphoresAsync_ptsz_params;
1291
+
1292
+ typedef struct cuDestroyExternalSemaphore_params_st {
1293
+ CUexternalSemaphore extSem;
1294
+ } cuDestroyExternalSemaphore_params;
1295
+
1296
+ typedef struct cuStreamWaitValue32_v2_ptsz_params_st {
1297
+ CUstream stream;
1298
+ CUdeviceptr addr;
1299
+ cuuint32_t value;
1300
+ unsigned int flags;
1301
+ } cuStreamWaitValue32_v2_ptsz_params;
1302
+
1303
+ typedef struct cuStreamWaitValue64_v2_ptsz_params_st {
1304
+ CUstream stream;
1305
+ CUdeviceptr addr;
1306
+ cuuint64_t value;
1307
+ unsigned int flags;
1308
+ } cuStreamWaitValue64_v2_ptsz_params;
1309
+
1310
+ typedef struct cuStreamWriteValue32_v2_ptsz_params_st {
1311
+ CUstream stream;
1312
+ CUdeviceptr addr;
1313
+ cuuint32_t value;
1314
+ unsigned int flags;
1315
+ } cuStreamWriteValue32_v2_ptsz_params;
1316
+
1317
+ typedef struct cuStreamWriteValue64_v2_ptsz_params_st {
1318
+ CUstream stream;
1319
+ CUdeviceptr addr;
1320
+ cuuint64_t value;
1321
+ unsigned int flags;
1322
+ } cuStreamWriteValue64_v2_ptsz_params;
1323
+
1324
+ typedef struct cuStreamBatchMemOp_v2_ptsz_params_st {
1325
+ CUstream stream;
1326
+ unsigned int count;
1327
+ CUstreamBatchMemOpParams *paramArray;
1328
+ unsigned int flags;
1329
+ } cuStreamBatchMemOp_v2_ptsz_params;
1330
+
1331
+ typedef struct cuFuncGetAttribute_params_st {
1332
+ int *pi;
1333
+ CUfunction_attribute attrib;
1334
+ CUfunction hfunc;
1335
+ } cuFuncGetAttribute_params;
1336
+
1337
+ typedef struct cuFuncSetAttribute_params_st {
1338
+ CUfunction hfunc;
1339
+ CUfunction_attribute attrib;
1340
+ int value;
1341
+ } cuFuncSetAttribute_params;
1342
+
1343
+ typedef struct cuFuncSetCacheConfig_params_st {
1344
+ CUfunction hfunc;
1345
+ CUfunc_cache config;
1346
+ } cuFuncSetCacheConfig_params;
1347
+
1348
+ typedef struct cuFuncSetSharedMemConfig_params_st {
1349
+ CUfunction hfunc;
1350
+ CUsharedconfig config;
1351
+ } cuFuncSetSharedMemConfig_params;
1352
+
1353
+ typedef struct cuFuncGetModule_params_st {
1354
+ CUmodule *hmod;
1355
+ CUfunction hfunc;
1356
+ } cuFuncGetModule_params;
1357
+
1358
+ typedef struct cuLaunchKernel_ptsz_params_st {
1359
+ CUfunction f;
1360
+ unsigned int gridDimX;
1361
+ unsigned int gridDimY;
1362
+ unsigned int gridDimZ;
1363
+ unsigned int blockDimX;
1364
+ unsigned int blockDimY;
1365
+ unsigned int blockDimZ;
1366
+ unsigned int sharedMemBytes;
1367
+ CUstream hStream;
1368
+ void **kernelParams;
1369
+ void **extra;
1370
+ } cuLaunchKernel_ptsz_params;
1371
+
1372
+ typedef struct cuLaunchKernelEx_ptsz_params_st {
1373
+ const CUlaunchConfig *config;
1374
+ CUfunction f;
1375
+ void **kernelParams;
1376
+ void **extra;
1377
+ } cuLaunchKernelEx_ptsz_params;
1378
+
1379
+ typedef struct cuLaunchCooperativeKernel_ptsz_params_st {
1380
+ CUfunction f;
1381
+ unsigned int gridDimX;
1382
+ unsigned int gridDimY;
1383
+ unsigned int gridDimZ;
1384
+ unsigned int blockDimX;
1385
+ unsigned int blockDimY;
1386
+ unsigned int blockDimZ;
1387
+ unsigned int sharedMemBytes;
1388
+ CUstream hStream;
1389
+ void **kernelParams;
1390
+ } cuLaunchCooperativeKernel_ptsz_params;
1391
+
1392
+ typedef struct cuLaunchCooperativeKernelMultiDevice_params_st {
1393
+ CUDA_LAUNCH_PARAMS *launchParamsList;
1394
+ unsigned int numDevices;
1395
+ unsigned int flags;
1396
+ } cuLaunchCooperativeKernelMultiDevice_params;
1397
+
1398
+ typedef struct cuLaunchHostFunc_ptsz_params_st {
1399
+ CUstream hStream;
1400
+ CUhostFn fn;
1401
+ void *userData;
1402
+ } cuLaunchHostFunc_ptsz_params;
1403
+
1404
+ typedef struct cuFuncSetBlockShape_params_st {
1405
+ CUfunction hfunc;
1406
+ int x;
1407
+ int y;
1408
+ int z;
1409
+ } cuFuncSetBlockShape_params;
1410
+
1411
+ typedef struct cuFuncSetSharedSize_params_st {
1412
+ CUfunction hfunc;
1413
+ unsigned int bytes;
1414
+ } cuFuncSetSharedSize_params;
1415
+
1416
+ typedef struct cuParamSetSize_params_st {
1417
+ CUfunction hfunc;
1418
+ unsigned int numbytes;
1419
+ } cuParamSetSize_params;
1420
+
1421
+ typedef struct cuParamSeti_params_st {
1422
+ CUfunction hfunc;
1423
+ int offset;
1424
+ unsigned int value;
1425
+ } cuParamSeti_params;
1426
+
1427
+ typedef struct cuParamSetf_params_st {
1428
+ CUfunction hfunc;
1429
+ int offset;
1430
+ float value;
1431
+ } cuParamSetf_params;
1432
+
1433
+ typedef struct cuParamSetv_params_st {
1434
+ CUfunction hfunc;
1435
+ int offset;
1436
+ void *ptr;
1437
+ unsigned int numbytes;
1438
+ } cuParamSetv_params;
1439
+
1440
+ typedef struct cuLaunch_params_st {
1441
+ CUfunction f;
1442
+ } cuLaunch_params;
1443
+
1444
+ typedef struct cuLaunchGrid_params_st {
1445
+ CUfunction f;
1446
+ int grid_width;
1447
+ int grid_height;
1448
+ } cuLaunchGrid_params;
1449
+
1450
+ typedef struct cuLaunchGridAsync_params_st {
1451
+ CUfunction f;
1452
+ int grid_width;
1453
+ int grid_height;
1454
+ CUstream hStream;
1455
+ } cuLaunchGridAsync_params;
1456
+
1457
+ typedef struct cuParamSetTexRef_params_st {
1458
+ CUfunction hfunc;
1459
+ int texunit;
1460
+ CUtexref hTexRef;
1461
+ } cuParamSetTexRef_params;
1462
+
1463
+ typedef struct cuGraphCreate_params_st {
1464
+ CUgraph *phGraph;
1465
+ unsigned int flags;
1466
+ } cuGraphCreate_params;
1467
+
1468
+ typedef struct cuGraphAddKernelNode_v2_params_st {
1469
+ CUgraphNode *phGraphNode;
1470
+ CUgraph hGraph;
1471
+ const CUgraphNode *dependencies;
1472
+ size_t numDependencies;
1473
+ const CUDA_KERNEL_NODE_PARAMS *nodeParams;
1474
+ } cuGraphAddKernelNode_v2_params;
1475
+
1476
+ typedef struct cuGraphKernelNodeGetParams_v2_params_st {
1477
+ CUgraphNode hNode;
1478
+ CUDA_KERNEL_NODE_PARAMS *nodeParams;
1479
+ } cuGraphKernelNodeGetParams_v2_params;
1480
+
1481
+ typedef struct cuGraphKernelNodeSetParams_v2_params_st {
1482
+ CUgraphNode hNode;
1483
+ const CUDA_KERNEL_NODE_PARAMS *nodeParams;
1484
+ } cuGraphKernelNodeSetParams_v2_params;
1485
+
1486
+ typedef struct cuGraphAddMemcpyNode_params_st {
1487
+ CUgraphNode *phGraphNode;
1488
+ CUgraph hGraph;
1489
+ const CUgraphNode *dependencies;
1490
+ size_t numDependencies;
1491
+ const CUDA_MEMCPY3D *copyParams;
1492
+ CUcontext ctx;
1493
+ } cuGraphAddMemcpyNode_params;
1494
+
1495
+ typedef struct cuGraphMemcpyNodeGetParams_params_st {
1496
+ CUgraphNode hNode;
1497
+ CUDA_MEMCPY3D *nodeParams;
1498
+ } cuGraphMemcpyNodeGetParams_params;
1499
+
1500
+ typedef struct cuGraphMemcpyNodeSetParams_params_st {
1501
+ CUgraphNode hNode;
1502
+ const CUDA_MEMCPY3D *nodeParams;
1503
+ } cuGraphMemcpyNodeSetParams_params;
1504
+
1505
+ typedef struct cuGraphAddMemsetNode_params_st {
1506
+ CUgraphNode *phGraphNode;
1507
+ CUgraph hGraph;
1508
+ const CUgraphNode *dependencies;
1509
+ size_t numDependencies;
1510
+ const CUDA_MEMSET_NODE_PARAMS *memsetParams;
1511
+ CUcontext ctx;
1512
+ } cuGraphAddMemsetNode_params;
1513
+
1514
+ typedef struct cuGraphMemsetNodeGetParams_params_st {
1515
+ CUgraphNode hNode;
1516
+ CUDA_MEMSET_NODE_PARAMS *nodeParams;
1517
+ } cuGraphMemsetNodeGetParams_params;
1518
+
1519
+ typedef struct cuGraphMemsetNodeSetParams_params_st {
1520
+ CUgraphNode hNode;
1521
+ const CUDA_MEMSET_NODE_PARAMS *nodeParams;
1522
+ } cuGraphMemsetNodeSetParams_params;
1523
+
1524
+ typedef struct cuGraphAddHostNode_params_st {
1525
+ CUgraphNode *phGraphNode;
1526
+ CUgraph hGraph;
1527
+ const CUgraphNode *dependencies;
1528
+ size_t numDependencies;
1529
+ const CUDA_HOST_NODE_PARAMS *nodeParams;
1530
+ } cuGraphAddHostNode_params;
1531
+
1532
+ typedef struct cuGraphHostNodeGetParams_params_st {
1533
+ CUgraphNode hNode;
1534
+ CUDA_HOST_NODE_PARAMS *nodeParams;
1535
+ } cuGraphHostNodeGetParams_params;
1536
+
1537
+ typedef struct cuGraphHostNodeSetParams_params_st {
1538
+ CUgraphNode hNode;
1539
+ const CUDA_HOST_NODE_PARAMS *nodeParams;
1540
+ } cuGraphHostNodeSetParams_params;
1541
+
1542
+ typedef struct cuGraphAddChildGraphNode_params_st {
1543
+ CUgraphNode *phGraphNode;
1544
+ CUgraph hGraph;
1545
+ const CUgraphNode *dependencies;
1546
+ size_t numDependencies;
1547
+ CUgraph childGraph;
1548
+ } cuGraphAddChildGraphNode_params;
1549
+
1550
+ typedef struct cuGraphChildGraphNodeGetGraph_params_st {
1551
+ CUgraphNode hNode;
1552
+ CUgraph *phGraph;
1553
+ } cuGraphChildGraphNodeGetGraph_params;
1554
+
1555
+ typedef struct cuGraphAddEmptyNode_params_st {
1556
+ CUgraphNode *phGraphNode;
1557
+ CUgraph hGraph;
1558
+ const CUgraphNode *dependencies;
1559
+ size_t numDependencies;
1560
+ } cuGraphAddEmptyNode_params;
1561
+
1562
+ typedef struct cuGraphAddEventRecordNode_params_st {
1563
+ CUgraphNode *phGraphNode;
1564
+ CUgraph hGraph;
1565
+ const CUgraphNode *dependencies;
1566
+ size_t numDependencies;
1567
+ CUevent event;
1568
+ } cuGraphAddEventRecordNode_params;
1569
+
1570
+ typedef struct cuGraphEventRecordNodeGetEvent_params_st {
1571
+ CUgraphNode hNode;
1572
+ CUevent *event_out;
1573
+ } cuGraphEventRecordNodeGetEvent_params;
1574
+
1575
+ typedef struct cuGraphEventRecordNodeSetEvent_params_st {
1576
+ CUgraphNode hNode;
1577
+ CUevent event;
1578
+ } cuGraphEventRecordNodeSetEvent_params;
1579
+
1580
+ typedef struct cuGraphAddEventWaitNode_params_st {
1581
+ CUgraphNode *phGraphNode;
1582
+ CUgraph hGraph;
1583
+ const CUgraphNode *dependencies;
1584
+ size_t numDependencies;
1585
+ CUevent event;
1586
+ } cuGraphAddEventWaitNode_params;
1587
+
1588
+ typedef struct cuGraphEventWaitNodeGetEvent_params_st {
1589
+ CUgraphNode hNode;
1590
+ CUevent *event_out;
1591
+ } cuGraphEventWaitNodeGetEvent_params;
1592
+
1593
+ typedef struct cuGraphEventWaitNodeSetEvent_params_st {
1594
+ CUgraphNode hNode;
1595
+ CUevent event;
1596
+ } cuGraphEventWaitNodeSetEvent_params;
1597
+
1598
+ typedef struct cuGraphAddExternalSemaphoresSignalNode_params_st {
1599
+ CUgraphNode *phGraphNode;
1600
+ CUgraph hGraph;
1601
+ const CUgraphNode *dependencies;
1602
+ size_t numDependencies;
1603
+ const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams;
1604
+ } cuGraphAddExternalSemaphoresSignalNode_params;
1605
+
1606
+ typedef struct cuGraphExternalSemaphoresSignalNodeGetParams_params_st {
1607
+ CUgraphNode hNode;
1608
+ CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *params_out;
1609
+ } cuGraphExternalSemaphoresSignalNodeGetParams_params;
1610
+
1611
+ typedef struct cuGraphExternalSemaphoresSignalNodeSetParams_params_st {
1612
+ CUgraphNode hNode;
1613
+ const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams;
1614
+ } cuGraphExternalSemaphoresSignalNodeSetParams_params;
1615
+
1616
+ typedef struct cuGraphAddExternalSemaphoresWaitNode_params_st {
1617
+ CUgraphNode *phGraphNode;
1618
+ CUgraph hGraph;
1619
+ const CUgraphNode *dependencies;
1620
+ size_t numDependencies;
1621
+ const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams;
1622
+ } cuGraphAddExternalSemaphoresWaitNode_params;
1623
+
1624
+ typedef struct cuGraphExternalSemaphoresWaitNodeGetParams_params_st {
1625
+ CUgraphNode hNode;
1626
+ CUDA_EXT_SEM_WAIT_NODE_PARAMS *params_out;
1627
+ } cuGraphExternalSemaphoresWaitNodeGetParams_params;
1628
+
1629
+ typedef struct cuGraphExternalSemaphoresWaitNodeSetParams_params_st {
1630
+ CUgraphNode hNode;
1631
+ const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams;
1632
+ } cuGraphExternalSemaphoresWaitNodeSetParams_params;
1633
+
1634
+ typedef struct cuGraphAddBatchMemOpNode_params_st {
1635
+ CUgraphNode *phGraphNode;
1636
+ CUgraph hGraph;
1637
+ const CUgraphNode *dependencies;
1638
+ size_t numDependencies;
1639
+ const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams;
1640
+ } cuGraphAddBatchMemOpNode_params;
1641
+
1642
+ typedef struct cuGraphBatchMemOpNodeGetParams_params_st {
1643
+ CUgraphNode hNode;
1644
+ CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out;
1645
+ } cuGraphBatchMemOpNodeGetParams_params;
1646
+
1647
+ typedef struct cuGraphBatchMemOpNodeSetParams_params_st {
1648
+ CUgraphNode hNode;
1649
+ const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams;
1650
+ } cuGraphBatchMemOpNodeSetParams_params;
1651
+
1652
+ typedef struct cuGraphExecBatchMemOpNodeSetParams_params_st {
1653
+ CUgraphExec hGraphExec;
1654
+ CUgraphNode hNode;
1655
+ const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams;
1656
+ } cuGraphExecBatchMemOpNodeSetParams_params;
1657
+
1658
+ typedef struct cuGraphAddMemAllocNode_params_st {
1659
+ CUgraphNode *phGraphNode;
1660
+ CUgraph hGraph;
1661
+ const CUgraphNode *dependencies;
1662
+ size_t numDependencies;
1663
+ CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams;
1664
+ } cuGraphAddMemAllocNode_params;
1665
+
1666
+ typedef struct cuGraphMemAllocNodeGetParams_params_st {
1667
+ CUgraphNode hNode;
1668
+ CUDA_MEM_ALLOC_NODE_PARAMS *params_out;
1669
+ } cuGraphMemAllocNodeGetParams_params;
1670
+
1671
+ typedef struct cuGraphAddMemFreeNode_params_st {
1672
+ CUgraphNode *phGraphNode;
1673
+ CUgraph hGraph;
1674
+ const CUgraphNode *dependencies;
1675
+ size_t numDependencies;
1676
+ CUdeviceptr dptr;
1677
+ } cuGraphAddMemFreeNode_params;
1678
+
1679
+ typedef struct cuGraphMemFreeNodeGetParams_params_st {
1680
+ CUgraphNode hNode;
1681
+ CUdeviceptr *dptr_out;
1682
+ } cuGraphMemFreeNodeGetParams_params;
1683
+
1684
+ typedef struct cuDeviceGraphMemTrim_params_st {
1685
+ CUdevice device;
1686
+ } cuDeviceGraphMemTrim_params;
1687
+
1688
+ typedef struct cuDeviceGetGraphMemAttribute_params_st {
1689
+ CUdevice device;
1690
+ CUgraphMem_attribute attr;
1691
+ void *value;
1692
+ } cuDeviceGetGraphMemAttribute_params;
1693
+
1694
+ typedef struct cuDeviceSetGraphMemAttribute_params_st {
1695
+ CUdevice device;
1696
+ CUgraphMem_attribute attr;
1697
+ void *value;
1698
+ } cuDeviceSetGraphMemAttribute_params;
1699
+
1700
+ typedef struct cuGraphClone_params_st {
1701
+ CUgraph *phGraphClone;
1702
+ CUgraph originalGraph;
1703
+ } cuGraphClone_params;
1704
+
1705
+ typedef struct cuGraphNodeFindInClone_params_st {
1706
+ CUgraphNode *phNode;
1707
+ CUgraphNode hOriginalNode;
1708
+ CUgraph hClonedGraph;
1709
+ } cuGraphNodeFindInClone_params;
1710
+
1711
+ typedef struct cuGraphNodeGetType_params_st {
1712
+ CUgraphNode hNode;
1713
+ CUgraphNodeType *type;
1714
+ } cuGraphNodeGetType_params;
1715
+
1716
+ typedef struct cuGraphGetNodes_params_st {
1717
+ CUgraph hGraph;
1718
+ CUgraphNode *nodes;
1719
+ size_t *numNodes;
1720
+ } cuGraphGetNodes_params;
1721
+
1722
+ typedef struct cuGraphGetRootNodes_params_st {
1723
+ CUgraph hGraph;
1724
+ CUgraphNode *rootNodes;
1725
+ size_t *numRootNodes;
1726
+ } cuGraphGetRootNodes_params;
1727
+
1728
+ typedef struct cuGraphGetEdges_params_st {
1729
+ CUgraph hGraph;
1730
+ CUgraphNode *from;
1731
+ CUgraphNode *to;
1732
+ size_t *numEdges;
1733
+ } cuGraphGetEdges_params;
1734
+
1735
+ typedef struct cuGraphNodeGetDependencies_params_st {
1736
+ CUgraphNode hNode;
1737
+ CUgraphNode *dependencies;
1738
+ size_t *numDependencies;
1739
+ } cuGraphNodeGetDependencies_params;
1740
+
1741
+ typedef struct cuGraphNodeGetDependentNodes_params_st {
1742
+ CUgraphNode hNode;
1743
+ CUgraphNode *dependentNodes;
1744
+ size_t *numDependentNodes;
1745
+ } cuGraphNodeGetDependentNodes_params;
1746
+
1747
+ typedef struct cuGraphAddDependencies_params_st {
1748
+ CUgraph hGraph;
1749
+ const CUgraphNode *from;
1750
+ const CUgraphNode *to;
1751
+ size_t numDependencies;
1752
+ } cuGraphAddDependencies_params;
1753
+
1754
+ typedef struct cuGraphRemoveDependencies_params_st {
1755
+ CUgraph hGraph;
1756
+ const CUgraphNode *from;
1757
+ const CUgraphNode *to;
1758
+ size_t numDependencies;
1759
+ } cuGraphRemoveDependencies_params;
1760
+
1761
+ typedef struct cuGraphDestroyNode_params_st {
1762
+ CUgraphNode hNode;
1763
+ } cuGraphDestroyNode_params;
1764
+
1765
+ typedef struct cuGraphInstantiateWithFlags_params_st {
1766
+ CUgraphExec *phGraphExec;
1767
+ CUgraph hGraph;
1768
+ unsigned long long flags;
1769
+ } cuGraphInstantiateWithFlags_params;
1770
+
1771
+ typedef struct cuGraphInstantiateWithParams_ptsz_params_st {
1772
+ CUgraphExec *phGraphExec;
1773
+ CUgraph hGraph;
1774
+ CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams;
1775
+ } cuGraphInstantiateWithParams_ptsz_params;
1776
+
1777
+ typedef struct cuGraphExecGetFlags_params_st {
1778
+ CUgraphExec hGraphExec;
1779
+ cuuint64_t *flags;
1780
+ } cuGraphExecGetFlags_params;
1781
+
1782
+ typedef struct cuGraphExecKernelNodeSetParams_v2_params_st {
1783
+ CUgraphExec hGraphExec;
1784
+ CUgraphNode hNode;
1785
+ const CUDA_KERNEL_NODE_PARAMS *nodeParams;
1786
+ } cuGraphExecKernelNodeSetParams_v2_params;
1787
+
1788
+ typedef struct cuGraphExecMemcpyNodeSetParams_params_st {
1789
+ CUgraphExec hGraphExec;
1790
+ CUgraphNode hNode;
1791
+ const CUDA_MEMCPY3D *copyParams;
1792
+ CUcontext ctx;
1793
+ } cuGraphExecMemcpyNodeSetParams_params;
1794
+
1795
+ typedef struct cuGraphExecMemsetNodeSetParams_params_st {
1796
+ CUgraphExec hGraphExec;
1797
+ CUgraphNode hNode;
1798
+ const CUDA_MEMSET_NODE_PARAMS *memsetParams;
1799
+ CUcontext ctx;
1800
+ } cuGraphExecMemsetNodeSetParams_params;
1801
+
1802
+ typedef struct cuGraphExecHostNodeSetParams_params_st {
1803
+ CUgraphExec hGraphExec;
1804
+ CUgraphNode hNode;
1805
+ const CUDA_HOST_NODE_PARAMS *nodeParams;
1806
+ } cuGraphExecHostNodeSetParams_params;
1807
+
1808
+ typedef struct cuGraphExecChildGraphNodeSetParams_params_st {
1809
+ CUgraphExec hGraphExec;
1810
+ CUgraphNode hNode;
1811
+ CUgraph childGraph;
1812
+ } cuGraphExecChildGraphNodeSetParams_params;
1813
+
1814
+ typedef struct cuGraphExecEventRecordNodeSetEvent_params_st {
1815
+ CUgraphExec hGraphExec;
1816
+ CUgraphNode hNode;
1817
+ CUevent event;
1818
+ } cuGraphExecEventRecordNodeSetEvent_params;
1819
+
1820
+ typedef struct cuGraphExecEventWaitNodeSetEvent_params_st {
1821
+ CUgraphExec hGraphExec;
1822
+ CUgraphNode hNode;
1823
+ CUevent event;
1824
+ } cuGraphExecEventWaitNodeSetEvent_params;
1825
+
1826
+ typedef struct cuGraphExecExternalSemaphoresSignalNodeSetParams_params_st {
1827
+ CUgraphExec hGraphExec;
1828
+ CUgraphNode hNode;
1829
+ const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams;
1830
+ } cuGraphExecExternalSemaphoresSignalNodeSetParams_params;
1831
+
1832
+ typedef struct cuGraphExecExternalSemaphoresWaitNodeSetParams_params_st {
1833
+ CUgraphExec hGraphExec;
1834
+ CUgraphNode hNode;
1835
+ const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams;
1836
+ } cuGraphExecExternalSemaphoresWaitNodeSetParams_params;
1837
+
1838
+ typedef struct cuGraphNodeSetEnabled_params_st {
1839
+ CUgraphExec hGraphExec;
1840
+ CUgraphNode hNode;
1841
+ unsigned int isEnabled;
1842
+ } cuGraphNodeSetEnabled_params;
1843
+
1844
+ typedef struct cuGraphNodeGetEnabled_params_st {
1845
+ CUgraphExec hGraphExec;
1846
+ CUgraphNode hNode;
1847
+ unsigned int *isEnabled;
1848
+ } cuGraphNodeGetEnabled_params;
1849
+
1850
+ typedef struct cuGraphUpload_ptsz_params_st {
1851
+ CUgraphExec hGraphExec;
1852
+ CUstream hStream;
1853
+ } cuGraphUpload_ptsz_params;
1854
+
1855
+ typedef struct cuGraphLaunch_ptsz_params_st {
1856
+ CUgraphExec hGraphExec;
1857
+ CUstream hStream;
1858
+ } cuGraphLaunch_ptsz_params;
1859
+
1860
+ typedef struct cuGraphExecDestroy_params_st {
1861
+ CUgraphExec hGraphExec;
1862
+ } cuGraphExecDestroy_params;
1863
+
1864
+ typedef struct cuGraphDestroy_params_st {
1865
+ CUgraph hGraph;
1866
+ } cuGraphDestroy_params;
1867
+
1868
+ typedef struct cuGraphExecUpdate_v2_params_st {
1869
+ CUgraphExec hGraphExec;
1870
+ CUgraph hGraph;
1871
+ CUgraphExecUpdateResultInfo *resultInfo;
1872
+ } cuGraphExecUpdate_v2_params;
1873
+
1874
+ typedef struct cuGraphKernelNodeCopyAttributes_params_st {
1875
+ CUgraphNode dst;
1876
+ CUgraphNode src;
1877
+ } cuGraphKernelNodeCopyAttributes_params;
1878
+
1879
+ typedef struct cuGraphKernelNodeGetAttribute_params_st {
1880
+ CUgraphNode hNode;
1881
+ CUkernelNodeAttrID attr;
1882
+ CUkernelNodeAttrValue *value_out;
1883
+ } cuGraphKernelNodeGetAttribute_params;
1884
+
1885
+ typedef struct cuGraphKernelNodeSetAttribute_params_st {
1886
+ CUgraphNode hNode;
1887
+ CUkernelNodeAttrID attr;
1888
+ const CUkernelNodeAttrValue *value;
1889
+ } cuGraphKernelNodeSetAttribute_params;
1890
+
1891
+ typedef struct cuGraphDebugDotPrint_params_st {
1892
+ CUgraph hGraph;
1893
+ const char *path;
1894
+ unsigned int flags;
1895
+ } cuGraphDebugDotPrint_params;
1896
+
1897
+ typedef struct cuUserObjectCreate_params_st {
1898
+ CUuserObject *object_out;
1899
+ void *ptr;
1900
+ CUhostFn destroy;
1901
+ unsigned int initialRefcount;
1902
+ unsigned int flags;
1903
+ } cuUserObjectCreate_params;
1904
+
1905
+ typedef struct cuUserObjectRetain_params_st {
1906
+ CUuserObject object;
1907
+ unsigned int count;
1908
+ } cuUserObjectRetain_params;
1909
+
1910
+ typedef struct cuUserObjectRelease_params_st {
1911
+ CUuserObject object;
1912
+ unsigned int count;
1913
+ } cuUserObjectRelease_params;
1914
+
1915
+ typedef struct cuGraphRetainUserObject_params_st {
1916
+ CUgraph graph;
1917
+ CUuserObject object;
1918
+ unsigned int count;
1919
+ unsigned int flags;
1920
+ } cuGraphRetainUserObject_params;
1921
+
1922
+ typedef struct cuGraphReleaseUserObject_params_st {
1923
+ CUgraph graph;
1924
+ CUuserObject object;
1925
+ unsigned int count;
1926
+ } cuGraphReleaseUserObject_params;
1927
+
1928
+ typedef struct cuOccupancyMaxActiveBlocksPerMultiprocessor_params_st {
1929
+ int *numBlocks;
1930
+ CUfunction func;
1931
+ int blockSize;
1932
+ size_t dynamicSMemSize;
1933
+ } cuOccupancyMaxActiveBlocksPerMultiprocessor_params;
1934
+
1935
+ typedef struct cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params_st {
1936
+ int *numBlocks;
1937
+ CUfunction func;
1938
+ int blockSize;
1939
+ size_t dynamicSMemSize;
1940
+ unsigned int flags;
1941
+ } cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params;
1942
+
1943
+ typedef struct cuOccupancyMaxPotentialBlockSize_params_st {
1944
+ int *minGridSize;
1945
+ int *blockSize;
1946
+ CUfunction func;
1947
+ CUoccupancyB2DSize blockSizeToDynamicSMemSize;
1948
+ size_t dynamicSMemSize;
1949
+ int blockSizeLimit;
1950
+ } cuOccupancyMaxPotentialBlockSize_params;
1951
+
1952
+ typedef struct cuOccupancyMaxPotentialBlockSizeWithFlags_params_st {
1953
+ int *minGridSize;
1954
+ int *blockSize;
1955
+ CUfunction func;
1956
+ CUoccupancyB2DSize blockSizeToDynamicSMemSize;
1957
+ size_t dynamicSMemSize;
1958
+ int blockSizeLimit;
1959
+ unsigned int flags;
1960
+ } cuOccupancyMaxPotentialBlockSizeWithFlags_params;
1961
+
1962
+ typedef struct cuOccupancyAvailableDynamicSMemPerBlock_params_st {
1963
+ size_t *dynamicSmemSize;
1964
+ CUfunction func;
1965
+ int numBlocks;
1966
+ int blockSize;
1967
+ } cuOccupancyAvailableDynamicSMemPerBlock_params;
1968
+
1969
+ typedef struct cuOccupancyMaxPotentialClusterSize_params_st {
1970
+ int *clusterSize;
1971
+ CUfunction func;
1972
+ const CUlaunchConfig *config;
1973
+ } cuOccupancyMaxPotentialClusterSize_params;
1974
+
1975
+ typedef struct cuOccupancyMaxActiveClusters_params_st {
1976
+ int *numClusters;
1977
+ CUfunction func;
1978
+ const CUlaunchConfig *config;
1979
+ } cuOccupancyMaxActiveClusters_params;
1980
+
1981
+ typedef struct cuTexRefSetArray_params_st {
1982
+ CUtexref hTexRef;
1983
+ CUarray hArray;
1984
+ unsigned int Flags;
1985
+ } cuTexRefSetArray_params;
1986
+
1987
+ typedef struct cuTexRefSetMipmappedArray_params_st {
1988
+ CUtexref hTexRef;
1989
+ CUmipmappedArray hMipmappedArray;
1990
+ unsigned int Flags;
1991
+ } cuTexRefSetMipmappedArray_params;
1992
+
1993
+ typedef struct cuTexRefSetAddress_v2_params_st {
1994
+ size_t *ByteOffset;
1995
+ CUtexref hTexRef;
1996
+ CUdeviceptr dptr;
1997
+ size_t bytes;
1998
+ } cuTexRefSetAddress_v2_params;
1999
+
2000
+ typedef struct cuTexRefSetAddress2D_v3_params_st {
2001
+ CUtexref hTexRef;
2002
+ const CUDA_ARRAY_DESCRIPTOR *desc;
2003
+ CUdeviceptr dptr;
2004
+ size_t Pitch;
2005
+ } cuTexRefSetAddress2D_v3_params;
2006
+
2007
+ typedef struct cuTexRefSetFormat_params_st {
2008
+ CUtexref hTexRef;
2009
+ CUarray_format fmt;
2010
+ int NumPackedComponents;
2011
+ } cuTexRefSetFormat_params;
2012
+
2013
+ typedef struct cuTexRefSetAddressMode_params_st {
2014
+ CUtexref hTexRef;
2015
+ int dim;
2016
+ CUaddress_mode am;
2017
+ } cuTexRefSetAddressMode_params;
2018
+
2019
+ typedef struct cuTexRefSetFilterMode_params_st {
2020
+ CUtexref hTexRef;
2021
+ CUfilter_mode fm;
2022
+ } cuTexRefSetFilterMode_params;
2023
+
2024
+ typedef struct cuTexRefSetMipmapFilterMode_params_st {
2025
+ CUtexref hTexRef;
2026
+ CUfilter_mode fm;
2027
+ } cuTexRefSetMipmapFilterMode_params;
2028
+
2029
+ typedef struct cuTexRefSetMipmapLevelBias_params_st {
2030
+ CUtexref hTexRef;
2031
+ float bias;
2032
+ } cuTexRefSetMipmapLevelBias_params;
2033
+
2034
+ typedef struct cuTexRefSetMipmapLevelClamp_params_st {
2035
+ CUtexref hTexRef;
2036
+ float minMipmapLevelClamp;
2037
+ float maxMipmapLevelClamp;
2038
+ } cuTexRefSetMipmapLevelClamp_params;
2039
+
2040
+ typedef struct cuTexRefSetMaxAnisotropy_params_st {
2041
+ CUtexref hTexRef;
2042
+ unsigned int maxAniso;
2043
+ } cuTexRefSetMaxAnisotropy_params;
2044
+
2045
+ typedef struct cuTexRefSetBorderColor_params_st {
2046
+ CUtexref hTexRef;
2047
+ float *pBorderColor;
2048
+ } cuTexRefSetBorderColor_params;
2049
+
2050
+ typedef struct cuTexRefSetFlags_params_st {
2051
+ CUtexref hTexRef;
2052
+ unsigned int Flags;
2053
+ } cuTexRefSetFlags_params;
2054
+
2055
+ typedef struct cuTexRefGetAddress_v2_params_st {
2056
+ CUdeviceptr *pdptr;
2057
+ CUtexref hTexRef;
2058
+ } cuTexRefGetAddress_v2_params;
2059
+
2060
+ typedef struct cuTexRefGetArray_params_st {
2061
+ CUarray *phArray;
2062
+ CUtexref hTexRef;
2063
+ } cuTexRefGetArray_params;
2064
+
2065
+ typedef struct cuTexRefGetMipmappedArray_params_st {
2066
+ CUmipmappedArray *phMipmappedArray;
2067
+ CUtexref hTexRef;
2068
+ } cuTexRefGetMipmappedArray_params;
2069
+
2070
+ typedef struct cuTexRefGetAddressMode_params_st {
2071
+ CUaddress_mode *pam;
2072
+ CUtexref hTexRef;
2073
+ int dim;
2074
+ } cuTexRefGetAddressMode_params;
2075
+
2076
+ typedef struct cuTexRefGetFilterMode_params_st {
2077
+ CUfilter_mode *pfm;
2078
+ CUtexref hTexRef;
2079
+ } cuTexRefGetFilterMode_params;
2080
+
2081
+ typedef struct cuTexRefGetFormat_params_st {
2082
+ CUarray_format *pFormat;
2083
+ int *pNumChannels;
2084
+ CUtexref hTexRef;
2085
+ } cuTexRefGetFormat_params;
2086
+
2087
+ typedef struct cuTexRefGetMipmapFilterMode_params_st {
2088
+ CUfilter_mode *pfm;
2089
+ CUtexref hTexRef;
2090
+ } cuTexRefGetMipmapFilterMode_params;
2091
+
2092
+ typedef struct cuTexRefGetMipmapLevelBias_params_st {
2093
+ float *pbias;
2094
+ CUtexref hTexRef;
2095
+ } cuTexRefGetMipmapLevelBias_params;
2096
+
2097
+ typedef struct cuTexRefGetMipmapLevelClamp_params_st {
2098
+ float *pminMipmapLevelClamp;
2099
+ float *pmaxMipmapLevelClamp;
2100
+ CUtexref hTexRef;
2101
+ } cuTexRefGetMipmapLevelClamp_params;
2102
+
2103
+ typedef struct cuTexRefGetMaxAnisotropy_params_st {
2104
+ int *pmaxAniso;
2105
+ CUtexref hTexRef;
2106
+ } cuTexRefGetMaxAnisotropy_params;
2107
+
2108
+ typedef struct cuTexRefGetBorderColor_params_st {
2109
+ float *pBorderColor;
2110
+ CUtexref hTexRef;
2111
+ } cuTexRefGetBorderColor_params;
2112
+
2113
+ typedef struct cuTexRefGetFlags_params_st {
2114
+ unsigned int *pFlags;
2115
+ CUtexref hTexRef;
2116
+ } cuTexRefGetFlags_params;
2117
+
2118
+ typedef struct cuTexRefCreate_params_st {
2119
+ CUtexref *pTexRef;
2120
+ } cuTexRefCreate_params;
2121
+
2122
+ typedef struct cuTexRefDestroy_params_st {
2123
+ CUtexref hTexRef;
2124
+ } cuTexRefDestroy_params;
2125
+
2126
+ typedef struct cuSurfRefSetArray_params_st {
2127
+ CUsurfref hSurfRef;
2128
+ CUarray hArray;
2129
+ unsigned int Flags;
2130
+ } cuSurfRefSetArray_params;
2131
+
2132
+ typedef struct cuSurfRefGetArray_params_st {
2133
+ CUarray *phArray;
2134
+ CUsurfref hSurfRef;
2135
+ } cuSurfRefGetArray_params;
2136
+
2137
+ typedef struct cuTexObjectCreate_params_st {
2138
+ CUtexObject *pTexObject;
2139
+ const CUDA_RESOURCE_DESC *pResDesc;
2140
+ const CUDA_TEXTURE_DESC *pTexDesc;
2141
+ const CUDA_RESOURCE_VIEW_DESC *pResViewDesc;
2142
+ } cuTexObjectCreate_params;
2143
+
2144
+ typedef struct cuTexObjectDestroy_params_st {
2145
+ CUtexObject texObject;
2146
+ } cuTexObjectDestroy_params;
2147
+
2148
+ typedef struct cuTexObjectGetResourceDesc_params_st {
2149
+ CUDA_RESOURCE_DESC *pResDesc;
2150
+ CUtexObject texObject;
2151
+ } cuTexObjectGetResourceDesc_params;
2152
+
2153
+ typedef struct cuTexObjectGetTextureDesc_params_st {
2154
+ CUDA_TEXTURE_DESC *pTexDesc;
2155
+ CUtexObject texObject;
2156
+ } cuTexObjectGetTextureDesc_params;
2157
+
2158
+ typedef struct cuTexObjectGetResourceViewDesc_params_st {
2159
+ CUDA_RESOURCE_VIEW_DESC *pResViewDesc;
2160
+ CUtexObject texObject;
2161
+ } cuTexObjectGetResourceViewDesc_params;
2162
+
2163
+ typedef struct cuSurfObjectCreate_params_st {
2164
+ CUsurfObject *pSurfObject;
2165
+ const CUDA_RESOURCE_DESC *pResDesc;
2166
+ } cuSurfObjectCreate_params;
2167
+
2168
+ typedef struct cuSurfObjectDestroy_params_st {
2169
+ CUsurfObject surfObject;
2170
+ } cuSurfObjectDestroy_params;
2171
+
2172
+ typedef struct cuSurfObjectGetResourceDesc_params_st {
2173
+ CUDA_RESOURCE_DESC *pResDesc;
2174
+ CUsurfObject surfObject;
2175
+ } cuSurfObjectGetResourceDesc_params;
2176
+
2177
+ typedef struct cuTensorMapEncodeTiled_params_st {
2178
+ CUtensorMap *tensorMap;
2179
+ CUtensorMapDataType tensorDataType;
2180
+ cuuint32_t tensorRank;
2181
+ void *globalAddress;
2182
+ const cuuint64_t *globalDim;
2183
+ const cuuint64_t *globalStrides;
2184
+ const cuuint32_t *boxDim;
2185
+ const cuuint32_t *elementStrides;
2186
+ CUtensorMapInterleave interleave;
2187
+ CUtensorMapSwizzle swizzle;
2188
+ CUtensorMapL2promotion l2Promotion;
2189
+ CUtensorMapFloatOOBfill oobFill;
2190
+ } cuTensorMapEncodeTiled_params;
2191
+
2192
+ typedef struct cuTensorMapEncodeIm2col_params_st {
2193
+ CUtensorMap *tensorMap;
2194
+ CUtensorMapDataType tensorDataType;
2195
+ cuuint32_t tensorRank;
2196
+ void *globalAddress;
2197
+ const cuuint64_t *globalDim;
2198
+ const cuuint64_t *globalStrides;
2199
+ const int *pixelBoxLowerCorner;
2200
+ const int *pixelBoxUpperCorner;
2201
+ cuuint32_t channelsPerPixel;
2202
+ cuuint32_t pixelsPerColumn;
2203
+ const cuuint32_t *elementStrides;
2204
+ CUtensorMapInterleave interleave;
2205
+ CUtensorMapSwizzle swizzle;
2206
+ CUtensorMapL2promotion l2Promotion;
2207
+ CUtensorMapFloatOOBfill oobFill;
2208
+ } cuTensorMapEncodeIm2col_params;
2209
+
2210
+ typedef struct cuTensorMapReplaceAddress_params_st {
2211
+ CUtensorMap *tensorMap;
2212
+ void *globalAddress;
2213
+ } cuTensorMapReplaceAddress_params;
2214
+
2215
+ typedef struct cuDeviceCanAccessPeer_params_st {
2216
+ int *canAccessPeer;
2217
+ CUdevice dev;
2218
+ CUdevice peerDev;
2219
+ } cuDeviceCanAccessPeer_params;
2220
+
2221
+ typedef struct cuCtxEnablePeerAccess_params_st {
2222
+ CUcontext peerContext;
2223
+ unsigned int Flags;
2224
+ } cuCtxEnablePeerAccess_params;
2225
+
2226
+ typedef struct cuCtxDisablePeerAccess_params_st {
2227
+ CUcontext peerContext;
2228
+ } cuCtxDisablePeerAccess_params;
2229
+
2230
+ typedef struct cuDeviceGetP2PAttribute_params_st {
2231
+ int *value;
2232
+ CUdevice_P2PAttribute attrib;
2233
+ CUdevice srcDevice;
2234
+ CUdevice dstDevice;
2235
+ } cuDeviceGetP2PAttribute_params;
2236
+
2237
+ typedef struct cuGraphicsUnregisterResource_params_st {
2238
+ CUgraphicsResource resource;
2239
+ } cuGraphicsUnregisterResource_params;
2240
+
2241
+ typedef struct cuGraphicsSubResourceGetMappedArray_params_st {
2242
+ CUarray *pArray;
2243
+ CUgraphicsResource resource;
2244
+ unsigned int arrayIndex;
2245
+ unsigned int mipLevel;
2246
+ } cuGraphicsSubResourceGetMappedArray_params;
2247
+
2248
+ typedef struct cuGraphicsResourceGetMappedMipmappedArray_params_st {
2249
+ CUmipmappedArray *pMipmappedArray;
2250
+ CUgraphicsResource resource;
2251
+ } cuGraphicsResourceGetMappedMipmappedArray_params;
2252
+
2253
+ typedef struct cuGraphicsResourceGetMappedPointer_v2_params_st {
2254
+ CUdeviceptr *pDevPtr;
2255
+ size_t *pSize;
2256
+ CUgraphicsResource resource;
2257
+ } cuGraphicsResourceGetMappedPointer_v2_params;
2258
+
2259
+ typedef struct cuGraphicsResourceSetMapFlags_v2_params_st {
2260
+ CUgraphicsResource resource;
2261
+ unsigned int flags;
2262
+ } cuGraphicsResourceSetMapFlags_v2_params;
2263
+
2264
+ typedef struct cuGraphicsMapResources_ptsz_params_st {
2265
+ unsigned int count;
2266
+ CUgraphicsResource *resources;
2267
+ CUstream hStream;
2268
+ } cuGraphicsMapResources_ptsz_params;
2269
+
2270
+ typedef struct cuGraphicsUnmapResources_ptsz_params_st {
2271
+ unsigned int count;
2272
+ CUgraphicsResource *resources;
2273
+ CUstream hStream;
2274
+ } cuGraphicsUnmapResources_ptsz_params;
2275
+
2276
+ typedef struct cuGetProcAddress_v2_params_st {
2277
+ const char *symbol;
2278
+ void **pfn;
2279
+ int cudaVersion;
2280
+ cuuint64_t flags;
2281
+ CUdriverProcAddressQueryResult *symbolStatus;
2282
+ } cuGetProcAddress_v2_params;
2283
+
2284
+ typedef struct cuCoredumpGetAttribute_params_st {
2285
+ CUcoredumpSettings attrib;
2286
+ void *value;
2287
+ size_t *size;
2288
+ } cuCoredumpGetAttribute_params;
2289
+
2290
+ typedef struct cuCoredumpGetAttributeGlobal_params_st {
2291
+ CUcoredumpSettings attrib;
2292
+ void *value;
2293
+ size_t *size;
2294
+ } cuCoredumpGetAttributeGlobal_params;
2295
+
2296
+ typedef struct cuCoredumpSetAttribute_params_st {
2297
+ CUcoredumpSettings attrib;
2298
+ void *value;
2299
+ size_t *size;
2300
+ } cuCoredumpSetAttribute_params;
2301
+
2302
+ typedef struct cuCoredumpSetAttributeGlobal_params_st {
2303
+ CUcoredumpSettings attrib;
2304
+ void *value;
2305
+ size_t *size;
2306
+ } cuCoredumpSetAttributeGlobal_params;
2307
+
2308
+ typedef struct cuGetExportTable_params_st {
2309
+ const void **ppExportTable;
2310
+ const CUuuid *pExportTableId;
2311
+ } cuGetExportTable_params;
2312
+
2313
+ typedef struct cuMemHostRegister_params_st {
2314
+ void *p;
2315
+ size_t bytesize;
2316
+ unsigned int Flags;
2317
+ } cuMemHostRegister_params;
2318
+
2319
+ typedef struct cuGraphicsResourceSetMapFlags_params_st {
2320
+ CUgraphicsResource resource;
2321
+ unsigned int flags;
2322
+ } cuGraphicsResourceSetMapFlags_params;
2323
+
2324
+ typedef struct cuLinkCreate_params_st {
2325
+ unsigned int numOptions;
2326
+ CUjit_option *options;
2327
+ void **optionValues;
2328
+ CUlinkState *stateOut;
2329
+ } cuLinkCreate_params;
2330
+
2331
+ typedef struct cuLinkAddData_params_st {
2332
+ CUlinkState state;
2333
+ CUjitInputType type;
2334
+ void *data;
2335
+ size_t size;
2336
+ const char *name;
2337
+ unsigned int numOptions;
2338
+ CUjit_option *options;
2339
+ void **optionValues;
2340
+ } cuLinkAddData_params;
2341
+
2342
+ typedef struct cuLinkAddFile_params_st {
2343
+ CUlinkState state;
2344
+ CUjitInputType type;
2345
+ const char *path;
2346
+ unsigned int numOptions;
2347
+ CUjit_option *options;
2348
+ void **optionValues;
2349
+ } cuLinkAddFile_params;
2350
+
2351
+ typedef struct cuTexRefSetAddress2D_v2_params_st {
2352
+ CUtexref hTexRef;
2353
+ const CUDA_ARRAY_DESCRIPTOR *desc;
2354
+ CUdeviceptr dptr;
2355
+ size_t Pitch;
2356
+ } cuTexRefSetAddress2D_v2_params;
2357
+
2358
+ typedef struct cuDeviceTotalMem_params_st {
2359
+ unsigned int *bytes;
2360
+ CUdevice dev;
2361
+ } cuDeviceTotalMem_params;
2362
+
2363
+ typedef struct cuCtxCreate_params_st {
2364
+ CUcontext *pctx;
2365
+ unsigned int flags;
2366
+ CUdevice dev;
2367
+ } cuCtxCreate_params;
2368
+
2369
+ typedef struct cuModuleGetGlobal_params_st {
2370
+ CUdeviceptr_v1 *dptr;
2371
+ unsigned int *bytes;
2372
+ CUmodule hmod;
2373
+ const char *name;
2374
+ } cuModuleGetGlobal_params;
2375
+
2376
+ typedef struct cuMemGetInfo_params_st {
2377
+ unsigned int *free;
2378
+ unsigned int *total;
2379
+ } cuMemGetInfo_params;
2380
+
2381
+ typedef struct cuMemAlloc_params_st {
2382
+ CUdeviceptr_v1 *dptr;
2383
+ unsigned int bytesize;
2384
+ } cuMemAlloc_params;
2385
+
2386
+ typedef struct cuMemAllocPitch_params_st {
2387
+ CUdeviceptr_v1 *dptr;
2388
+ unsigned int *pPitch;
2389
+ unsigned int WidthInBytes;
2390
+ unsigned int Height;
2391
+ unsigned int ElementSizeBytes;
2392
+ } cuMemAllocPitch_params;
2393
+
2394
+ typedef struct cuMemFree_params_st {
2395
+ CUdeviceptr_v1 dptr;
2396
+ } cuMemFree_params;
2397
+
2398
+ typedef struct cuMemGetAddressRange_params_st {
2399
+ CUdeviceptr_v1 *pbase;
2400
+ unsigned int *psize;
2401
+ CUdeviceptr_v1 dptr;
2402
+ } cuMemGetAddressRange_params;
2403
+
2404
+ typedef struct cuMemAllocHost_params_st {
2405
+ void **pp;
2406
+ unsigned int bytesize;
2407
+ } cuMemAllocHost_params;
2408
+
2409
+ typedef struct cuMemHostGetDevicePointer_params_st {
2410
+ CUdeviceptr_v1 *pdptr;
2411
+ void *p;
2412
+ unsigned int Flags;
2413
+ } cuMemHostGetDevicePointer_params;
2414
+
2415
+ typedef struct cuMemcpyHtoD_params_st {
2416
+ CUdeviceptr_v1 dstDevice;
2417
+ const void *srcHost;
2418
+ unsigned int ByteCount;
2419
+ } cuMemcpyHtoD_params;
2420
+
2421
+ typedef struct cuMemcpyDtoH_params_st {
2422
+ void *dstHost;
2423
+ CUdeviceptr_v1 srcDevice;
2424
+ unsigned int ByteCount;
2425
+ } cuMemcpyDtoH_params;
2426
+
2427
+ typedef struct cuMemcpyDtoD_params_st {
2428
+ CUdeviceptr_v1 dstDevice;
2429
+ CUdeviceptr_v1 srcDevice;
2430
+ unsigned int ByteCount;
2431
+ } cuMemcpyDtoD_params;
2432
+
2433
+ typedef struct cuMemcpyDtoA_params_st {
2434
+ CUarray dstArray;
2435
+ unsigned int dstOffset;
2436
+ CUdeviceptr_v1 srcDevice;
2437
+ unsigned int ByteCount;
2438
+ } cuMemcpyDtoA_params;
2439
+
2440
+ typedef struct cuMemcpyAtoD_params_st {
2441
+ CUdeviceptr_v1 dstDevice;
2442
+ CUarray srcArray;
2443
+ unsigned int srcOffset;
2444
+ unsigned int ByteCount;
2445
+ } cuMemcpyAtoD_params;
2446
+
2447
+ typedef struct cuMemcpyHtoA_params_st {
2448
+ CUarray dstArray;
2449
+ unsigned int dstOffset;
2450
+ const void *srcHost;
2451
+ unsigned int ByteCount;
2452
+ } cuMemcpyHtoA_params;
2453
+
2454
+ typedef struct cuMemcpyAtoH_params_st {
2455
+ void *dstHost;
2456
+ CUarray srcArray;
2457
+ unsigned int srcOffset;
2458
+ unsigned int ByteCount;
2459
+ } cuMemcpyAtoH_params;
2460
+
2461
+ typedef struct cuMemcpyAtoA_params_st {
2462
+ CUarray dstArray;
2463
+ unsigned int dstOffset;
2464
+ CUarray srcArray;
2465
+ unsigned int srcOffset;
2466
+ unsigned int ByteCount;
2467
+ } cuMemcpyAtoA_params;
2468
+
2469
+ typedef struct cuMemcpyHtoAAsync_params_st {
2470
+ CUarray dstArray;
2471
+ unsigned int dstOffset;
2472
+ const void *srcHost;
2473
+ unsigned int ByteCount;
2474
+ CUstream hStream;
2475
+ } cuMemcpyHtoAAsync_params;
2476
+
2477
+ typedef struct cuMemcpyAtoHAsync_params_st {
2478
+ void *dstHost;
2479
+ CUarray srcArray;
2480
+ unsigned int srcOffset;
2481
+ unsigned int ByteCount;
2482
+ CUstream hStream;
2483
+ } cuMemcpyAtoHAsync_params;
2484
+
2485
+ typedef struct cuMemcpy2D_params_st {
2486
+ const CUDA_MEMCPY2D_v1 *pCopy;
2487
+ } cuMemcpy2D_params;
2488
+
2489
+ typedef struct cuMemcpy2DUnaligned_params_st {
2490
+ const CUDA_MEMCPY2D_v1 *pCopy;
2491
+ } cuMemcpy2DUnaligned_params;
2492
+
2493
+ typedef struct cuMemcpy3D_params_st {
2494
+ const CUDA_MEMCPY3D_v1 *pCopy;
2495
+ } cuMemcpy3D_params;
2496
+
2497
+ typedef struct cuMemcpyHtoDAsync_params_st {
2498
+ CUdeviceptr_v1 dstDevice;
2499
+ const void *srcHost;
2500
+ unsigned int ByteCount;
2501
+ CUstream hStream;
2502
+ } cuMemcpyHtoDAsync_params;
2503
+
2504
+ typedef struct cuMemcpyDtoHAsync_params_st {
2505
+ void *dstHost;
2506
+ CUdeviceptr_v1 srcDevice;
2507
+ unsigned int ByteCount;
2508
+ CUstream hStream;
2509
+ } cuMemcpyDtoHAsync_params;
2510
+
2511
+ typedef struct cuMemcpyDtoDAsync_params_st {
2512
+ CUdeviceptr_v1 dstDevice;
2513
+ CUdeviceptr_v1 srcDevice;
2514
+ unsigned int ByteCount;
2515
+ CUstream hStream;
2516
+ } cuMemcpyDtoDAsync_params;
2517
+
2518
+ typedef struct cuMemcpy2DAsync_params_st {
2519
+ const CUDA_MEMCPY2D_v1 *pCopy;
2520
+ CUstream hStream;
2521
+ } cuMemcpy2DAsync_params;
2522
+
2523
+ typedef struct cuMemcpy3DAsync_params_st {
2524
+ const CUDA_MEMCPY3D_v1 *pCopy;
2525
+ CUstream hStream;
2526
+ } cuMemcpy3DAsync_params;
2527
+
2528
+ typedef struct cuMemsetD8_params_st {
2529
+ CUdeviceptr_v1 dstDevice;
2530
+ unsigned char uc;
2531
+ unsigned int N;
2532
+ } cuMemsetD8_params;
2533
+
2534
+ typedef struct cuMemsetD16_params_st {
2535
+ CUdeviceptr_v1 dstDevice;
2536
+ unsigned short us;
2537
+ unsigned int N;
2538
+ } cuMemsetD16_params;
2539
+
2540
+ typedef struct cuMemsetD32_params_st {
2541
+ CUdeviceptr_v1 dstDevice;
2542
+ unsigned int ui;
2543
+ unsigned int N;
2544
+ } cuMemsetD32_params;
2545
+
2546
+ typedef struct cuMemsetD2D8_params_st {
2547
+ CUdeviceptr_v1 dstDevice;
2548
+ unsigned int dstPitch;
2549
+ unsigned char uc;
2550
+ unsigned int Width;
2551
+ unsigned int Height;
2552
+ } cuMemsetD2D8_params;
2553
+
2554
+ typedef struct cuMemsetD2D16_params_st {
2555
+ CUdeviceptr_v1 dstDevice;
2556
+ unsigned int dstPitch;
2557
+ unsigned short us;
2558
+ unsigned int Width;
2559
+ unsigned int Height;
2560
+ } cuMemsetD2D16_params;
2561
+
2562
+ typedef struct cuMemsetD2D32_params_st {
2563
+ CUdeviceptr_v1 dstDevice;
2564
+ unsigned int dstPitch;
2565
+ unsigned int ui;
2566
+ unsigned int Width;
2567
+ unsigned int Height;
2568
+ } cuMemsetD2D32_params;
2569
+
2570
+ typedef struct cuArrayCreate_params_st {
2571
+ CUarray *pHandle;
2572
+ const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray;
2573
+ } cuArrayCreate_params;
2574
+
2575
+ typedef struct cuArrayGetDescriptor_params_st {
2576
+ CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor;
2577
+ CUarray hArray;
2578
+ } cuArrayGetDescriptor_params;
2579
+
2580
+ typedef struct cuArray3DCreate_params_st {
2581
+ CUarray *pHandle;
2582
+ const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray;
2583
+ } cuArray3DCreate_params;
2584
+
2585
+ typedef struct cuArray3DGetDescriptor_params_st {
2586
+ CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor;
2587
+ CUarray hArray;
2588
+ } cuArray3DGetDescriptor_params;
2589
+
2590
+ typedef struct cuTexRefSetAddress_params_st {
2591
+ unsigned int *ByteOffset;
2592
+ CUtexref hTexRef;
2593
+ CUdeviceptr_v1 dptr;
2594
+ unsigned int bytes;
2595
+ } cuTexRefSetAddress_params;
2596
+
2597
+ typedef struct cuTexRefSetAddress2D_params_st {
2598
+ CUtexref hTexRef;
2599
+ const CUDA_ARRAY_DESCRIPTOR_v1 *desc;
2600
+ CUdeviceptr_v1 dptr;
2601
+ unsigned int Pitch;
2602
+ } cuTexRefSetAddress2D_params;
2603
+
2604
+ typedef struct cuTexRefGetAddress_params_st {
2605
+ CUdeviceptr_v1 *pdptr;
2606
+ CUtexref hTexRef;
2607
+ } cuTexRefGetAddress_params;
2608
+
2609
+ typedef struct cuGraphicsResourceGetMappedPointer_params_st {
2610
+ CUdeviceptr_v1 *pDevPtr;
2611
+ unsigned int *pSize;
2612
+ CUgraphicsResource resource;
2613
+ } cuGraphicsResourceGetMappedPointer_params;
2614
+
2615
+ typedef struct cuCtxDestroy_params_st {
2616
+ CUcontext ctx;
2617
+ } cuCtxDestroy_params;
2618
+
2619
+ typedef struct cuCtxPopCurrent_params_st {
2620
+ CUcontext *pctx;
2621
+ } cuCtxPopCurrent_params;
2622
+
2623
+ typedef struct cuCtxPushCurrent_params_st {
2624
+ CUcontext ctx;
2625
+ } cuCtxPushCurrent_params;
2626
+
2627
+ typedef struct cuStreamDestroy_params_st {
2628
+ CUstream hStream;
2629
+ } cuStreamDestroy_params;
2630
+
2631
+ typedef struct cuEventDestroy_params_st {
2632
+ CUevent hEvent;
2633
+ } cuEventDestroy_params;
2634
+
2635
+ typedef struct cuDevicePrimaryCtxRelease_params_st {
2636
+ CUdevice dev;
2637
+ } cuDevicePrimaryCtxRelease_params;
2638
+
2639
+ typedef struct cuDevicePrimaryCtxReset_params_st {
2640
+ CUdevice dev;
2641
+ } cuDevicePrimaryCtxReset_params;
2642
+
2643
+ typedef struct cuDevicePrimaryCtxSetFlags_params_st {
2644
+ CUdevice dev;
2645
+ unsigned int flags;
2646
+ } cuDevicePrimaryCtxSetFlags_params;
2647
+
2648
+ typedef struct cuMemcpyHtoD_v2_params_st {
2649
+ CUdeviceptr dstDevice;
2650
+ const void *srcHost;
2651
+ size_t ByteCount;
2652
+ } cuMemcpyHtoD_v2_params;
2653
+
2654
+ typedef struct cuMemcpyDtoH_v2_params_st {
2655
+ void *dstHost;
2656
+ CUdeviceptr srcDevice;
2657
+ size_t ByteCount;
2658
+ } cuMemcpyDtoH_v2_params;
2659
+
2660
+ typedef struct cuMemcpyDtoD_v2_params_st {
2661
+ CUdeviceptr dstDevice;
2662
+ CUdeviceptr srcDevice;
2663
+ size_t ByteCount;
2664
+ } cuMemcpyDtoD_v2_params;
2665
+
2666
+ typedef struct cuMemcpyDtoA_v2_params_st {
2667
+ CUarray dstArray;
2668
+ size_t dstOffset;
2669
+ CUdeviceptr srcDevice;
2670
+ size_t ByteCount;
2671
+ } cuMemcpyDtoA_v2_params;
2672
+
2673
+ typedef struct cuMemcpyAtoD_v2_params_st {
2674
+ CUdeviceptr dstDevice;
2675
+ CUarray srcArray;
2676
+ size_t srcOffset;
2677
+ size_t ByteCount;
2678
+ } cuMemcpyAtoD_v2_params;
2679
+
2680
+ typedef struct cuMemcpyHtoA_v2_params_st {
2681
+ CUarray dstArray;
2682
+ size_t dstOffset;
2683
+ const void *srcHost;
2684
+ size_t ByteCount;
2685
+ } cuMemcpyHtoA_v2_params;
2686
+
2687
+ typedef struct cuMemcpyAtoH_v2_params_st {
2688
+ void *dstHost;
2689
+ CUarray srcArray;
2690
+ size_t srcOffset;
2691
+ size_t ByteCount;
2692
+ } cuMemcpyAtoH_v2_params;
2693
+
2694
+ typedef struct cuMemcpyAtoA_v2_params_st {
2695
+ CUarray dstArray;
2696
+ size_t dstOffset;
2697
+ CUarray srcArray;
2698
+ size_t srcOffset;
2699
+ size_t ByteCount;
2700
+ } cuMemcpyAtoA_v2_params;
2701
+
2702
+ typedef struct cuMemcpyHtoAAsync_v2_params_st {
2703
+ CUarray dstArray;
2704
+ size_t dstOffset;
2705
+ const void *srcHost;
2706
+ size_t ByteCount;
2707
+ CUstream hStream;
2708
+ } cuMemcpyHtoAAsync_v2_params;
2709
+
2710
+ typedef struct cuMemcpyAtoHAsync_v2_params_st {
2711
+ void *dstHost;
2712
+ CUarray srcArray;
2713
+ size_t srcOffset;
2714
+ size_t ByteCount;
2715
+ CUstream hStream;
2716
+ } cuMemcpyAtoHAsync_v2_params;
2717
+
2718
+ typedef struct cuMemcpy2D_v2_params_st {
2719
+ const CUDA_MEMCPY2D *pCopy;
2720
+ } cuMemcpy2D_v2_params;
2721
+
2722
+ typedef struct cuMemcpy2DUnaligned_v2_params_st {
2723
+ const CUDA_MEMCPY2D *pCopy;
2724
+ } cuMemcpy2DUnaligned_v2_params;
2725
+
2726
+ typedef struct cuMemcpy3D_v2_params_st {
2727
+ const CUDA_MEMCPY3D *pCopy;
2728
+ } cuMemcpy3D_v2_params;
2729
+
2730
+ typedef struct cuMemcpyHtoDAsync_v2_params_st {
2731
+ CUdeviceptr dstDevice;
2732
+ const void *srcHost;
2733
+ size_t ByteCount;
2734
+ CUstream hStream;
2735
+ } cuMemcpyHtoDAsync_v2_params;
2736
+
2737
+ typedef struct cuMemcpyDtoHAsync_v2_params_st {
2738
+ void *dstHost;
2739
+ CUdeviceptr srcDevice;
2740
+ size_t ByteCount;
2741
+ CUstream hStream;
2742
+ } cuMemcpyDtoHAsync_v2_params;
2743
+
2744
+ typedef struct cuMemcpyDtoDAsync_v2_params_st {
2745
+ CUdeviceptr dstDevice;
2746
+ CUdeviceptr srcDevice;
2747
+ size_t ByteCount;
2748
+ CUstream hStream;
2749
+ } cuMemcpyDtoDAsync_v2_params;
2750
+
2751
+ typedef struct cuMemcpy2DAsync_v2_params_st {
2752
+ const CUDA_MEMCPY2D *pCopy;
2753
+ CUstream hStream;
2754
+ } cuMemcpy2DAsync_v2_params;
2755
+
2756
+ typedef struct cuMemcpy3DAsync_v2_params_st {
2757
+ const CUDA_MEMCPY3D *pCopy;
2758
+ CUstream hStream;
2759
+ } cuMemcpy3DAsync_v2_params;
2760
+
2761
+ typedef struct cuMemsetD8_v2_params_st {
2762
+ CUdeviceptr dstDevice;
2763
+ unsigned char uc;
2764
+ size_t N;
2765
+ } cuMemsetD8_v2_params;
2766
+
2767
+ typedef struct cuMemsetD16_v2_params_st {
2768
+ CUdeviceptr dstDevice;
2769
+ unsigned short us;
2770
+ size_t N;
2771
+ } cuMemsetD16_v2_params;
2772
+
2773
+ typedef struct cuMemsetD32_v2_params_st {
2774
+ CUdeviceptr dstDevice;
2775
+ unsigned int ui;
2776
+ size_t N;
2777
+ } cuMemsetD32_v2_params;
2778
+
2779
+ typedef struct cuMemsetD2D8_v2_params_st {
2780
+ CUdeviceptr dstDevice;
2781
+ size_t dstPitch;
2782
+ unsigned char uc;
2783
+ size_t Width;
2784
+ size_t Height;
2785
+ } cuMemsetD2D8_v2_params;
2786
+
2787
+ typedef struct cuMemsetD2D16_v2_params_st {
2788
+ CUdeviceptr dstDevice;
2789
+ size_t dstPitch;
2790
+ unsigned short us;
2791
+ size_t Width;
2792
+ size_t Height;
2793
+ } cuMemsetD2D16_v2_params;
2794
+
2795
+ typedef struct cuMemsetD2D32_v2_params_st {
2796
+ CUdeviceptr dstDevice;
2797
+ size_t dstPitch;
2798
+ unsigned int ui;
2799
+ size_t Width;
2800
+ size_t Height;
2801
+ } cuMemsetD2D32_v2_params;
2802
+
2803
+ typedef struct cuMemcpy_params_st {
2804
+ CUdeviceptr dst;
2805
+ CUdeviceptr src;
2806
+ size_t ByteCount;
2807
+ } cuMemcpy_params;
2808
+
2809
+ typedef struct cuMemcpyAsync_params_st {
2810
+ CUdeviceptr dst;
2811
+ CUdeviceptr src;
2812
+ size_t ByteCount;
2813
+ CUstream hStream;
2814
+ } cuMemcpyAsync_params;
2815
+
2816
+ typedef struct cuMemcpyPeer_params_st {
2817
+ CUdeviceptr dstDevice;
2818
+ CUcontext dstContext;
2819
+ CUdeviceptr srcDevice;
2820
+ CUcontext srcContext;
2821
+ size_t ByteCount;
2822
+ } cuMemcpyPeer_params;
2823
+
2824
+ typedef struct cuMemcpyPeerAsync_params_st {
2825
+ CUdeviceptr dstDevice;
2826
+ CUcontext dstContext;
2827
+ CUdeviceptr srcDevice;
2828
+ CUcontext srcContext;
2829
+ size_t ByteCount;
2830
+ CUstream hStream;
2831
+ } cuMemcpyPeerAsync_params;
2832
+
2833
+ typedef struct cuMemcpy3DPeer_params_st {
2834
+ const CUDA_MEMCPY3D_PEER *pCopy;
2835
+ } cuMemcpy3DPeer_params;
2836
+
2837
+ typedef struct cuMemcpy3DPeerAsync_params_st {
2838
+ const CUDA_MEMCPY3D_PEER *pCopy;
2839
+ CUstream hStream;
2840
+ } cuMemcpy3DPeerAsync_params;
2841
+
2842
+ typedef struct cuMemsetD8Async_params_st {
2843
+ CUdeviceptr dstDevice;
2844
+ unsigned char uc;
2845
+ size_t N;
2846
+ CUstream hStream;
2847
+ } cuMemsetD8Async_params;
2848
+
2849
+ typedef struct cuMemsetD16Async_params_st {
2850
+ CUdeviceptr dstDevice;
2851
+ unsigned short us;
2852
+ size_t N;
2853
+ CUstream hStream;
2854
+ } cuMemsetD16Async_params;
2855
+
2856
+ typedef struct cuMemsetD32Async_params_st {
2857
+ CUdeviceptr dstDevice;
2858
+ unsigned int ui;
2859
+ size_t N;
2860
+ CUstream hStream;
2861
+ } cuMemsetD32Async_params;
2862
+
2863
+ typedef struct cuMemsetD2D8Async_params_st {
2864
+ CUdeviceptr dstDevice;
2865
+ size_t dstPitch;
2866
+ unsigned char uc;
2867
+ size_t Width;
2868
+ size_t Height;
2869
+ CUstream hStream;
2870
+ } cuMemsetD2D8Async_params;
2871
+
2872
+ typedef struct cuMemsetD2D16Async_params_st {
2873
+ CUdeviceptr dstDevice;
2874
+ size_t dstPitch;
2875
+ unsigned short us;
2876
+ size_t Width;
2877
+ size_t Height;
2878
+ CUstream hStream;
2879
+ } cuMemsetD2D16Async_params;
2880
+
2881
+ typedef struct cuMemsetD2D32Async_params_st {
2882
+ CUdeviceptr dstDevice;
2883
+ size_t dstPitch;
2884
+ unsigned int ui;
2885
+ size_t Width;
2886
+ size_t Height;
2887
+ CUstream hStream;
2888
+ } cuMemsetD2D32Async_params;
2889
+
2890
+ typedef struct cuStreamGetPriority_params_st {
2891
+ CUstream hStream;
2892
+ int *priority;
2893
+ } cuStreamGetPriority_params;
2894
+
2895
+ typedef struct cuStreamGetId_params_st {
2896
+ CUstream hStream;
2897
+ unsigned long long *streamId;
2898
+ } cuStreamGetId_params;
2899
+
2900
+ typedef struct cuStreamGetFlags_params_st {
2901
+ CUstream hStream;
2902
+ unsigned int *flags;
2903
+ } cuStreamGetFlags_params;
2904
+
2905
+ typedef struct cuStreamGetCtx_params_st {
2906
+ CUstream hStream;
2907
+ CUcontext *pctx;
2908
+ } cuStreamGetCtx_params;
2909
+
2910
+ typedef struct cuStreamWaitEvent_params_st {
2911
+ CUstream hStream;
2912
+ CUevent hEvent;
2913
+ unsigned int Flags;
2914
+ } cuStreamWaitEvent_params;
2915
+
2916
+ typedef struct cuStreamAddCallback_params_st {
2917
+ CUstream hStream;
2918
+ CUstreamCallback callback;
2919
+ void *userData;
2920
+ unsigned int flags;
2921
+ } cuStreamAddCallback_params;
2922
+
2923
+ typedef struct cuStreamAttachMemAsync_params_st {
2924
+ CUstream hStream;
2925
+ CUdeviceptr dptr;
2926
+ size_t length;
2927
+ unsigned int flags;
2928
+ } cuStreamAttachMemAsync_params;
2929
+
2930
+ typedef struct cuStreamQuery_params_st {
2931
+ CUstream hStream;
2932
+ } cuStreamQuery_params;
2933
+
2934
+ typedef struct cuStreamSynchronize_params_st {
2935
+ CUstream hStream;
2936
+ } cuStreamSynchronize_params;
2937
+
2938
+ typedef struct cuEventRecord_params_st {
2939
+ CUevent hEvent;
2940
+ CUstream hStream;
2941
+ } cuEventRecord_params;
2942
+
2943
+ typedef struct cuEventRecordWithFlags_params_st {
2944
+ CUevent hEvent;
2945
+ CUstream hStream;
2946
+ unsigned int flags;
2947
+ } cuEventRecordWithFlags_params;
2948
+
2949
+ typedef struct cuLaunchKernel_params_st {
2950
+ CUfunction f;
2951
+ unsigned int gridDimX;
2952
+ unsigned int gridDimY;
2953
+ unsigned int gridDimZ;
2954
+ unsigned int blockDimX;
2955
+ unsigned int blockDimY;
2956
+ unsigned int blockDimZ;
2957
+ unsigned int sharedMemBytes;
2958
+ CUstream hStream;
2959
+ void **kernelParams;
2960
+ void **extra;
2961
+ } cuLaunchKernel_params;
2962
+
2963
+ typedef struct cuLaunchKernelEx_params_st {
2964
+ const CUlaunchConfig *config;
2965
+ CUfunction f;
2966
+ void **kernelParams;
2967
+ void **extra;
2968
+ } cuLaunchKernelEx_params;
2969
+
2970
+ typedef struct cuLaunchHostFunc_params_st {
2971
+ CUstream hStream;
2972
+ CUhostFn fn;
2973
+ void *userData;
2974
+ } cuLaunchHostFunc_params;
2975
+
2976
+ typedef struct cuGraphicsMapResources_params_st {
2977
+ unsigned int count;
2978
+ CUgraphicsResource *resources;
2979
+ CUstream hStream;
2980
+ } cuGraphicsMapResources_params;
2981
+
2982
+ typedef struct cuGraphicsUnmapResources_params_st {
2983
+ unsigned int count;
2984
+ CUgraphicsResource *resources;
2985
+ CUstream hStream;
2986
+ } cuGraphicsUnmapResources_params;
2987
+
2988
+ typedef struct cuStreamWriteValue32_params_st {
2989
+ CUstream stream;
2990
+ CUdeviceptr addr;
2991
+ cuuint32_t value;
2992
+ unsigned int flags;
2993
+ } cuStreamWriteValue32_params;
2994
+
2995
+ typedef struct cuStreamWaitValue32_params_st {
2996
+ CUstream stream;
2997
+ CUdeviceptr addr;
2998
+ cuuint32_t value;
2999
+ unsigned int flags;
3000
+ } cuStreamWaitValue32_params;
3001
+
3002
+ typedef struct cuStreamWriteValue64_params_st {
3003
+ CUstream stream;
3004
+ CUdeviceptr addr;
3005
+ cuuint64_t value;
3006
+ unsigned int flags;
3007
+ } cuStreamWriteValue64_params;
3008
+
3009
+ typedef struct cuStreamWaitValue64_params_st {
3010
+ CUstream stream;
3011
+ CUdeviceptr addr;
3012
+ cuuint64_t value;
3013
+ unsigned int flags;
3014
+ } cuStreamWaitValue64_params;
3015
+
3016
+ typedef struct cuStreamBatchMemOp_params_st {
3017
+ CUstream stream;
3018
+ unsigned int count;
3019
+ CUstreamBatchMemOpParams *paramArray;
3020
+ unsigned int flags;
3021
+ } cuStreamBatchMemOp_params;
3022
+
3023
+ typedef struct cuStreamWriteValue32_ptsz_params_st {
3024
+ CUstream stream;
3025
+ CUdeviceptr addr;
3026
+ cuuint32_t value;
3027
+ unsigned int flags;
3028
+ } cuStreamWriteValue32_ptsz_params;
3029
+
3030
+ typedef struct cuStreamWaitValue32_ptsz_params_st {
3031
+ CUstream stream;
3032
+ CUdeviceptr addr;
3033
+ cuuint32_t value;
3034
+ unsigned int flags;
3035
+ } cuStreamWaitValue32_ptsz_params;
3036
+
3037
+ typedef struct cuStreamWriteValue64_ptsz_params_st {
3038
+ CUstream stream;
3039
+ CUdeviceptr addr;
3040
+ cuuint64_t value;
3041
+ unsigned int flags;
3042
+ } cuStreamWriteValue64_ptsz_params;
3043
+
3044
+ typedef struct cuStreamWaitValue64_ptsz_params_st {
3045
+ CUstream stream;
3046
+ CUdeviceptr addr;
3047
+ cuuint64_t value;
3048
+ unsigned int flags;
3049
+ } cuStreamWaitValue64_ptsz_params;
3050
+
3051
+ typedef struct cuStreamBatchMemOp_ptsz_params_st {
3052
+ CUstream stream;
3053
+ unsigned int count;
3054
+ CUstreamBatchMemOpParams *paramArray;
3055
+ unsigned int flags;
3056
+ } cuStreamBatchMemOp_ptsz_params;
3057
+
3058
+ typedef struct cuStreamWriteValue32_v2_params_st {
3059
+ CUstream stream;
3060
+ CUdeviceptr addr;
3061
+ cuuint32_t value;
3062
+ unsigned int flags;
3063
+ } cuStreamWriteValue32_v2_params;
3064
+
3065
+ typedef struct cuStreamWaitValue32_v2_params_st {
3066
+ CUstream stream;
3067
+ CUdeviceptr addr;
3068
+ cuuint32_t value;
3069
+ unsigned int flags;
3070
+ } cuStreamWaitValue32_v2_params;
3071
+
3072
+ typedef struct cuStreamWriteValue64_v2_params_st {
3073
+ CUstream stream;
3074
+ CUdeviceptr addr;
3075
+ cuuint64_t value;
3076
+ unsigned int flags;
3077
+ } cuStreamWriteValue64_v2_params;
3078
+
3079
+ typedef struct cuStreamWaitValue64_v2_params_st {
3080
+ CUstream stream;
3081
+ CUdeviceptr addr;
3082
+ cuuint64_t value;
3083
+ unsigned int flags;
3084
+ } cuStreamWaitValue64_v2_params;
3085
+
3086
+ typedef struct cuStreamBatchMemOp_v2_params_st {
3087
+ CUstream stream;
3088
+ unsigned int count;
3089
+ CUstreamBatchMemOpParams *paramArray;
3090
+ unsigned int flags;
3091
+ } cuStreamBatchMemOp_v2_params;
3092
+
3093
+ typedef struct cuMemPrefetchAsync_params_st {
3094
+ CUdeviceptr devPtr;
3095
+ size_t count;
3096
+ CUdevice dstDevice;
3097
+ CUstream hStream;
3098
+ } cuMemPrefetchAsync_params;
3099
+
3100
+ typedef struct cuLaunchCooperativeKernel_params_st {
3101
+ CUfunction f;
3102
+ unsigned int gridDimX;
3103
+ unsigned int gridDimY;
3104
+ unsigned int gridDimZ;
3105
+ unsigned int blockDimX;
3106
+ unsigned int blockDimY;
3107
+ unsigned int blockDimZ;
3108
+ unsigned int sharedMemBytes;
3109
+ CUstream hStream;
3110
+ void **kernelParams;
3111
+ } cuLaunchCooperativeKernel_params;
3112
+
3113
+ typedef struct cuSignalExternalSemaphoresAsync_params_st {
3114
+ const CUexternalSemaphore *extSemArray;
3115
+ const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray;
3116
+ unsigned int numExtSems;
3117
+ CUstream stream;
3118
+ } cuSignalExternalSemaphoresAsync_params;
3119
+
3120
+ typedef struct cuWaitExternalSemaphoresAsync_params_st {
3121
+ const CUexternalSemaphore *extSemArray;
3122
+ const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray;
3123
+ unsigned int numExtSems;
3124
+ CUstream stream;
3125
+ } cuWaitExternalSemaphoresAsync_params;
3126
+
3127
+ typedef struct cuStreamBeginCapture_params_st {
3128
+ CUstream hStream;
3129
+ } cuStreamBeginCapture_params;
3130
+
3131
+ typedef struct cuStreamBeginCapture_ptsz_params_st {
3132
+ CUstream hStream;
3133
+ } cuStreamBeginCapture_ptsz_params;
3134
+
3135
+ typedef struct cuStreamBeginCapture_v2_params_st {
3136
+ CUstream hStream;
3137
+ CUstreamCaptureMode mode;
3138
+ } cuStreamBeginCapture_v2_params;
3139
+
3140
+ typedef struct cuStreamEndCapture_params_st {
3141
+ CUstream hStream;
3142
+ CUgraph *phGraph;
3143
+ } cuStreamEndCapture_params;
3144
+
3145
+ typedef struct cuStreamIsCapturing_params_st {
3146
+ CUstream hStream;
3147
+ CUstreamCaptureStatus *captureStatus;
3148
+ } cuStreamIsCapturing_params;
3149
+
3150
+ typedef struct cuStreamGetCaptureInfo_params_st {
3151
+ CUstream hStream;
3152
+ CUstreamCaptureStatus *captureStatus_out;
3153
+ cuuint64_t *id_out;
3154
+ } cuStreamGetCaptureInfo_params;
3155
+
3156
+ typedef struct cuStreamGetCaptureInfo_ptsz_params_st {
3157
+ CUstream hStream;
3158
+ CUstreamCaptureStatus *captureStatus_out;
3159
+ cuuint64_t *id_out;
3160
+ } cuStreamGetCaptureInfo_ptsz_params;
3161
+
3162
+ typedef struct cuStreamGetCaptureInfo_v2_params_st {
3163
+ CUstream hStream;
3164
+ CUstreamCaptureStatus *captureStatus_out;
3165
+ cuuint64_t *id_out;
3166
+ CUgraph *graph_out;
3167
+ const CUgraphNode **dependencies_out;
3168
+ size_t *numDependencies_out;
3169
+ } cuStreamGetCaptureInfo_v2_params;
3170
+
3171
+ typedef struct cuGraphAddKernelNode_params_st {
3172
+ CUgraphNode *phGraphNode;
3173
+ CUgraph hGraph;
3174
+ const CUgraphNode *dependencies;
3175
+ size_t numDependencies;
3176
+ const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams;
3177
+ } cuGraphAddKernelNode_params;
3178
+
3179
+ typedef struct cuGraphKernelNodeGetParams_params_st {
3180
+ CUgraphNode hNode;
3181
+ CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams;
3182
+ } cuGraphKernelNodeGetParams_params;
3183
+
3184
+ typedef struct cuGraphKernelNodeSetParams_params_st {
3185
+ CUgraphNode hNode;
3186
+ const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams;
3187
+ } cuGraphKernelNodeSetParams_params;
3188
+
3189
+ typedef struct cuGraphExecKernelNodeSetParams_params_st {
3190
+ CUgraphExec hGraphExec;
3191
+ CUgraphNode hNode;
3192
+ const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams;
3193
+ } cuGraphExecKernelNodeSetParams_params;
3194
+
3195
+ typedef struct cuGraphInstantiateWithParams_params_st {
3196
+ CUgraphExec *phGraphExec;
3197
+ CUgraph hGraph;
3198
+ CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams;
3199
+ } cuGraphInstantiateWithParams_params;
3200
+
3201
+ typedef struct cuGraphExecUpdate_params_st {
3202
+ CUgraphExec hGraphExec;
3203
+ CUgraph hGraph;
3204
+ CUgraphNode *hErrorNode_out;
3205
+ CUgraphExecUpdateResult *updateResult_out;
3206
+ } cuGraphExecUpdate_params;
3207
+
3208
+ typedef struct cuGraphUpload_params_st {
3209
+ CUgraphExec hGraph;
3210
+ CUstream hStream;
3211
+ } cuGraphUpload_params;
3212
+
3213
+ typedef struct cuGraphLaunch_params_st {
3214
+ CUgraphExec hGraph;
3215
+ CUstream hStream;
3216
+ } cuGraphLaunch_params;
3217
+
3218
+ typedef struct cuStreamCopyAttributes_params_st {
3219
+ CUstream dstStream;
3220
+ CUstream srcStream;
3221
+ } cuStreamCopyAttributes_params;
3222
+
3223
+ typedef struct cuStreamGetAttribute_params_st {
3224
+ CUstream hStream;
3225
+ CUstreamAttrID attr;
3226
+ CUstreamAttrValue *value;
3227
+ } cuStreamGetAttribute_params;
3228
+
3229
+ typedef struct cuStreamSetAttribute_params_st {
3230
+ CUstream hStream;
3231
+ CUstreamAttrID attr;
3232
+ const CUstreamAttrValue *param;
3233
+ } cuStreamSetAttribute_params;
3234
+
3235
+ typedef struct cuIpcOpenMemHandle_params_st {
3236
+ CUdeviceptr *pdptr;
3237
+ CUipcMemHandle handle;
3238
+ unsigned int Flags;
3239
+ } cuIpcOpenMemHandle_params;
3240
+
3241
+ typedef struct cuGraphInstantiate_params_st {
3242
+ CUgraphExec *phGraphExec;
3243
+ CUgraph hGraph;
3244
+ CUgraphNode *phErrorNode;
3245
+ char *logBuffer;
3246
+ size_t bufferSize;
3247
+ } cuGraphInstantiate_params;
3248
+
3249
+ typedef struct cuGraphInstantiate_v2_params_st {
3250
+ CUgraphExec *phGraphExec;
3251
+ CUgraph hGraph;
3252
+ CUgraphNode *phErrorNode;
3253
+ char *logBuffer;
3254
+ size_t bufferSize;
3255
+ } cuGraphInstantiate_v2_params;
3256
+
3257
+ typedef struct cuMemMapArrayAsync_params_st {
3258
+ CUarrayMapInfo *mapInfoList;
3259
+ unsigned int count;
3260
+ CUstream hStream;
3261
+ } cuMemMapArrayAsync_params;
3262
+
3263
+ typedef struct cuMemFreeAsync_params_st {
3264
+ CUdeviceptr dptr;
3265
+ CUstream hStream;
3266
+ } cuMemFreeAsync_params;
3267
+
3268
+ typedef struct cuMemAllocAsync_params_st {
3269
+ CUdeviceptr *dptr;
3270
+ size_t bytesize;
3271
+ CUstream hStream;
3272
+ } cuMemAllocAsync_params;
3273
+
3274
+ typedef struct cuMemAllocFromPoolAsync_params_st {
3275
+ CUdeviceptr *dptr;
3276
+ size_t bytesize;
3277
+ CUmemoryPool pool;
3278
+ CUstream hStream;
3279
+ } cuMemAllocFromPoolAsync_params;
3280
+
3281
+ typedef struct cuStreamUpdateCaptureDependencies_params_st {
3282
+ CUstream hStream;
3283
+ CUgraphNode *dependencies;
3284
+ size_t numDependencies;
3285
+ unsigned int flags;
3286
+ } cuStreamUpdateCaptureDependencies_params;
3287
+
3288
+ typedef struct cuGetProcAddress_params_st {
3289
+ const char *symbol;
3290
+ void **pfn;
3291
+ int cudaVersion;
3292
+ cuuint64_t flags;
3293
+ } cuGetProcAddress_params;
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h ADDED
@@ -0,0 +1,2126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_runtime_api.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaDeviceSetLimit_v3020_params_st {
12
+ enum cudaLimit limit;
13
+ size_t value;
14
+ } cudaDeviceSetLimit_v3020_params;
15
+
16
+ typedef struct cudaDeviceGetLimit_v3020_params_st {
17
+ size_t *pValue;
18
+ enum cudaLimit limit;
19
+ } cudaDeviceGetLimit_v3020_params;
20
+
21
+ typedef struct cudaDeviceGetTexture1DLinearMaxWidth_v11010_params_st {
22
+ size_t *maxWidthInElements;
23
+ const struct cudaChannelFormatDesc *fmtDesc;
24
+ int device;
25
+ } cudaDeviceGetTexture1DLinearMaxWidth_v11010_params;
26
+
27
+ typedef struct cudaDeviceGetCacheConfig_v3020_params_st {
28
+ enum cudaFuncCache *pCacheConfig;
29
+ } cudaDeviceGetCacheConfig_v3020_params;
30
+
31
+ typedef struct cudaDeviceGetStreamPriorityRange_v5050_params_st {
32
+ int *leastPriority;
33
+ int *greatestPriority;
34
+ } cudaDeviceGetStreamPriorityRange_v5050_params;
35
+
36
+ typedef struct cudaDeviceSetCacheConfig_v3020_params_st {
37
+ enum cudaFuncCache cacheConfig;
38
+ } cudaDeviceSetCacheConfig_v3020_params;
39
+
40
+ typedef struct cudaDeviceGetSharedMemConfig_v4020_params_st {
41
+ enum cudaSharedMemConfig *pConfig;
42
+ } cudaDeviceGetSharedMemConfig_v4020_params;
43
+
44
+ typedef struct cudaDeviceSetSharedMemConfig_v4020_params_st {
45
+ enum cudaSharedMemConfig config;
46
+ } cudaDeviceSetSharedMemConfig_v4020_params;
47
+
48
+ typedef struct cudaDeviceGetByPCIBusId_v4010_params_st {
49
+ int *device;
50
+ const char *pciBusId;
51
+ } cudaDeviceGetByPCIBusId_v4010_params;
52
+
53
+ typedef struct cudaDeviceGetPCIBusId_v4010_params_st {
54
+ char *pciBusId;
55
+ int len;
56
+ int device;
57
+ } cudaDeviceGetPCIBusId_v4010_params;
58
+
59
+ typedef struct cudaIpcGetEventHandle_v4010_params_st {
60
+ cudaIpcEventHandle_t *handle;
61
+ cudaEvent_t event;
62
+ } cudaIpcGetEventHandle_v4010_params;
63
+
64
+ typedef struct cudaIpcOpenEventHandle_v4010_params_st {
65
+ cudaEvent_t *event;
66
+ cudaIpcEventHandle_t handle;
67
+ } cudaIpcOpenEventHandle_v4010_params;
68
+
69
+ typedef struct cudaIpcGetMemHandle_v4010_params_st {
70
+ cudaIpcMemHandle_t *handle;
71
+ void *devPtr;
72
+ } cudaIpcGetMemHandle_v4010_params;
73
+
74
+ typedef struct cudaIpcOpenMemHandle_v4010_params_st {
75
+ void **devPtr;
76
+ cudaIpcMemHandle_t handle;
77
+ unsigned int flags;
78
+ } cudaIpcOpenMemHandle_v4010_params;
79
+
80
+ typedef struct cudaIpcCloseMemHandle_v4010_params_st {
81
+ void *devPtr;
82
+ } cudaIpcCloseMemHandle_v4010_params;
83
+
84
+ typedef struct cudaDeviceFlushGPUDirectRDMAWrites_v11030_params_st {
85
+ enum cudaFlushGPUDirectRDMAWritesTarget target;
86
+ enum cudaFlushGPUDirectRDMAWritesScope scope;
87
+ } cudaDeviceFlushGPUDirectRDMAWrites_v11030_params;
88
+
89
+ typedef struct cudaGetErrorName_v6050_params_st {
90
+ cudaError_t error;
91
+ } cudaGetErrorName_v6050_params;
92
+
93
+ typedef struct cudaGetErrorString_v3020_params_st {
94
+ cudaError_t error;
95
+ } cudaGetErrorString_v3020_params;
96
+
97
+ typedef struct cudaGetDeviceCount_v3020_params_st {
98
+ int *count;
99
+ } cudaGetDeviceCount_v3020_params;
100
+
101
+ typedef struct cudaGetDeviceProperties_v2_v12000_params_st {
102
+ struct cudaDeviceProp *prop;
103
+ int device;
104
+ } cudaGetDeviceProperties_v2_v12000_params;
105
+
106
+ typedef struct cudaDeviceGetAttribute_v5000_params_st {
107
+ int *value;
108
+ enum cudaDeviceAttr attr;
109
+ int device;
110
+ } cudaDeviceGetAttribute_v5000_params;
111
+
112
+ typedef struct cudaDeviceGetDefaultMemPool_v11020_params_st {
113
+ cudaMemPool_t *memPool;
114
+ int device;
115
+ } cudaDeviceGetDefaultMemPool_v11020_params;
116
+
117
+ typedef struct cudaDeviceSetMemPool_v11020_params_st {
118
+ int device;
119
+ cudaMemPool_t memPool;
120
+ } cudaDeviceSetMemPool_v11020_params;
121
+
122
+ typedef struct cudaDeviceGetMemPool_v11020_params_st {
123
+ cudaMemPool_t *memPool;
124
+ int device;
125
+ } cudaDeviceGetMemPool_v11020_params;
126
+
127
+ typedef struct cudaDeviceGetNvSciSyncAttributes_v10020_params_st {
128
+ void *nvSciSyncAttrList;
129
+ int device;
130
+ int flags;
131
+ } cudaDeviceGetNvSciSyncAttributes_v10020_params;
132
+
133
+ typedef struct cudaDeviceGetP2PAttribute_v8000_params_st {
134
+ int *value;
135
+ enum cudaDeviceP2PAttr attr;
136
+ int srcDevice;
137
+ int dstDevice;
138
+ } cudaDeviceGetP2PAttribute_v8000_params;
139
+
140
+ typedef struct cudaChooseDevice_v3020_params_st {
141
+ int *device;
142
+ const struct cudaDeviceProp *prop;
143
+ } cudaChooseDevice_v3020_params;
144
+
145
+ typedef struct cudaInitDevice_v12000_params_st {
146
+ int device;
147
+ unsigned int deviceFlags;
148
+ unsigned int flags;
149
+ } cudaInitDevice_v12000_params;
150
+
151
+ typedef struct cudaSetDevice_v3020_params_st {
152
+ int device;
153
+ } cudaSetDevice_v3020_params;
154
+
155
+ typedef struct cudaGetDevice_v3020_params_st {
156
+ int *device;
157
+ } cudaGetDevice_v3020_params;
158
+
159
+ typedef struct cudaSetValidDevices_v3020_params_st {
160
+ int *device_arr;
161
+ int len;
162
+ } cudaSetValidDevices_v3020_params;
163
+
164
+ typedef struct cudaSetDeviceFlags_v3020_params_st {
165
+ unsigned int flags;
166
+ } cudaSetDeviceFlags_v3020_params;
167
+
168
+ typedef struct cudaGetDeviceFlags_v7000_params_st {
169
+ unsigned int *flags;
170
+ } cudaGetDeviceFlags_v7000_params;
171
+
172
+ typedef struct cudaStreamCreate_v3020_params_st {
173
+ cudaStream_t *pStream;
174
+ } cudaStreamCreate_v3020_params;
175
+
176
+ typedef struct cudaStreamCreateWithFlags_v5000_params_st {
177
+ cudaStream_t *pStream;
178
+ unsigned int flags;
179
+ } cudaStreamCreateWithFlags_v5000_params;
180
+
181
+ typedef struct cudaStreamCreateWithPriority_v5050_params_st {
182
+ cudaStream_t *pStream;
183
+ unsigned int flags;
184
+ int priority;
185
+ } cudaStreamCreateWithPriority_v5050_params;
186
+
187
+ typedef struct cudaStreamGetPriority_ptsz_v7000_params_st {
188
+ cudaStream_t hStream;
189
+ int *priority;
190
+ } cudaStreamGetPriority_ptsz_v7000_params;
191
+
192
+ typedef struct cudaStreamGetFlags_ptsz_v7000_params_st {
193
+ cudaStream_t hStream;
194
+ unsigned int *flags;
195
+ } cudaStreamGetFlags_ptsz_v7000_params;
196
+
197
+ typedef struct cudaStreamGetId_ptsz_v12000_params_st {
198
+ cudaStream_t hStream;
199
+ unsigned long long *streamId;
200
+ } cudaStreamGetId_ptsz_v12000_params;
201
+
202
+ typedef struct cudaStreamCopyAttributes_ptsz_v11000_params_st {
203
+ cudaStream_t dst;
204
+ cudaStream_t src;
205
+ } cudaStreamCopyAttributes_ptsz_v11000_params;
206
+
207
+ typedef struct cudaStreamGetAttribute_ptsz_v11000_params_st {
208
+ cudaStream_t hStream;
209
+ cudaStreamAttrID attr;
210
+ cudaStreamAttrValue *value_out;
211
+ } cudaStreamGetAttribute_ptsz_v11000_params;
212
+
213
+ typedef struct cudaStreamSetAttribute_ptsz_v11000_params_st {
214
+ cudaStream_t hStream;
215
+ cudaStreamAttrID attr;
216
+ const cudaStreamAttrValue *value;
217
+ } cudaStreamSetAttribute_ptsz_v11000_params;
218
+
219
+ typedef struct cudaStreamDestroy_v5050_params_st {
220
+ cudaStream_t stream;
221
+ } cudaStreamDestroy_v5050_params;
222
+
223
+ typedef struct cudaStreamWaitEvent_ptsz_v7000_params_st {
224
+ cudaStream_t stream;
225
+ cudaEvent_t event;
226
+ unsigned int flags;
227
+ } cudaStreamWaitEvent_ptsz_v7000_params;
228
+
229
+ typedef struct cudaStreamAddCallback_ptsz_v7000_params_st {
230
+ cudaStream_t stream;
231
+ cudaStreamCallback_t callback;
232
+ void *userData;
233
+ unsigned int flags;
234
+ } cudaStreamAddCallback_ptsz_v7000_params;
235
+
236
+ typedef struct cudaStreamSynchronize_ptsz_v7000_params_st {
237
+ cudaStream_t stream;
238
+ } cudaStreamSynchronize_ptsz_v7000_params;
239
+
240
+ typedef struct cudaStreamQuery_ptsz_v7000_params_st {
241
+ cudaStream_t stream;
242
+ } cudaStreamQuery_ptsz_v7000_params;
243
+
244
+ typedef struct cudaStreamAttachMemAsync_ptsz_v7000_params_st {
245
+ cudaStream_t stream;
246
+ void *devPtr;
247
+ size_t length;
248
+ unsigned int flags;
249
+ } cudaStreamAttachMemAsync_ptsz_v7000_params;
250
+
251
+ typedef struct cudaStreamBeginCapture_ptsz_v10000_params_st {
252
+ cudaStream_t stream;
253
+ enum cudaStreamCaptureMode mode;
254
+ } cudaStreamBeginCapture_ptsz_v10000_params;
255
+
256
+ typedef struct cudaThreadExchangeStreamCaptureMode_v10010_params_st {
257
+ enum cudaStreamCaptureMode *mode;
258
+ } cudaThreadExchangeStreamCaptureMode_v10010_params;
259
+
260
+ typedef struct cudaStreamEndCapture_ptsz_v10000_params_st {
261
+ cudaStream_t stream;
262
+ cudaGraph_t *pGraph;
263
+ } cudaStreamEndCapture_ptsz_v10000_params;
264
+
265
+ typedef struct cudaStreamIsCapturing_ptsz_v10000_params_st {
266
+ cudaStream_t stream;
267
+ enum cudaStreamCaptureStatus *pCaptureStatus;
268
+ } cudaStreamIsCapturing_ptsz_v10000_params;
269
+
270
+ typedef struct cudaStreamGetCaptureInfo_v2_ptsz_v11030_params_st {
271
+ cudaStream_t stream;
272
+ enum cudaStreamCaptureStatus *captureStatus_out;
273
+ unsigned long long *id_out;
274
+ cudaGraph_t *graph_out;
275
+ const cudaGraphNode_t **dependencies_out;
276
+ size_t *numDependencies_out;
277
+ } cudaStreamGetCaptureInfo_v2_ptsz_v11030_params;
278
+
279
+ typedef struct cudaStreamUpdateCaptureDependencies_v11030_params_st {
280
+ cudaStream_t stream;
281
+ cudaGraphNode_t *dependencies;
282
+ size_t numDependencies;
283
+ unsigned int flags;
284
+ } cudaStreamUpdateCaptureDependencies_v11030_params;
285
+
286
+ typedef struct cudaEventCreate_v3020_params_st {
287
+ cudaEvent_t *event;
288
+ } cudaEventCreate_v3020_params;
289
+
290
+ typedef struct cudaEventCreateWithFlags_v3020_params_st {
291
+ cudaEvent_t *event;
292
+ unsigned int flags;
293
+ } cudaEventCreateWithFlags_v3020_params;
294
+
295
+ typedef struct cudaEventRecord_ptsz_v7000_params_st {
296
+ cudaEvent_t event;
297
+ cudaStream_t stream;
298
+ } cudaEventRecord_ptsz_v7000_params;
299
+
300
+ typedef struct cudaEventRecordWithFlags_ptsz_v11010_params_st {
301
+ cudaEvent_t event;
302
+ cudaStream_t stream;
303
+ unsigned int flags;
304
+ } cudaEventRecordWithFlags_ptsz_v11010_params;
305
+
306
+ typedef struct cudaEventQuery_v3020_params_st {
307
+ cudaEvent_t event;
308
+ } cudaEventQuery_v3020_params;
309
+
310
+ typedef struct cudaEventSynchronize_v3020_params_st {
311
+ cudaEvent_t event;
312
+ } cudaEventSynchronize_v3020_params;
313
+
314
+ typedef struct cudaEventDestroy_v3020_params_st {
315
+ cudaEvent_t event;
316
+ } cudaEventDestroy_v3020_params;
317
+
318
+ typedef struct cudaEventElapsedTime_v3020_params_st {
319
+ float *ms;
320
+ cudaEvent_t start;
321
+ cudaEvent_t end;
322
+ } cudaEventElapsedTime_v3020_params;
323
+
324
+ typedef struct cudaImportExternalMemory_v10000_params_st {
325
+ cudaExternalMemory_t *extMem_out;
326
+ const struct cudaExternalMemoryHandleDesc *memHandleDesc;
327
+ } cudaImportExternalMemory_v10000_params;
328
+
329
+ typedef struct cudaExternalMemoryGetMappedBuffer_v10000_params_st {
330
+ void **devPtr;
331
+ cudaExternalMemory_t extMem;
332
+ const struct cudaExternalMemoryBufferDesc *bufferDesc;
333
+ } cudaExternalMemoryGetMappedBuffer_v10000_params;
334
+
335
+ typedef struct cudaExternalMemoryGetMappedMipmappedArray_v10000_params_st {
336
+ cudaMipmappedArray_t *mipmap;
337
+ cudaExternalMemory_t extMem;
338
+ const struct cudaExternalMemoryMipmappedArrayDesc *mipmapDesc;
339
+ } cudaExternalMemoryGetMappedMipmappedArray_v10000_params;
340
+
341
+ typedef struct cudaDestroyExternalMemory_v10000_params_st {
342
+ cudaExternalMemory_t extMem;
343
+ } cudaDestroyExternalMemory_v10000_params;
344
+
345
+ typedef struct cudaImportExternalSemaphore_v10000_params_st {
346
+ cudaExternalSemaphore_t *extSem_out;
347
+ const struct cudaExternalSemaphoreHandleDesc *semHandleDesc;
348
+ } cudaImportExternalSemaphore_v10000_params;
349
+
350
+ typedef struct cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params_st {
351
+ const cudaExternalSemaphore_t *extSemArray;
352
+ const struct cudaExternalSemaphoreSignalParams *paramsArray;
353
+ unsigned int numExtSems;
354
+ cudaStream_t stream;
355
+ } cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params;
356
+
357
+ typedef struct cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params_st {
358
+ const cudaExternalSemaphore_t *extSemArray;
359
+ const struct cudaExternalSemaphoreWaitParams *paramsArray;
360
+ unsigned int numExtSems;
361
+ cudaStream_t stream;
362
+ } cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params;
363
+
364
+ typedef struct cudaDestroyExternalSemaphore_v10000_params_st {
365
+ cudaExternalSemaphore_t extSem;
366
+ } cudaDestroyExternalSemaphore_v10000_params;
367
+
368
+ typedef struct cudaLaunchKernel_ptsz_v7000_params_st {
369
+ const void *func;
370
+ dim3 gridDim;
371
+ dim3 blockDim;
372
+ void **args;
373
+ size_t sharedMem;
374
+ cudaStream_t stream;
375
+ } cudaLaunchKernel_ptsz_v7000_params;
376
+
377
+ typedef struct cudaLaunchKernelExC_ptsz_v11060_params_st {
378
+ const cudaLaunchConfig_t *config;
379
+ const void *func;
380
+ void **args;
381
+ } cudaLaunchKernelExC_ptsz_v11060_params;
382
+
383
+ typedef struct cudaLaunchCooperativeKernel_ptsz_v9000_params_st {
384
+ const void *func;
385
+ dim3 gridDim;
386
+ dim3 blockDim;
387
+ void **args;
388
+ size_t sharedMem;
389
+ cudaStream_t stream;
390
+ } cudaLaunchCooperativeKernel_ptsz_v9000_params;
391
+
392
+ typedef struct cudaLaunchCooperativeKernelMultiDevice_v9000_params_st {
393
+ struct cudaLaunchParams *launchParamsList;
394
+ unsigned int numDevices;
395
+ unsigned int flags;
396
+ } cudaLaunchCooperativeKernelMultiDevice_v9000_params;
397
+
398
+ typedef struct cudaFuncSetCacheConfig_v3020_params_st {
399
+ const void *func;
400
+ enum cudaFuncCache cacheConfig;
401
+ } cudaFuncSetCacheConfig_v3020_params;
402
+
403
+ typedef struct cudaFuncSetSharedMemConfig_v4020_params_st {
404
+ const void *func;
405
+ enum cudaSharedMemConfig config;
406
+ } cudaFuncSetSharedMemConfig_v4020_params;
407
+
408
+ typedef struct cudaFuncGetAttributes_v3020_params_st {
409
+ struct cudaFuncAttributes *attr;
410
+ const void *func;
411
+ } cudaFuncGetAttributes_v3020_params;
412
+
413
+ typedef struct cudaFuncSetAttribute_v9000_params_st {
414
+ const void *func;
415
+ enum cudaFuncAttribute attr;
416
+ int value;
417
+ } cudaFuncSetAttribute_v9000_params;
418
+
419
+ typedef struct cudaLaunchHostFunc_ptsz_v10000_params_st {
420
+ cudaStream_t stream;
421
+ cudaHostFn_t fn;
422
+ void *userData;
423
+ } cudaLaunchHostFunc_ptsz_v10000_params;
424
+
425
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params_st {
426
+ int *numBlocks;
427
+ const void *func;
428
+ int blockSize;
429
+ size_t dynamicSMemSize;
430
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params;
431
+
432
+ typedef struct cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params_st {
433
+ size_t *dynamicSmemSize;
434
+ const void *func;
435
+ int numBlocks;
436
+ int blockSize;
437
+ } cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params;
438
+
439
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params_st {
440
+ int *numBlocks;
441
+ const void *func;
442
+ int blockSize;
443
+ size_t dynamicSMemSize;
444
+ unsigned int flags;
445
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params;
446
+
447
+ typedef struct cudaOccupancyMaxPotentialClusterSize_v11070_params_st {
448
+ int *clusterSize;
449
+ const void *func;
450
+ const cudaLaunchConfig_t *launchConfig;
451
+ } cudaOccupancyMaxPotentialClusterSize_v11070_params;
452
+
453
+ typedef struct cudaOccupancyMaxActiveClusters_v11070_params_st {
454
+ int *numClusters;
455
+ const void *func;
456
+ const cudaLaunchConfig_t *launchConfig;
457
+ } cudaOccupancyMaxActiveClusters_v11070_params;
458
+
459
+ typedef struct cudaMallocManaged_v6000_params_st {
460
+ void **devPtr;
461
+ size_t size;
462
+ unsigned int flags;
463
+ } cudaMallocManaged_v6000_params;
464
+
465
+ typedef struct cudaMalloc_v3020_params_st {
466
+ void **devPtr;
467
+ size_t size;
468
+ } cudaMalloc_v3020_params;
469
+
470
+ typedef struct cudaMallocHost_v3020_params_st {
471
+ void **ptr;
472
+ size_t size;
473
+ } cudaMallocHost_v3020_params;
474
+
475
+ typedef struct cudaMallocPitch_v3020_params_st {
476
+ void **devPtr;
477
+ size_t *pitch;
478
+ size_t width;
479
+ size_t height;
480
+ } cudaMallocPitch_v3020_params;
481
+
482
+ typedef struct cudaMallocArray_v3020_params_st {
483
+ cudaArray_t *array;
484
+ const struct cudaChannelFormatDesc *desc;
485
+ size_t width;
486
+ size_t height;
487
+ unsigned int flags;
488
+ } cudaMallocArray_v3020_params;
489
+
490
+ typedef struct cudaFree_v3020_params_st {
491
+ void *devPtr;
492
+ } cudaFree_v3020_params;
493
+
494
+ typedef struct cudaFreeHost_v3020_params_st {
495
+ void *ptr;
496
+ } cudaFreeHost_v3020_params;
497
+
498
+ typedef struct cudaFreeArray_v3020_params_st {
499
+ cudaArray_t array;
500
+ } cudaFreeArray_v3020_params;
501
+
502
+ typedef struct cudaFreeMipmappedArray_v5000_params_st {
503
+ cudaMipmappedArray_t mipmappedArray;
504
+ } cudaFreeMipmappedArray_v5000_params;
505
+
506
+ typedef struct cudaHostAlloc_v3020_params_st {
507
+ void **pHost;
508
+ size_t size;
509
+ unsigned int flags;
510
+ } cudaHostAlloc_v3020_params;
511
+
512
+ typedef struct cudaHostRegister_v4000_params_st {
513
+ void *ptr;
514
+ size_t size;
515
+ unsigned int flags;
516
+ } cudaHostRegister_v4000_params;
517
+
518
+ typedef struct cudaHostUnregister_v4000_params_st {
519
+ void *ptr;
520
+ } cudaHostUnregister_v4000_params;
521
+
522
+ typedef struct cudaHostGetDevicePointer_v3020_params_st {
523
+ void **pDevice;
524
+ void *pHost;
525
+ unsigned int flags;
526
+ } cudaHostGetDevicePointer_v3020_params;
527
+
528
+ typedef struct cudaHostGetFlags_v3020_params_st {
529
+ unsigned int *pFlags;
530
+ void *pHost;
531
+ } cudaHostGetFlags_v3020_params;
532
+
533
+ typedef struct cudaMalloc3D_v3020_params_st {
534
+ struct cudaPitchedPtr *pitchedDevPtr;
535
+ struct cudaExtent extent;
536
+ } cudaMalloc3D_v3020_params;
537
+
538
+ typedef struct cudaMalloc3DArray_v3020_params_st {
539
+ cudaArray_t *array;
540
+ const struct cudaChannelFormatDesc *desc;
541
+ struct cudaExtent extent;
542
+ unsigned int flags;
543
+ } cudaMalloc3DArray_v3020_params;
544
+
545
+ typedef struct cudaMallocMipmappedArray_v5000_params_st {
546
+ cudaMipmappedArray_t *mipmappedArray;
547
+ const struct cudaChannelFormatDesc *desc;
548
+ struct cudaExtent extent;
549
+ unsigned int numLevels;
550
+ unsigned int flags;
551
+ } cudaMallocMipmappedArray_v5000_params;
552
+
553
+ typedef struct cudaGetMipmappedArrayLevel_v5000_params_st {
554
+ cudaArray_t *levelArray;
555
+ cudaMipmappedArray_const_t mipmappedArray;
556
+ unsigned int level;
557
+ } cudaGetMipmappedArrayLevel_v5000_params;
558
+
559
+ typedef struct cudaMemcpy3D_ptds_v7000_params_st {
560
+ const struct cudaMemcpy3DParms *p;
561
+ } cudaMemcpy3D_ptds_v7000_params;
562
+
563
+ typedef struct cudaMemcpy3DPeer_ptds_v7000_params_st {
564
+ const struct cudaMemcpy3DPeerParms *p;
565
+ } cudaMemcpy3DPeer_ptds_v7000_params;
566
+
567
+ typedef struct cudaMemcpy3DAsync_ptsz_v7000_params_st {
568
+ const struct cudaMemcpy3DParms *p;
569
+ cudaStream_t stream;
570
+ } cudaMemcpy3DAsync_ptsz_v7000_params;
571
+
572
+ typedef struct cudaMemcpy3DPeerAsync_ptsz_v7000_params_st {
573
+ const struct cudaMemcpy3DPeerParms *p;
574
+ cudaStream_t stream;
575
+ } cudaMemcpy3DPeerAsync_ptsz_v7000_params;
576
+
577
+ typedef struct cudaMemGetInfo_v3020_params_st {
578
+ size_t *free;
579
+ size_t *total;
580
+ } cudaMemGetInfo_v3020_params;
581
+
582
+ typedef struct cudaArrayGetInfo_v4010_params_st {
583
+ struct cudaChannelFormatDesc *desc;
584
+ struct cudaExtent *extent;
585
+ unsigned int *flags;
586
+ cudaArray_t array;
587
+ } cudaArrayGetInfo_v4010_params;
588
+
589
+ typedef struct cudaArrayGetPlane_v11020_params_st {
590
+ cudaArray_t *pPlaneArray;
591
+ cudaArray_t hArray;
592
+ unsigned int planeIdx;
593
+ } cudaArrayGetPlane_v11020_params;
594
+
595
+ typedef struct cudaArrayGetMemoryRequirements_v11060_params_st {
596
+ struct cudaArrayMemoryRequirements *memoryRequirements;
597
+ cudaArray_t array;
598
+ int device;
599
+ } cudaArrayGetMemoryRequirements_v11060_params;
600
+
601
+ typedef struct cudaMipmappedArrayGetMemoryRequirements_v11060_params_st {
602
+ struct cudaArrayMemoryRequirements *memoryRequirements;
603
+ cudaMipmappedArray_t mipmap;
604
+ int device;
605
+ } cudaMipmappedArrayGetMemoryRequirements_v11060_params;
606
+
607
+ typedef struct cudaArrayGetSparseProperties_v11010_params_st {
608
+ struct cudaArraySparseProperties *sparseProperties;
609
+ cudaArray_t array;
610
+ } cudaArrayGetSparseProperties_v11010_params;
611
+
612
+ typedef struct cudaMipmappedArrayGetSparseProperties_v11010_params_st {
613
+ struct cudaArraySparseProperties *sparseProperties;
614
+ cudaMipmappedArray_t mipmap;
615
+ } cudaMipmappedArrayGetSparseProperties_v11010_params;
616
+
617
+ typedef struct cudaMemcpy_ptds_v7000_params_st {
618
+ void *dst;
619
+ const void *src;
620
+ size_t count;
621
+ enum cudaMemcpyKind kind;
622
+ } cudaMemcpy_ptds_v7000_params;
623
+
624
+ typedef struct cudaMemcpyPeer_v4000_params_st {
625
+ void *dst;
626
+ int dstDevice;
627
+ const void *src;
628
+ int srcDevice;
629
+ size_t count;
630
+ } cudaMemcpyPeer_v4000_params;
631
+
632
+ typedef struct cudaMemcpy2D_ptds_v7000_params_st {
633
+ void *dst;
634
+ size_t dpitch;
635
+ const void *src;
636
+ size_t spitch;
637
+ size_t width;
638
+ size_t height;
639
+ enum cudaMemcpyKind kind;
640
+ } cudaMemcpy2D_ptds_v7000_params;
641
+
642
+ typedef struct cudaMemcpy2DToArray_ptds_v7000_params_st {
643
+ cudaArray_t dst;
644
+ size_t wOffset;
645
+ size_t hOffset;
646
+ const void *src;
647
+ size_t spitch;
648
+ size_t width;
649
+ size_t height;
650
+ enum cudaMemcpyKind kind;
651
+ } cudaMemcpy2DToArray_ptds_v7000_params;
652
+
653
+ typedef struct cudaMemcpy2DFromArray_ptds_v7000_params_st {
654
+ void *dst;
655
+ size_t dpitch;
656
+ cudaArray_const_t src;
657
+ size_t wOffset;
658
+ size_t hOffset;
659
+ size_t width;
660
+ size_t height;
661
+ enum cudaMemcpyKind kind;
662
+ } cudaMemcpy2DFromArray_ptds_v7000_params;
663
+
664
+ typedef struct cudaMemcpy2DArrayToArray_ptds_v7000_params_st {
665
+ cudaArray_t dst;
666
+ size_t wOffsetDst;
667
+ size_t hOffsetDst;
668
+ cudaArray_const_t src;
669
+ size_t wOffsetSrc;
670
+ size_t hOffsetSrc;
671
+ size_t width;
672
+ size_t height;
673
+ enum cudaMemcpyKind kind;
674
+ } cudaMemcpy2DArrayToArray_ptds_v7000_params;
675
+
676
+ typedef struct cudaMemcpyToSymbol_ptds_v7000_params_st {
677
+ const void *symbol;
678
+ const void *src;
679
+ size_t count;
680
+ size_t offset;
681
+ enum cudaMemcpyKind kind;
682
+ } cudaMemcpyToSymbol_ptds_v7000_params;
683
+
684
+ typedef struct cudaMemcpyFromSymbol_ptds_v7000_params_st {
685
+ void *dst;
686
+ const void *symbol;
687
+ size_t count;
688
+ size_t offset;
689
+ enum cudaMemcpyKind kind;
690
+ } cudaMemcpyFromSymbol_ptds_v7000_params;
691
+
692
+ typedef struct cudaMemcpyAsync_ptsz_v7000_params_st {
693
+ void *dst;
694
+ const void *src;
695
+ size_t count;
696
+ enum cudaMemcpyKind kind;
697
+ cudaStream_t stream;
698
+ } cudaMemcpyAsync_ptsz_v7000_params;
699
+
700
+ typedef struct cudaMemcpyPeerAsync_v4000_params_st {
701
+ void *dst;
702
+ int dstDevice;
703
+ const void *src;
704
+ int srcDevice;
705
+ size_t count;
706
+ cudaStream_t stream;
707
+ } cudaMemcpyPeerAsync_v4000_params;
708
+
709
+ typedef struct cudaMemcpy2DAsync_ptsz_v7000_params_st {
710
+ void *dst;
711
+ size_t dpitch;
712
+ const void *src;
713
+ size_t spitch;
714
+ size_t width;
715
+ size_t height;
716
+ enum cudaMemcpyKind kind;
717
+ cudaStream_t stream;
718
+ } cudaMemcpy2DAsync_ptsz_v7000_params;
719
+
720
+ typedef struct cudaMemcpy2DToArrayAsync_ptsz_v7000_params_st {
721
+ cudaArray_t dst;
722
+ size_t wOffset;
723
+ size_t hOffset;
724
+ const void *src;
725
+ size_t spitch;
726
+ size_t width;
727
+ size_t height;
728
+ enum cudaMemcpyKind kind;
729
+ cudaStream_t stream;
730
+ } cudaMemcpy2DToArrayAsync_ptsz_v7000_params;
731
+
732
+ typedef struct cudaMemcpy2DFromArrayAsync_ptsz_v7000_params_st {
733
+ void *dst;
734
+ size_t dpitch;
735
+ cudaArray_const_t src;
736
+ size_t wOffset;
737
+ size_t hOffset;
738
+ size_t width;
739
+ size_t height;
740
+ enum cudaMemcpyKind kind;
741
+ cudaStream_t stream;
742
+ } cudaMemcpy2DFromArrayAsync_ptsz_v7000_params;
743
+
744
+ typedef struct cudaMemcpyToSymbolAsync_ptsz_v7000_params_st {
745
+ const void *symbol;
746
+ const void *src;
747
+ size_t count;
748
+ size_t offset;
749
+ enum cudaMemcpyKind kind;
750
+ cudaStream_t stream;
751
+ } cudaMemcpyToSymbolAsync_ptsz_v7000_params;
752
+
753
+ typedef struct cudaMemcpyFromSymbolAsync_ptsz_v7000_params_st {
754
+ void *dst;
755
+ const void *symbol;
756
+ size_t count;
757
+ size_t offset;
758
+ enum cudaMemcpyKind kind;
759
+ cudaStream_t stream;
760
+ } cudaMemcpyFromSymbolAsync_ptsz_v7000_params;
761
+
762
+ typedef struct cudaMemset_ptds_v7000_params_st {
763
+ void *devPtr;
764
+ int value;
765
+ size_t count;
766
+ } cudaMemset_ptds_v7000_params;
767
+
768
+ typedef struct cudaMemset2D_ptds_v7000_params_st {
769
+ void *devPtr;
770
+ size_t pitch;
771
+ int value;
772
+ size_t width;
773
+ size_t height;
774
+ } cudaMemset2D_ptds_v7000_params;
775
+
776
+ typedef struct cudaMemset3D_ptds_v7000_params_st {
777
+ struct cudaPitchedPtr pitchedDevPtr;
778
+ int value;
779
+ struct cudaExtent extent;
780
+ } cudaMemset3D_ptds_v7000_params;
781
+
782
+ typedef struct cudaMemsetAsync_ptsz_v7000_params_st {
783
+ void *devPtr;
784
+ int value;
785
+ size_t count;
786
+ cudaStream_t stream;
787
+ } cudaMemsetAsync_ptsz_v7000_params;
788
+
789
+ typedef struct cudaMemset2DAsync_ptsz_v7000_params_st {
790
+ void *devPtr;
791
+ size_t pitch;
792
+ int value;
793
+ size_t width;
794
+ size_t height;
795
+ cudaStream_t stream;
796
+ } cudaMemset2DAsync_ptsz_v7000_params;
797
+
798
+ typedef struct cudaMemset3DAsync_ptsz_v7000_params_st {
799
+ struct cudaPitchedPtr pitchedDevPtr;
800
+ int value;
801
+ struct cudaExtent extent;
802
+ cudaStream_t stream;
803
+ } cudaMemset3DAsync_ptsz_v7000_params;
804
+
805
+ typedef struct cudaGetSymbolAddress_v3020_params_st {
806
+ void **devPtr;
807
+ const void *symbol;
808
+ } cudaGetSymbolAddress_v3020_params;
809
+
810
+ typedef struct cudaGetSymbolSize_v3020_params_st {
811
+ size_t *size;
812
+ const void *symbol;
813
+ } cudaGetSymbolSize_v3020_params;
814
+
815
+ typedef struct cudaMemPrefetchAsync_ptsz_v8000_params_st {
816
+ const void *devPtr;
817
+ size_t count;
818
+ int dstDevice;
819
+ cudaStream_t stream;
820
+ } cudaMemPrefetchAsync_ptsz_v8000_params;
821
+
822
+ typedef struct cudaMemAdvise_v8000_params_st {
823
+ const void *devPtr;
824
+ size_t count;
825
+ enum cudaMemoryAdvise advice;
826
+ int device;
827
+ } cudaMemAdvise_v8000_params;
828
+
829
+ typedef struct cudaMemRangeGetAttribute_v8000_params_st {
830
+ void *data;
831
+ size_t dataSize;
832
+ enum cudaMemRangeAttribute attribute;
833
+ const void *devPtr;
834
+ size_t count;
835
+ } cudaMemRangeGetAttribute_v8000_params;
836
+
837
+ typedef struct cudaMemRangeGetAttributes_v8000_params_st {
838
+ void **data;
839
+ size_t *dataSizes;
840
+ enum cudaMemRangeAttribute *attributes;
841
+ size_t numAttributes;
842
+ const void *devPtr;
843
+ size_t count;
844
+ } cudaMemRangeGetAttributes_v8000_params;
845
+
846
+ typedef struct cudaMemcpyToArray_ptds_v7000_params_st {
847
+ cudaArray_t dst;
848
+ size_t wOffset;
849
+ size_t hOffset;
850
+ const void *src;
851
+ size_t count;
852
+ enum cudaMemcpyKind kind;
853
+ } cudaMemcpyToArray_ptds_v7000_params;
854
+
855
+ typedef struct cudaMemcpyFromArray_ptds_v7000_params_st {
856
+ void *dst;
857
+ cudaArray_const_t src;
858
+ size_t wOffset;
859
+ size_t hOffset;
860
+ size_t count;
861
+ enum cudaMemcpyKind kind;
862
+ } cudaMemcpyFromArray_ptds_v7000_params;
863
+
864
+ typedef struct cudaMemcpyArrayToArray_ptds_v7000_params_st {
865
+ cudaArray_t dst;
866
+ size_t wOffsetDst;
867
+ size_t hOffsetDst;
868
+ cudaArray_const_t src;
869
+ size_t wOffsetSrc;
870
+ size_t hOffsetSrc;
871
+ size_t count;
872
+ enum cudaMemcpyKind kind;
873
+ } cudaMemcpyArrayToArray_ptds_v7000_params;
874
+
875
+ typedef struct cudaMemcpyToArrayAsync_ptsz_v7000_params_st {
876
+ cudaArray_t dst;
877
+ size_t wOffset;
878
+ size_t hOffset;
879
+ const void *src;
880
+ size_t count;
881
+ enum cudaMemcpyKind kind;
882
+ cudaStream_t stream;
883
+ } cudaMemcpyToArrayAsync_ptsz_v7000_params;
884
+
885
+ typedef struct cudaMemcpyFromArrayAsync_ptsz_v7000_params_st {
886
+ void *dst;
887
+ cudaArray_const_t src;
888
+ size_t wOffset;
889
+ size_t hOffset;
890
+ size_t count;
891
+ enum cudaMemcpyKind kind;
892
+ cudaStream_t stream;
893
+ } cudaMemcpyFromArrayAsync_ptsz_v7000_params;
894
+
895
+ typedef struct cudaMallocAsync_ptsz_v11020_params_st {
896
+ void **devPtr;
897
+ size_t size;
898
+ cudaStream_t hStream;
899
+ } cudaMallocAsync_ptsz_v11020_params;
900
+
901
+ typedef struct cudaFreeAsync_ptsz_v11020_params_st {
902
+ void *devPtr;
903
+ cudaStream_t hStream;
904
+ } cudaFreeAsync_ptsz_v11020_params;
905
+
906
+ typedef struct cudaMemPoolTrimTo_v11020_params_st {
907
+ cudaMemPool_t memPool;
908
+ size_t minBytesToKeep;
909
+ } cudaMemPoolTrimTo_v11020_params;
910
+
911
+ typedef struct cudaMemPoolSetAttribute_v11020_params_st {
912
+ cudaMemPool_t memPool;
913
+ enum cudaMemPoolAttr attr;
914
+ void *value;
915
+ } cudaMemPoolSetAttribute_v11020_params;
916
+
917
+ typedef struct cudaMemPoolGetAttribute_v11020_params_st {
918
+ cudaMemPool_t memPool;
919
+ enum cudaMemPoolAttr attr;
920
+ void *value;
921
+ } cudaMemPoolGetAttribute_v11020_params;
922
+
923
+ typedef struct cudaMemPoolSetAccess_v11020_params_st {
924
+ cudaMemPool_t memPool;
925
+ const struct cudaMemAccessDesc *descList;
926
+ size_t count;
927
+ } cudaMemPoolSetAccess_v11020_params;
928
+
929
+ typedef struct cudaMemPoolGetAccess_v11020_params_st {
930
+ enum cudaMemAccessFlags *flags;
931
+ cudaMemPool_t memPool;
932
+ struct cudaMemLocation *location;
933
+ } cudaMemPoolGetAccess_v11020_params;
934
+
935
+ typedef struct cudaMemPoolCreate_v11020_params_st {
936
+ cudaMemPool_t *memPool;
937
+ const struct cudaMemPoolProps *poolProps;
938
+ } cudaMemPoolCreate_v11020_params;
939
+
940
+ typedef struct cudaMemPoolDestroy_v11020_params_st {
941
+ cudaMemPool_t memPool;
942
+ } cudaMemPoolDestroy_v11020_params;
943
+
944
+ typedef struct cudaMallocFromPoolAsync_ptsz_v11020_params_st {
945
+ void **ptr;
946
+ size_t size;
947
+ cudaMemPool_t memPool;
948
+ cudaStream_t stream;
949
+ } cudaMallocFromPoolAsync_ptsz_v11020_params;
950
+
951
+ typedef struct cudaMemPoolExportToShareableHandle_v11020_params_st {
952
+ void *shareableHandle;
953
+ cudaMemPool_t memPool;
954
+ enum cudaMemAllocationHandleType handleType;
955
+ unsigned int flags;
956
+ } cudaMemPoolExportToShareableHandle_v11020_params;
957
+
958
+ typedef struct cudaMemPoolImportFromShareableHandle_v11020_params_st {
959
+ cudaMemPool_t *memPool;
960
+ void *shareableHandle;
961
+ enum cudaMemAllocationHandleType handleType;
962
+ unsigned int flags;
963
+ } cudaMemPoolImportFromShareableHandle_v11020_params;
964
+
965
+ typedef struct cudaMemPoolExportPointer_v11020_params_st {
966
+ struct cudaMemPoolPtrExportData *exportData;
967
+ void *ptr;
968
+ } cudaMemPoolExportPointer_v11020_params;
969
+
970
+ typedef struct cudaMemPoolImportPointer_v11020_params_st {
971
+ void **ptr;
972
+ cudaMemPool_t memPool;
973
+ struct cudaMemPoolPtrExportData *exportData;
974
+ } cudaMemPoolImportPointer_v11020_params;
975
+
976
+ typedef struct cudaPointerGetAttributes_v4000_params_st {
977
+ struct cudaPointerAttributes *attributes;
978
+ const void *ptr;
979
+ } cudaPointerGetAttributes_v4000_params;
980
+
981
+ typedef struct cudaDeviceCanAccessPeer_v4000_params_st {
982
+ int *canAccessPeer;
983
+ int device;
984
+ int peerDevice;
985
+ } cudaDeviceCanAccessPeer_v4000_params;
986
+
987
+ typedef struct cudaDeviceEnablePeerAccess_v4000_params_st {
988
+ int peerDevice;
989
+ unsigned int flags;
990
+ } cudaDeviceEnablePeerAccess_v4000_params;
991
+
992
+ typedef struct cudaDeviceDisablePeerAccess_v4000_params_st {
993
+ int peerDevice;
994
+ } cudaDeviceDisablePeerAccess_v4000_params;
995
+
996
+ typedef struct cudaGraphicsUnregisterResource_v3020_params_st {
997
+ cudaGraphicsResource_t resource;
998
+ } cudaGraphicsUnregisterResource_v3020_params;
999
+
1000
+ typedef struct cudaGraphicsResourceSetMapFlags_v3020_params_st {
1001
+ cudaGraphicsResource_t resource;
1002
+ unsigned int flags;
1003
+ } cudaGraphicsResourceSetMapFlags_v3020_params;
1004
+
1005
+ typedef struct cudaGraphicsMapResources_v3020_params_st {
1006
+ int count;
1007
+ cudaGraphicsResource_t *resources;
1008
+ cudaStream_t stream;
1009
+ } cudaGraphicsMapResources_v3020_params;
1010
+
1011
+ typedef struct cudaGraphicsUnmapResources_v3020_params_st {
1012
+ int count;
1013
+ cudaGraphicsResource_t *resources;
1014
+ cudaStream_t stream;
1015
+ } cudaGraphicsUnmapResources_v3020_params;
1016
+
1017
+ typedef struct cudaGraphicsResourceGetMappedPointer_v3020_params_st {
1018
+ void **devPtr;
1019
+ size_t *size;
1020
+ cudaGraphicsResource_t resource;
1021
+ } cudaGraphicsResourceGetMappedPointer_v3020_params;
1022
+
1023
+ typedef struct cudaGraphicsSubResourceGetMappedArray_v3020_params_st {
1024
+ cudaArray_t *array;
1025
+ cudaGraphicsResource_t resource;
1026
+ unsigned int arrayIndex;
1027
+ unsigned int mipLevel;
1028
+ } cudaGraphicsSubResourceGetMappedArray_v3020_params;
1029
+
1030
+ typedef struct cudaGraphicsResourceGetMappedMipmappedArray_v5000_params_st {
1031
+ cudaMipmappedArray_t *mipmappedArray;
1032
+ cudaGraphicsResource_t resource;
1033
+ } cudaGraphicsResourceGetMappedMipmappedArray_v5000_params;
1034
+
1035
+ typedef struct cudaGetChannelDesc_v3020_params_st {
1036
+ struct cudaChannelFormatDesc *desc;
1037
+ cudaArray_const_t array;
1038
+ } cudaGetChannelDesc_v3020_params;
1039
+
1040
+ typedef struct cudaCreateChannelDesc_v3020_params_st {
1041
+ int x;
1042
+ int y;
1043
+ int z;
1044
+ int w;
1045
+ enum cudaChannelFormatKind f;
1046
+ } cudaCreateChannelDesc_v3020_params;
1047
+
1048
+ typedef struct cudaCreateTextureObject_v5000_params_st {
1049
+ cudaTextureObject_t *pTexObject;
1050
+ const struct cudaResourceDesc *pResDesc;
1051
+ const struct cudaTextureDesc *pTexDesc;
1052
+ const struct cudaResourceViewDesc *pResViewDesc;
1053
+ } cudaCreateTextureObject_v5000_params;
1054
+
1055
+ typedef struct cudaDestroyTextureObject_v5000_params_st {
1056
+ cudaTextureObject_t texObject;
1057
+ } cudaDestroyTextureObject_v5000_params;
1058
+
1059
+ typedef struct cudaGetTextureObjectResourceDesc_v5000_params_st {
1060
+ struct cudaResourceDesc *pResDesc;
1061
+ cudaTextureObject_t texObject;
1062
+ } cudaGetTextureObjectResourceDesc_v5000_params;
1063
+
1064
+ typedef struct cudaGetTextureObjectTextureDesc_v5000_params_st {
1065
+ struct cudaTextureDesc *pTexDesc;
1066
+ cudaTextureObject_t texObject;
1067
+ } cudaGetTextureObjectTextureDesc_v5000_params;
1068
+
1069
+ typedef struct cudaGetTextureObjectResourceViewDesc_v5000_params_st {
1070
+ struct cudaResourceViewDesc *pResViewDesc;
1071
+ cudaTextureObject_t texObject;
1072
+ } cudaGetTextureObjectResourceViewDesc_v5000_params;
1073
+
1074
+ typedef struct cudaCreateSurfaceObject_v5000_params_st {
1075
+ cudaSurfaceObject_t *pSurfObject;
1076
+ const struct cudaResourceDesc *pResDesc;
1077
+ } cudaCreateSurfaceObject_v5000_params;
1078
+
1079
+ typedef struct cudaDestroySurfaceObject_v5000_params_st {
1080
+ cudaSurfaceObject_t surfObject;
1081
+ } cudaDestroySurfaceObject_v5000_params;
1082
+
1083
+ typedef struct cudaGetSurfaceObjectResourceDesc_v5000_params_st {
1084
+ struct cudaResourceDesc *pResDesc;
1085
+ cudaSurfaceObject_t surfObject;
1086
+ } cudaGetSurfaceObjectResourceDesc_v5000_params;
1087
+
1088
+ typedef struct cudaDriverGetVersion_v3020_params_st {
1089
+ int *driverVersion;
1090
+ } cudaDriverGetVersion_v3020_params;
1091
+
1092
+ typedef struct cudaRuntimeGetVersion_v3020_params_st {
1093
+ int *runtimeVersion;
1094
+ } cudaRuntimeGetVersion_v3020_params;
1095
+
1096
+ typedef struct cudaGraphCreate_v10000_params_st {
1097
+ cudaGraph_t *pGraph;
1098
+ unsigned int flags;
1099
+ } cudaGraphCreate_v10000_params;
1100
+
1101
+ typedef struct cudaGraphAddKernelNode_v10000_params_st {
1102
+ cudaGraphNode_t *pGraphNode;
1103
+ cudaGraph_t graph;
1104
+ const cudaGraphNode_t *pDependencies;
1105
+ size_t numDependencies;
1106
+ const struct cudaKernelNodeParams *pNodeParams;
1107
+ } cudaGraphAddKernelNode_v10000_params;
1108
+
1109
+ typedef struct cudaGraphKernelNodeGetParams_v10000_params_st {
1110
+ cudaGraphNode_t node;
1111
+ struct cudaKernelNodeParams *pNodeParams;
1112
+ } cudaGraphKernelNodeGetParams_v10000_params;
1113
+
1114
+ typedef struct cudaGraphKernelNodeSetParams_v10000_params_st {
1115
+ cudaGraphNode_t node;
1116
+ const struct cudaKernelNodeParams *pNodeParams;
1117
+ } cudaGraphKernelNodeSetParams_v10000_params;
1118
+
1119
+ typedef struct cudaGraphKernelNodeCopyAttributes_v11000_params_st {
1120
+ cudaGraphNode_t hSrc;
1121
+ cudaGraphNode_t hDst;
1122
+ } cudaGraphKernelNodeCopyAttributes_v11000_params;
1123
+
1124
+ typedef struct cudaGraphKernelNodeGetAttribute_v11000_params_st {
1125
+ cudaGraphNode_t hNode;
1126
+ cudaKernelNodeAttrID attr;
1127
+ cudaKernelNodeAttrValue *value_out;
1128
+ } cudaGraphKernelNodeGetAttribute_v11000_params;
1129
+
1130
+ typedef struct cudaGraphKernelNodeSetAttribute_v11000_params_st {
1131
+ cudaGraphNode_t hNode;
1132
+ cudaKernelNodeAttrID attr;
1133
+ const cudaKernelNodeAttrValue *value;
1134
+ } cudaGraphKernelNodeSetAttribute_v11000_params;
1135
+
1136
+ typedef struct cudaGraphAddMemcpyNode_v10000_params_st {
1137
+ cudaGraphNode_t *pGraphNode;
1138
+ cudaGraph_t graph;
1139
+ const cudaGraphNode_t *pDependencies;
1140
+ size_t numDependencies;
1141
+ const struct cudaMemcpy3DParms *pCopyParams;
1142
+ } cudaGraphAddMemcpyNode_v10000_params;
1143
+
1144
+ typedef struct cudaGraphAddMemcpyNodeToSymbol_v11010_params_st {
1145
+ cudaGraphNode_t *pGraphNode;
1146
+ cudaGraph_t graph;
1147
+ const cudaGraphNode_t *pDependencies;
1148
+ size_t numDependencies;
1149
+ const void *symbol;
1150
+ const void *src;
1151
+ size_t count;
1152
+ size_t offset;
1153
+ enum cudaMemcpyKind kind;
1154
+ } cudaGraphAddMemcpyNodeToSymbol_v11010_params;
1155
+
1156
+ typedef struct cudaGraphAddMemcpyNodeFromSymbol_v11010_params_st {
1157
+ cudaGraphNode_t *pGraphNode;
1158
+ cudaGraph_t graph;
1159
+ const cudaGraphNode_t *pDependencies;
1160
+ size_t numDependencies;
1161
+ void *dst;
1162
+ const void *symbol;
1163
+ size_t count;
1164
+ size_t offset;
1165
+ enum cudaMemcpyKind kind;
1166
+ } cudaGraphAddMemcpyNodeFromSymbol_v11010_params;
1167
+
1168
+ typedef struct cudaGraphAddMemcpyNode1D_v11010_params_st {
1169
+ cudaGraphNode_t *pGraphNode;
1170
+ cudaGraph_t graph;
1171
+ const cudaGraphNode_t *pDependencies;
1172
+ size_t numDependencies;
1173
+ void *dst;
1174
+ const void *src;
1175
+ size_t count;
1176
+ enum cudaMemcpyKind kind;
1177
+ } cudaGraphAddMemcpyNode1D_v11010_params;
1178
+
1179
+ typedef struct cudaGraphMemcpyNodeGetParams_v10000_params_st {
1180
+ cudaGraphNode_t node;
1181
+ struct cudaMemcpy3DParms *pNodeParams;
1182
+ } cudaGraphMemcpyNodeGetParams_v10000_params;
1183
+
1184
+ typedef struct cudaGraphMemcpyNodeSetParams_v10000_params_st {
1185
+ cudaGraphNode_t node;
1186
+ const struct cudaMemcpy3DParms *pNodeParams;
1187
+ } cudaGraphMemcpyNodeSetParams_v10000_params;
1188
+
1189
+ typedef struct cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params_st {
1190
+ cudaGraphNode_t node;
1191
+ const void *symbol;
1192
+ const void *src;
1193
+ size_t count;
1194
+ size_t offset;
1195
+ enum cudaMemcpyKind kind;
1196
+ } cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params;
1197
+
1198
+ typedef struct cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params_st {
1199
+ cudaGraphNode_t node;
1200
+ void *dst;
1201
+ const void *symbol;
1202
+ size_t count;
1203
+ size_t offset;
1204
+ enum cudaMemcpyKind kind;
1205
+ } cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params;
1206
+
1207
+ typedef struct cudaGraphMemcpyNodeSetParams1D_v11010_params_st {
1208
+ cudaGraphNode_t node;
1209
+ void *dst;
1210
+ const void *src;
1211
+ size_t count;
1212
+ enum cudaMemcpyKind kind;
1213
+ } cudaGraphMemcpyNodeSetParams1D_v11010_params;
1214
+
1215
+ typedef struct cudaGraphAddMemsetNode_v10000_params_st {
1216
+ cudaGraphNode_t *pGraphNode;
1217
+ cudaGraph_t graph;
1218
+ const cudaGraphNode_t *pDependencies;
1219
+ size_t numDependencies;
1220
+ const struct cudaMemsetParams *pMemsetParams;
1221
+ } cudaGraphAddMemsetNode_v10000_params;
1222
+
1223
+ typedef struct cudaGraphMemsetNodeGetParams_v10000_params_st {
1224
+ cudaGraphNode_t node;
1225
+ struct cudaMemsetParams *pNodeParams;
1226
+ } cudaGraphMemsetNodeGetParams_v10000_params;
1227
+
1228
+ typedef struct cudaGraphMemsetNodeSetParams_v10000_params_st {
1229
+ cudaGraphNode_t node;
1230
+ const struct cudaMemsetParams *pNodeParams;
1231
+ } cudaGraphMemsetNodeSetParams_v10000_params;
1232
+
1233
+ typedef struct cudaGraphAddHostNode_v10000_params_st {
1234
+ cudaGraphNode_t *pGraphNode;
1235
+ cudaGraph_t graph;
1236
+ const cudaGraphNode_t *pDependencies;
1237
+ size_t numDependencies;
1238
+ const struct cudaHostNodeParams *pNodeParams;
1239
+ } cudaGraphAddHostNode_v10000_params;
1240
+
1241
+ typedef struct cudaGraphHostNodeGetParams_v10000_params_st {
1242
+ cudaGraphNode_t node;
1243
+ struct cudaHostNodeParams *pNodeParams;
1244
+ } cudaGraphHostNodeGetParams_v10000_params;
1245
+
1246
+ typedef struct cudaGraphHostNodeSetParams_v10000_params_st {
1247
+ cudaGraphNode_t node;
1248
+ const struct cudaHostNodeParams *pNodeParams;
1249
+ } cudaGraphHostNodeSetParams_v10000_params;
1250
+
1251
+ typedef struct cudaGraphAddChildGraphNode_v10000_params_st {
1252
+ cudaGraphNode_t *pGraphNode;
1253
+ cudaGraph_t graph;
1254
+ const cudaGraphNode_t *pDependencies;
1255
+ size_t numDependencies;
1256
+ cudaGraph_t childGraph;
1257
+ } cudaGraphAddChildGraphNode_v10000_params;
1258
+
1259
+ typedef struct cudaGraphChildGraphNodeGetGraph_v10000_params_st {
1260
+ cudaGraphNode_t node;
1261
+ cudaGraph_t *pGraph;
1262
+ } cudaGraphChildGraphNodeGetGraph_v10000_params;
1263
+
1264
+ typedef struct cudaGraphAddEmptyNode_v10000_params_st {
1265
+ cudaGraphNode_t *pGraphNode;
1266
+ cudaGraph_t graph;
1267
+ const cudaGraphNode_t *pDependencies;
1268
+ size_t numDependencies;
1269
+ } cudaGraphAddEmptyNode_v10000_params;
1270
+
1271
+ typedef struct cudaGraphAddEventRecordNode_v11010_params_st {
1272
+ cudaGraphNode_t *pGraphNode;
1273
+ cudaGraph_t graph;
1274
+ const cudaGraphNode_t *pDependencies;
1275
+ size_t numDependencies;
1276
+ cudaEvent_t event;
1277
+ } cudaGraphAddEventRecordNode_v11010_params;
1278
+
1279
+ typedef struct cudaGraphEventRecordNodeGetEvent_v11010_params_st {
1280
+ cudaGraphNode_t node;
1281
+ cudaEvent_t *event_out;
1282
+ } cudaGraphEventRecordNodeGetEvent_v11010_params;
1283
+
1284
+ typedef struct cudaGraphEventRecordNodeSetEvent_v11010_params_st {
1285
+ cudaGraphNode_t node;
1286
+ cudaEvent_t event;
1287
+ } cudaGraphEventRecordNodeSetEvent_v11010_params;
1288
+
1289
+ typedef struct cudaGraphAddEventWaitNode_v11010_params_st {
1290
+ cudaGraphNode_t *pGraphNode;
1291
+ cudaGraph_t graph;
1292
+ const cudaGraphNode_t *pDependencies;
1293
+ size_t numDependencies;
1294
+ cudaEvent_t event;
1295
+ } cudaGraphAddEventWaitNode_v11010_params;
1296
+
1297
+ typedef struct cudaGraphEventWaitNodeGetEvent_v11010_params_st {
1298
+ cudaGraphNode_t node;
1299
+ cudaEvent_t *event_out;
1300
+ } cudaGraphEventWaitNodeGetEvent_v11010_params;
1301
+
1302
+ typedef struct cudaGraphEventWaitNodeSetEvent_v11010_params_st {
1303
+ cudaGraphNode_t node;
1304
+ cudaEvent_t event;
1305
+ } cudaGraphEventWaitNodeSetEvent_v11010_params;
1306
+
1307
+ typedef struct cudaGraphAddExternalSemaphoresSignalNode_v11020_params_st {
1308
+ cudaGraphNode_t *pGraphNode;
1309
+ cudaGraph_t graph;
1310
+ const cudaGraphNode_t *pDependencies;
1311
+ size_t numDependencies;
1312
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1313
+ } cudaGraphAddExternalSemaphoresSignalNode_v11020_params;
1314
+
1315
+ typedef struct cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params_st {
1316
+ cudaGraphNode_t hNode;
1317
+ struct cudaExternalSemaphoreSignalNodeParams *params_out;
1318
+ } cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params;
1319
+
1320
+ typedef struct cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params_st {
1321
+ cudaGraphNode_t hNode;
1322
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1323
+ } cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params;
1324
+
1325
+ typedef struct cudaGraphAddExternalSemaphoresWaitNode_v11020_params_st {
1326
+ cudaGraphNode_t *pGraphNode;
1327
+ cudaGraph_t graph;
1328
+ const cudaGraphNode_t *pDependencies;
1329
+ size_t numDependencies;
1330
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1331
+ } cudaGraphAddExternalSemaphoresWaitNode_v11020_params;
1332
+
1333
+ typedef struct cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params_st {
1334
+ cudaGraphNode_t hNode;
1335
+ struct cudaExternalSemaphoreWaitNodeParams *params_out;
1336
+ } cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params;
1337
+
1338
+ typedef struct cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params_st {
1339
+ cudaGraphNode_t hNode;
1340
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1341
+ } cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params;
1342
+
1343
+ typedef struct cudaGraphAddMemAllocNode_v11040_params_st {
1344
+ cudaGraphNode_t *pGraphNode;
1345
+ cudaGraph_t graph;
1346
+ const cudaGraphNode_t *pDependencies;
1347
+ size_t numDependencies;
1348
+ struct cudaMemAllocNodeParams *nodeParams;
1349
+ } cudaGraphAddMemAllocNode_v11040_params;
1350
+
1351
+ typedef struct cudaGraphMemAllocNodeGetParams_v11040_params_st {
1352
+ cudaGraphNode_t node;
1353
+ struct cudaMemAllocNodeParams *params_out;
1354
+ } cudaGraphMemAllocNodeGetParams_v11040_params;
1355
+
1356
+ typedef struct cudaGraphAddMemFreeNode_v11040_params_st {
1357
+ cudaGraphNode_t *pGraphNode;
1358
+ cudaGraph_t graph;
1359
+ const cudaGraphNode_t *pDependencies;
1360
+ size_t numDependencies;
1361
+ void *dptr;
1362
+ } cudaGraphAddMemFreeNode_v11040_params;
1363
+
1364
+ typedef struct cudaGraphMemFreeNodeGetParams_v11040_params_st {
1365
+ cudaGraphNode_t node;
1366
+ void *dptr_out;
1367
+ } cudaGraphMemFreeNodeGetParams_v11040_params;
1368
+
1369
+ typedef struct cudaDeviceGraphMemTrim_v11040_params_st {
1370
+ int device;
1371
+ } cudaDeviceGraphMemTrim_v11040_params;
1372
+
1373
+ typedef struct cudaDeviceGetGraphMemAttribute_v11040_params_st {
1374
+ int device;
1375
+ enum cudaGraphMemAttributeType attr;
1376
+ void *value;
1377
+ } cudaDeviceGetGraphMemAttribute_v11040_params;
1378
+
1379
+ typedef struct cudaDeviceSetGraphMemAttribute_v11040_params_st {
1380
+ int device;
1381
+ enum cudaGraphMemAttributeType attr;
1382
+ void *value;
1383
+ } cudaDeviceSetGraphMemAttribute_v11040_params;
1384
+
1385
+ typedef struct cudaGraphClone_v10000_params_st {
1386
+ cudaGraph_t *pGraphClone;
1387
+ cudaGraph_t originalGraph;
1388
+ } cudaGraphClone_v10000_params;
1389
+
1390
+ typedef struct cudaGraphNodeFindInClone_v10000_params_st {
1391
+ cudaGraphNode_t *pNode;
1392
+ cudaGraphNode_t originalNode;
1393
+ cudaGraph_t clonedGraph;
1394
+ } cudaGraphNodeFindInClone_v10000_params;
1395
+
1396
+ typedef struct cudaGraphNodeGetType_v10000_params_st {
1397
+ cudaGraphNode_t node;
1398
+ enum cudaGraphNodeType *pType;
1399
+ } cudaGraphNodeGetType_v10000_params;
1400
+
1401
+ typedef struct cudaGraphGetNodes_v10000_params_st {
1402
+ cudaGraph_t graph;
1403
+ cudaGraphNode_t *nodes;
1404
+ size_t *numNodes;
1405
+ } cudaGraphGetNodes_v10000_params;
1406
+
1407
+ typedef struct cudaGraphGetRootNodes_v10000_params_st {
1408
+ cudaGraph_t graph;
1409
+ cudaGraphNode_t *pRootNodes;
1410
+ size_t *pNumRootNodes;
1411
+ } cudaGraphGetRootNodes_v10000_params;
1412
+
1413
+ typedef struct cudaGraphGetEdges_v10000_params_st {
1414
+ cudaGraph_t graph;
1415
+ cudaGraphNode_t *from;
1416
+ cudaGraphNode_t *to;
1417
+ size_t *numEdges;
1418
+ } cudaGraphGetEdges_v10000_params;
1419
+
1420
+ typedef struct cudaGraphNodeGetDependencies_v10000_params_st {
1421
+ cudaGraphNode_t node;
1422
+ cudaGraphNode_t *pDependencies;
1423
+ size_t *pNumDependencies;
1424
+ } cudaGraphNodeGetDependencies_v10000_params;
1425
+
1426
+ typedef struct cudaGraphNodeGetDependentNodes_v10000_params_st {
1427
+ cudaGraphNode_t node;
1428
+ cudaGraphNode_t *pDependentNodes;
1429
+ size_t *pNumDependentNodes;
1430
+ } cudaGraphNodeGetDependentNodes_v10000_params;
1431
+
1432
+ typedef struct cudaGraphAddDependencies_v10000_params_st {
1433
+ cudaGraph_t graph;
1434
+ const cudaGraphNode_t *from;
1435
+ const cudaGraphNode_t *to;
1436
+ size_t numDependencies;
1437
+ } cudaGraphAddDependencies_v10000_params;
1438
+
1439
+ typedef struct cudaGraphRemoveDependencies_v10000_params_st {
1440
+ cudaGraph_t graph;
1441
+ const cudaGraphNode_t *from;
1442
+ const cudaGraphNode_t *to;
1443
+ size_t numDependencies;
1444
+ } cudaGraphRemoveDependencies_v10000_params;
1445
+
1446
+ typedef struct cudaGraphDestroyNode_v10000_params_st {
1447
+ cudaGraphNode_t node;
1448
+ } cudaGraphDestroyNode_v10000_params;
1449
+
1450
+ typedef struct cudaGraphInstantiate_v12000_params_st {
1451
+ cudaGraphExec_t *pGraphExec;
1452
+ cudaGraph_t graph;
1453
+ unsigned long long flags;
1454
+ } cudaGraphInstantiate_v12000_params;
1455
+
1456
+ typedef struct cudaGraphInstantiateWithFlags_v11040_params_st {
1457
+ cudaGraphExec_t *pGraphExec;
1458
+ cudaGraph_t graph;
1459
+ unsigned long long flags;
1460
+ } cudaGraphInstantiateWithFlags_v11040_params;
1461
+
1462
+ typedef struct cudaGraphInstantiateWithParams_ptsz_v12000_params_st {
1463
+ cudaGraphExec_t *pGraphExec;
1464
+ cudaGraph_t graph;
1465
+ cudaGraphInstantiateParams *instantiateParams;
1466
+ } cudaGraphInstantiateWithParams_ptsz_v12000_params;
1467
+
1468
+ typedef struct cudaGraphExecGetFlags_v12000_params_st {
1469
+ cudaGraphExec_t graphExec;
1470
+ unsigned long long *flags;
1471
+ } cudaGraphExecGetFlags_v12000_params;
1472
+
1473
+ typedef struct cudaGraphExecKernelNodeSetParams_v10010_params_st {
1474
+ cudaGraphExec_t hGraphExec;
1475
+ cudaGraphNode_t node;
1476
+ const struct cudaKernelNodeParams *pNodeParams;
1477
+ } cudaGraphExecKernelNodeSetParams_v10010_params;
1478
+
1479
+ typedef struct cudaGraphExecMemcpyNodeSetParams_v10020_params_st {
1480
+ cudaGraphExec_t hGraphExec;
1481
+ cudaGraphNode_t node;
1482
+ const struct cudaMemcpy3DParms *pNodeParams;
1483
+ } cudaGraphExecMemcpyNodeSetParams_v10020_params;
1484
+
1485
+ typedef struct cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params_st {
1486
+ cudaGraphExec_t hGraphExec;
1487
+ cudaGraphNode_t node;
1488
+ const void *symbol;
1489
+ const void *src;
1490
+ size_t count;
1491
+ size_t offset;
1492
+ enum cudaMemcpyKind kind;
1493
+ } cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params;
1494
+
1495
+ typedef struct cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params_st {
1496
+ cudaGraphExec_t hGraphExec;
1497
+ cudaGraphNode_t node;
1498
+ void *dst;
1499
+ const void *symbol;
1500
+ size_t count;
1501
+ size_t offset;
1502
+ enum cudaMemcpyKind kind;
1503
+ } cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params;
1504
+
1505
+ typedef struct cudaGraphExecMemcpyNodeSetParams1D_v11010_params_st {
1506
+ cudaGraphExec_t hGraphExec;
1507
+ cudaGraphNode_t node;
1508
+ void *dst;
1509
+ const void *src;
1510
+ size_t count;
1511
+ enum cudaMemcpyKind kind;
1512
+ } cudaGraphExecMemcpyNodeSetParams1D_v11010_params;
1513
+
1514
+ typedef struct cudaGraphExecMemsetNodeSetParams_v10020_params_st {
1515
+ cudaGraphExec_t hGraphExec;
1516
+ cudaGraphNode_t node;
1517
+ const struct cudaMemsetParams *pNodeParams;
1518
+ } cudaGraphExecMemsetNodeSetParams_v10020_params;
1519
+
1520
+ typedef struct cudaGraphExecHostNodeSetParams_v10020_params_st {
1521
+ cudaGraphExec_t hGraphExec;
1522
+ cudaGraphNode_t node;
1523
+ const struct cudaHostNodeParams *pNodeParams;
1524
+ } cudaGraphExecHostNodeSetParams_v10020_params;
1525
+
1526
+ typedef struct cudaGraphExecChildGraphNodeSetParams_v11010_params_st {
1527
+ cudaGraphExec_t hGraphExec;
1528
+ cudaGraphNode_t node;
1529
+ cudaGraph_t childGraph;
1530
+ } cudaGraphExecChildGraphNodeSetParams_v11010_params;
1531
+
1532
+ typedef struct cudaGraphExecEventRecordNodeSetEvent_v11010_params_st {
1533
+ cudaGraphExec_t hGraphExec;
1534
+ cudaGraphNode_t hNode;
1535
+ cudaEvent_t event;
1536
+ } cudaGraphExecEventRecordNodeSetEvent_v11010_params;
1537
+
1538
+ typedef struct cudaGraphExecEventWaitNodeSetEvent_v11010_params_st {
1539
+ cudaGraphExec_t hGraphExec;
1540
+ cudaGraphNode_t hNode;
1541
+ cudaEvent_t event;
1542
+ } cudaGraphExecEventWaitNodeSetEvent_v11010_params;
1543
+
1544
+ typedef struct cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params_st {
1545
+ cudaGraphExec_t hGraphExec;
1546
+ cudaGraphNode_t hNode;
1547
+ const struct cudaExternalSemaphoreSignalNodeParams *nodeParams;
1548
+ } cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params;
1549
+
1550
+ typedef struct cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params_st {
1551
+ cudaGraphExec_t hGraphExec;
1552
+ cudaGraphNode_t hNode;
1553
+ const struct cudaExternalSemaphoreWaitNodeParams *nodeParams;
1554
+ } cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params;
1555
+
1556
+ typedef struct cudaGraphNodeSetEnabled_v11060_params_st {
1557
+ cudaGraphExec_t hGraphExec;
1558
+ cudaGraphNode_t hNode;
1559
+ unsigned int isEnabled;
1560
+ } cudaGraphNodeSetEnabled_v11060_params;
1561
+
1562
+ typedef struct cudaGraphNodeGetEnabled_v11060_params_st {
1563
+ cudaGraphExec_t hGraphExec;
1564
+ cudaGraphNode_t hNode;
1565
+ unsigned int *isEnabled;
1566
+ } cudaGraphNodeGetEnabled_v11060_params;
1567
+
1568
+ typedef struct cudaGraphExecUpdate_v10020_params_st {
1569
+ cudaGraphExec_t hGraphExec;
1570
+ cudaGraph_t hGraph;
1571
+ cudaGraphExecUpdateResultInfo *resultInfo;
1572
+ } cudaGraphExecUpdate_v10020_params;
1573
+
1574
+ typedef struct cudaGraphUpload_ptsz_v10000_params_st {
1575
+ cudaGraphExec_t graphExec;
1576
+ cudaStream_t stream;
1577
+ } cudaGraphUpload_ptsz_v10000_params;
1578
+
1579
+ typedef struct cudaGraphLaunch_ptsz_v10000_params_st {
1580
+ cudaGraphExec_t graphExec;
1581
+ cudaStream_t stream;
1582
+ } cudaGraphLaunch_ptsz_v10000_params;
1583
+
1584
+ typedef struct cudaGraphExecDestroy_v10000_params_st {
1585
+ cudaGraphExec_t graphExec;
1586
+ } cudaGraphExecDestroy_v10000_params;
1587
+
1588
+ typedef struct cudaGraphDestroy_v10000_params_st {
1589
+ cudaGraph_t graph;
1590
+ } cudaGraphDestroy_v10000_params;
1591
+
1592
+ typedef struct cudaGraphDebugDotPrint_v11030_params_st {
1593
+ cudaGraph_t graph;
1594
+ const char *path;
1595
+ unsigned int flags;
1596
+ } cudaGraphDebugDotPrint_v11030_params;
1597
+
1598
+ typedef struct cudaUserObjectCreate_v11030_params_st {
1599
+ cudaUserObject_t *object_out;
1600
+ void *ptr;
1601
+ cudaHostFn_t destroy;
1602
+ unsigned int initialRefcount;
1603
+ unsigned int flags;
1604
+ } cudaUserObjectCreate_v11030_params;
1605
+
1606
+ typedef struct cudaUserObjectRetain_v11030_params_st {
1607
+ cudaUserObject_t object;
1608
+ unsigned int count;
1609
+ } cudaUserObjectRetain_v11030_params;
1610
+
1611
+ typedef struct cudaUserObjectRelease_v11030_params_st {
1612
+ cudaUserObject_t object;
1613
+ unsigned int count;
1614
+ } cudaUserObjectRelease_v11030_params;
1615
+
1616
+ typedef struct cudaGraphRetainUserObject_v11030_params_st {
1617
+ cudaGraph_t graph;
1618
+ cudaUserObject_t object;
1619
+ unsigned int count;
1620
+ unsigned int flags;
1621
+ } cudaGraphRetainUserObject_v11030_params;
1622
+
1623
+ typedef struct cudaGraphReleaseUserObject_v11030_params_st {
1624
+ cudaGraph_t graph;
1625
+ cudaUserObject_t object;
1626
+ unsigned int count;
1627
+ } cudaGraphReleaseUserObject_v11030_params;
1628
+
1629
+ typedef struct cudaGetDriverEntryPoint_ptsz_v11030_params_st {
1630
+ const char *symbol;
1631
+ void **funcPtr;
1632
+ unsigned long long flags;
1633
+ enum cudaDriverEntryPointQueryResult *driverStatus;
1634
+ } cudaGetDriverEntryPoint_ptsz_v11030_params;
1635
+
1636
+ typedef struct cudaGetFuncBySymbol_v11000_params_st {
1637
+ cudaFunction_t *functionPtr;
1638
+ const void *symbolPtr;
1639
+ } cudaGetFuncBySymbol_v11000_params;
1640
+
1641
+ typedef struct cudaGetKernel_v12000_params_st {
1642
+ cudaKernel_t *kernelPtr;
1643
+ const void *entryFuncAddr;
1644
+ } cudaGetKernel_v12000_params;
1645
+
1646
+ typedef struct cudaMemcpy_v3020_params_st {
1647
+ void *dst;
1648
+ const void *src;
1649
+ size_t count;
1650
+ enum cudaMemcpyKind kind;
1651
+ } cudaMemcpy_v3020_params;
1652
+
1653
+ typedef struct cudaMemcpyToSymbol_v3020_params_st {
1654
+ const void *symbol;
1655
+ const void *src;
1656
+ size_t count;
1657
+ size_t offset;
1658
+ enum cudaMemcpyKind kind;
1659
+ } cudaMemcpyToSymbol_v3020_params;
1660
+
1661
+ typedef struct cudaMemcpyFromSymbol_v3020_params_st {
1662
+ void *dst;
1663
+ const void *symbol;
1664
+ size_t count;
1665
+ size_t offset;
1666
+ enum cudaMemcpyKind kind;
1667
+ } cudaMemcpyFromSymbol_v3020_params;
1668
+
1669
+ typedef struct cudaMemcpy2D_v3020_params_st {
1670
+ void *dst;
1671
+ size_t dpitch;
1672
+ const void *src;
1673
+ size_t spitch;
1674
+ size_t width;
1675
+ size_t height;
1676
+ enum cudaMemcpyKind kind;
1677
+ } cudaMemcpy2D_v3020_params;
1678
+
1679
+ typedef struct cudaMemcpyToArray_v3020_params_st {
1680
+ cudaArray_t dst;
1681
+ size_t wOffset;
1682
+ size_t hOffset;
1683
+ const void *src;
1684
+ size_t count;
1685
+ enum cudaMemcpyKind kind;
1686
+ } cudaMemcpyToArray_v3020_params;
1687
+
1688
+ typedef struct cudaMemcpy2DToArray_v3020_params_st {
1689
+ cudaArray_t dst;
1690
+ size_t wOffset;
1691
+ size_t hOffset;
1692
+ const void *src;
1693
+ size_t spitch;
1694
+ size_t width;
1695
+ size_t height;
1696
+ enum cudaMemcpyKind kind;
1697
+ } cudaMemcpy2DToArray_v3020_params;
1698
+
1699
+ typedef struct cudaMemcpyFromArray_v3020_params_st {
1700
+ void *dst;
1701
+ cudaArray_const_t src;
1702
+ size_t wOffset;
1703
+ size_t hOffset;
1704
+ size_t count;
1705
+ enum cudaMemcpyKind kind;
1706
+ } cudaMemcpyFromArray_v3020_params;
1707
+
1708
+ typedef struct cudaMemcpy2DFromArray_v3020_params_st {
1709
+ void *dst;
1710
+ size_t dpitch;
1711
+ cudaArray_const_t src;
1712
+ size_t wOffset;
1713
+ size_t hOffset;
1714
+ size_t width;
1715
+ size_t height;
1716
+ enum cudaMemcpyKind kind;
1717
+ } cudaMemcpy2DFromArray_v3020_params;
1718
+
1719
+ typedef struct cudaMemcpyArrayToArray_v3020_params_st {
1720
+ cudaArray_t dst;
1721
+ size_t wOffsetDst;
1722
+ size_t hOffsetDst;
1723
+ cudaArray_const_t src;
1724
+ size_t wOffsetSrc;
1725
+ size_t hOffsetSrc;
1726
+ size_t count;
1727
+ enum cudaMemcpyKind kind;
1728
+ } cudaMemcpyArrayToArray_v3020_params;
1729
+
1730
+ typedef struct cudaMemcpy2DArrayToArray_v3020_params_st {
1731
+ cudaArray_t dst;
1732
+ size_t wOffsetDst;
1733
+ size_t hOffsetDst;
1734
+ cudaArray_const_t src;
1735
+ size_t wOffsetSrc;
1736
+ size_t hOffsetSrc;
1737
+ size_t width;
1738
+ size_t height;
1739
+ enum cudaMemcpyKind kind;
1740
+ } cudaMemcpy2DArrayToArray_v3020_params;
1741
+
1742
+ typedef struct cudaMemcpy3D_v3020_params_st {
1743
+ const struct cudaMemcpy3DParms *p;
1744
+ } cudaMemcpy3D_v3020_params;
1745
+
1746
+ typedef struct cudaMemcpy3DPeer_v4000_params_st {
1747
+ const struct cudaMemcpy3DPeerParms *p;
1748
+ } cudaMemcpy3DPeer_v4000_params;
1749
+
1750
+ typedef struct cudaMemset_v3020_params_st {
1751
+ void *devPtr;
1752
+ int value;
1753
+ size_t count;
1754
+ } cudaMemset_v3020_params;
1755
+
1756
+ typedef struct cudaMemset2D_v3020_params_st {
1757
+ void *devPtr;
1758
+ size_t pitch;
1759
+ int value;
1760
+ size_t width;
1761
+ size_t height;
1762
+ } cudaMemset2D_v3020_params;
1763
+
1764
+ typedef struct cudaMemset3D_v3020_params_st {
1765
+ struct cudaPitchedPtr pitchedDevPtr;
1766
+ int value;
1767
+ struct cudaExtent extent;
1768
+ } cudaMemset3D_v3020_params;
1769
+
1770
+ typedef struct cudaMemcpyAsync_v3020_params_st {
1771
+ void *dst;
1772
+ const void *src;
1773
+ size_t count;
1774
+ enum cudaMemcpyKind kind;
1775
+ cudaStream_t stream;
1776
+ } cudaMemcpyAsync_v3020_params;
1777
+
1778
+ typedef struct cudaMemcpyToSymbolAsync_v3020_params_st {
1779
+ const void *symbol;
1780
+ const void *src;
1781
+ size_t count;
1782
+ size_t offset;
1783
+ enum cudaMemcpyKind kind;
1784
+ cudaStream_t stream;
1785
+ } cudaMemcpyToSymbolAsync_v3020_params;
1786
+
1787
+ typedef struct cudaMemcpyFromSymbolAsync_v3020_params_st {
1788
+ void *dst;
1789
+ const void *symbol;
1790
+ size_t count;
1791
+ size_t offset;
1792
+ enum cudaMemcpyKind kind;
1793
+ cudaStream_t stream;
1794
+ } cudaMemcpyFromSymbolAsync_v3020_params;
1795
+
1796
+ typedef struct cudaMemcpy2DAsync_v3020_params_st {
1797
+ void *dst;
1798
+ size_t dpitch;
1799
+ const void *src;
1800
+ size_t spitch;
1801
+ size_t width;
1802
+ size_t height;
1803
+ enum cudaMemcpyKind kind;
1804
+ cudaStream_t stream;
1805
+ } cudaMemcpy2DAsync_v3020_params;
1806
+
1807
+ typedef struct cudaMemcpyToArrayAsync_v3020_params_st {
1808
+ cudaArray_t dst;
1809
+ size_t wOffset;
1810
+ size_t hOffset;
1811
+ const void *src;
1812
+ size_t count;
1813
+ enum cudaMemcpyKind kind;
1814
+ cudaStream_t stream;
1815
+ } cudaMemcpyToArrayAsync_v3020_params;
1816
+
1817
+ typedef struct cudaMemcpy2DToArrayAsync_v3020_params_st {
1818
+ cudaArray_t dst;
1819
+ size_t wOffset;
1820
+ size_t hOffset;
1821
+ const void *src;
1822
+ size_t spitch;
1823
+ size_t width;
1824
+ size_t height;
1825
+ enum cudaMemcpyKind kind;
1826
+ cudaStream_t stream;
1827
+ } cudaMemcpy2DToArrayAsync_v3020_params;
1828
+
1829
+ typedef struct cudaMemcpyFromArrayAsync_v3020_params_st {
1830
+ void *dst;
1831
+ cudaArray_const_t src;
1832
+ size_t wOffset;
1833
+ size_t hOffset;
1834
+ size_t count;
1835
+ enum cudaMemcpyKind kind;
1836
+ cudaStream_t stream;
1837
+ } cudaMemcpyFromArrayAsync_v3020_params;
1838
+
1839
+ typedef struct cudaMemcpy2DFromArrayAsync_v3020_params_st {
1840
+ void *dst;
1841
+ size_t dpitch;
1842
+ cudaArray_const_t src;
1843
+ size_t wOffset;
1844
+ size_t hOffset;
1845
+ size_t width;
1846
+ size_t height;
1847
+ enum cudaMemcpyKind kind;
1848
+ cudaStream_t stream;
1849
+ } cudaMemcpy2DFromArrayAsync_v3020_params;
1850
+
1851
+ typedef struct cudaMemcpy3DAsync_v3020_params_st {
1852
+ const struct cudaMemcpy3DParms *p;
1853
+ cudaStream_t stream;
1854
+ } cudaMemcpy3DAsync_v3020_params;
1855
+
1856
+ typedef struct cudaMemcpy3DPeerAsync_v4000_params_st {
1857
+ const struct cudaMemcpy3DPeerParms *p;
1858
+ cudaStream_t stream;
1859
+ } cudaMemcpy3DPeerAsync_v4000_params;
1860
+
1861
+ typedef struct cudaMemsetAsync_v3020_params_st {
1862
+ void *devPtr;
1863
+ int value;
1864
+ size_t count;
1865
+ cudaStream_t stream;
1866
+ } cudaMemsetAsync_v3020_params;
1867
+
1868
+ typedef struct cudaMemset2DAsync_v3020_params_st {
1869
+ void *devPtr;
1870
+ size_t pitch;
1871
+ int value;
1872
+ size_t width;
1873
+ size_t height;
1874
+ cudaStream_t stream;
1875
+ } cudaMemset2DAsync_v3020_params;
1876
+
1877
+ typedef struct cudaMemset3DAsync_v3020_params_st {
1878
+ struct cudaPitchedPtr pitchedDevPtr;
1879
+ int value;
1880
+ struct cudaExtent extent;
1881
+ cudaStream_t stream;
1882
+ } cudaMemset3DAsync_v3020_params;
1883
+
1884
+ typedef struct cudaStreamQuery_v3020_params_st {
1885
+ cudaStream_t stream;
1886
+ } cudaStreamQuery_v3020_params;
1887
+
1888
+ typedef struct cudaStreamGetFlags_v5050_params_st {
1889
+ cudaStream_t hStream;
1890
+ unsigned int *flags;
1891
+ } cudaStreamGetFlags_v5050_params;
1892
+
1893
+ typedef struct cudaStreamGetId_v12000_params_st {
1894
+ cudaStream_t hStream;
1895
+ unsigned long long *streamId;
1896
+ } cudaStreamGetId_v12000_params;
1897
+
1898
+ typedef struct cudaStreamGetPriority_v5050_params_st {
1899
+ cudaStream_t hStream;
1900
+ int *priority;
1901
+ } cudaStreamGetPriority_v5050_params;
1902
+
1903
+ typedef struct cudaEventRecord_v3020_params_st {
1904
+ cudaEvent_t event;
1905
+ cudaStream_t stream;
1906
+ } cudaEventRecord_v3020_params;
1907
+
1908
+ typedef struct cudaEventRecordWithFlags_v11010_params_st {
1909
+ cudaEvent_t event;
1910
+ cudaStream_t stream;
1911
+ unsigned int flags;
1912
+ } cudaEventRecordWithFlags_v11010_params;
1913
+
1914
+ typedef struct cudaStreamWaitEvent_v3020_params_st {
1915
+ cudaStream_t stream;
1916
+ cudaEvent_t event;
1917
+ unsigned int flags;
1918
+ } cudaStreamWaitEvent_v3020_params;
1919
+
1920
+ typedef struct cudaStreamAddCallback_v5000_params_st {
1921
+ cudaStream_t stream;
1922
+ cudaStreamCallback_t callback;
1923
+ void *userData;
1924
+ unsigned int flags;
1925
+ } cudaStreamAddCallback_v5000_params;
1926
+
1927
+ typedef struct cudaStreamAttachMemAsync_v6000_params_st {
1928
+ cudaStream_t stream;
1929
+ void *devPtr;
1930
+ size_t length;
1931
+ unsigned int flags;
1932
+ } cudaStreamAttachMemAsync_v6000_params;
1933
+
1934
+ typedef struct cudaStreamSynchronize_v3020_params_st {
1935
+ cudaStream_t stream;
1936
+ } cudaStreamSynchronize_v3020_params;
1937
+
1938
+ typedef struct cudaLaunchKernel_v7000_params_st {
1939
+ const void *func;
1940
+ dim3 gridDim;
1941
+ dim3 blockDim;
1942
+ void **args;
1943
+ size_t sharedMem;
1944
+ cudaStream_t stream;
1945
+ } cudaLaunchKernel_v7000_params;
1946
+
1947
+ typedef struct cudaLaunchKernelExC_v11060_params_st {
1948
+ const cudaLaunchConfig_t *config;
1949
+ const void *func;
1950
+ void **args;
1951
+ } cudaLaunchKernelExC_v11060_params;
1952
+
1953
+ typedef struct cudaLaunchCooperativeKernel_v9000_params_st {
1954
+ const void *func;
1955
+ dim3 gridDim;
1956
+ dim3 blockDim;
1957
+ void **args;
1958
+ size_t sharedMem;
1959
+ cudaStream_t stream;
1960
+ } cudaLaunchCooperativeKernel_v9000_params;
1961
+
1962
+ typedef struct cudaLaunchHostFunc_v10000_params_st {
1963
+ cudaStream_t stream;
1964
+ cudaHostFn_t fn;
1965
+ void *userData;
1966
+ } cudaLaunchHostFunc_v10000_params;
1967
+
1968
+ typedef struct cudaMemPrefetchAsync_v8000_params_st {
1969
+ const void *devPtr;
1970
+ size_t count;
1971
+ int dstDevice;
1972
+ cudaStream_t stream;
1973
+ } cudaMemPrefetchAsync_v8000_params;
1974
+
1975
+ typedef struct cudaSignalExternalSemaphoresAsync_v10000_params_st {
1976
+ const cudaExternalSemaphore_t *extSemArray;
1977
+ const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray;
1978
+ unsigned int numExtSems;
1979
+ cudaStream_t stream;
1980
+ } cudaSignalExternalSemaphoresAsync_v10000_params;
1981
+
1982
+ typedef struct cudaSignalExternalSemaphoresAsync_ptsz_v10000_params_st {
1983
+ const cudaExternalSemaphore_t *extSemArray;
1984
+ const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray;
1985
+ unsigned int numExtSems;
1986
+ cudaStream_t stream;
1987
+ } cudaSignalExternalSemaphoresAsync_ptsz_v10000_params;
1988
+
1989
+ typedef struct cudaSignalExternalSemaphoresAsync_v2_v11020_params_st {
1990
+ const cudaExternalSemaphore_t *extSemArray;
1991
+ const struct cudaExternalSemaphoreSignalParams *paramsArray;
1992
+ unsigned int numExtSems;
1993
+ cudaStream_t stream;
1994
+ } cudaSignalExternalSemaphoresAsync_v2_v11020_params;
1995
+
1996
+ typedef struct cudaWaitExternalSemaphoresAsync_v10000_params_st {
1997
+ const cudaExternalSemaphore_t *extSemArray;
1998
+ const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray;
1999
+ unsigned int numExtSems;
2000
+ cudaStream_t stream;
2001
+ } cudaWaitExternalSemaphoresAsync_v10000_params;
2002
+
2003
+ typedef struct cudaWaitExternalSemaphoresAsync_ptsz_v10000_params_st {
2004
+ const cudaExternalSemaphore_t *extSemArray;
2005
+ const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray;
2006
+ unsigned int numExtSems;
2007
+ cudaStream_t stream;
2008
+ } cudaWaitExternalSemaphoresAsync_ptsz_v10000_params;
2009
+
2010
+ typedef struct cudaWaitExternalSemaphoresAsync_v2_v11020_params_st {
2011
+ const cudaExternalSemaphore_t *extSemArray;
2012
+ const struct cudaExternalSemaphoreWaitParams *paramsArray;
2013
+ unsigned int numExtSems;
2014
+ cudaStream_t stream;
2015
+ } cudaWaitExternalSemaphoresAsync_v2_v11020_params;
2016
+
2017
+ typedef struct cudaGraphInstantiateWithParams_v12000_params_st {
2018
+ cudaGraphExec_t *pGraphExec;
2019
+ cudaGraph_t graph;
2020
+ cudaGraphInstantiateParams *instantiateParams;
2021
+ } cudaGraphInstantiateWithParams_v12000_params;
2022
+
2023
+ typedef struct cudaGraphUpload_v10000_params_st {
2024
+ cudaGraphExec_t graphExec;
2025
+ cudaStream_t stream;
2026
+ } cudaGraphUpload_v10000_params;
2027
+
2028
+ typedef struct cudaGraphLaunch_v10000_params_st {
2029
+ cudaGraphExec_t graphExec;
2030
+ cudaStream_t stream;
2031
+ } cudaGraphLaunch_v10000_params;
2032
+
2033
+ typedef struct cudaStreamBeginCapture_v10000_params_st {
2034
+ cudaStream_t stream;
2035
+ enum cudaStreamCaptureMode mode;
2036
+ } cudaStreamBeginCapture_v10000_params;
2037
+
2038
+ typedef struct cudaStreamEndCapture_v10000_params_st {
2039
+ cudaStream_t stream;
2040
+ cudaGraph_t *pGraph;
2041
+ } cudaStreamEndCapture_v10000_params;
2042
+
2043
+ typedef struct cudaStreamIsCapturing_v10000_params_st {
2044
+ cudaStream_t stream;
2045
+ enum cudaStreamCaptureStatus *pCaptureStatus;
2046
+ } cudaStreamIsCapturing_v10000_params;
2047
+
2048
+ typedef struct cudaStreamGetCaptureInfo_v10010_params_st {
2049
+ cudaStream_t stream;
2050
+ enum cudaStreamCaptureStatus *captureStatus_out;
2051
+ unsigned long long *id_out;
2052
+ } cudaStreamGetCaptureInfo_v10010_params;
2053
+
2054
+ typedef struct cudaStreamGetCaptureInfo_ptsz_v10010_params_st {
2055
+ cudaStream_t stream;
2056
+ enum cudaStreamCaptureStatus *captureStatus_out;
2057
+ unsigned long long *id_out;
2058
+ } cudaStreamGetCaptureInfo_ptsz_v10010_params;
2059
+
2060
+ typedef struct cudaStreamGetCaptureInfo_v2_v11030_params_st {
2061
+ cudaStream_t stream;
2062
+ enum cudaStreamCaptureStatus *captureStatus_out;
2063
+ unsigned long long *id_out;
2064
+ cudaGraph_t *graph_out;
2065
+ const cudaGraphNode_t **dependencies_out;
2066
+ size_t *numDependencies_out;
2067
+ } cudaStreamGetCaptureInfo_v2_v11030_params;
2068
+
2069
+ typedef struct cudaStreamUpdateCaptureDependencies_ptsz_v11030_params_st {
2070
+ cudaStream_t stream;
2071
+ cudaGraphNode_t *dependencies;
2072
+ size_t numDependencies;
2073
+ unsigned int flags;
2074
+ } cudaStreamUpdateCaptureDependencies_ptsz_v11030_params;
2075
+
2076
+ typedef struct cudaStreamCopyAttributes_v11000_params_st {
2077
+ cudaStream_t dstStream;
2078
+ cudaStream_t srcStream;
2079
+ } cudaStreamCopyAttributes_v11000_params;
2080
+
2081
+ typedef struct cudaStreamGetAttribute_v11000_params_st {
2082
+ cudaStream_t stream;
2083
+ cudaStreamAttrID attr;
2084
+ cudaStreamAttrValue *value;
2085
+ } cudaStreamGetAttribute_v11000_params;
2086
+
2087
+ typedef struct cudaStreamSetAttribute_v11000_params_st {
2088
+ cudaStream_t stream;
2089
+ cudaStreamAttrID attr;
2090
+ const cudaStreamAttrValue *param;
2091
+ } cudaStreamSetAttribute_v11000_params;
2092
+
2093
+ typedef struct cudaMallocAsync_v11020_params_st {
2094
+ void **devPtr;
2095
+ size_t size;
2096
+ cudaStream_t hStream;
2097
+ } cudaMallocAsync_v11020_params;
2098
+
2099
+ typedef struct cudaFreeAsync_v11020_params_st {
2100
+ void *devPtr;
2101
+ cudaStream_t hStream;
2102
+ } cudaFreeAsync_v11020_params;
2103
+
2104
+ typedef struct cudaMallocFromPoolAsync_v11020_params_st {
2105
+ void **ptr;
2106
+ size_t size;
2107
+ cudaMemPool_t memPool;
2108
+ cudaStream_t stream;
2109
+ } cudaMallocFromPoolAsync_v11020_params;
2110
+
2111
+ typedef struct cudaGetDriverEntryPoint_v11030_params_st {
2112
+ const char *symbol;
2113
+ void **funcPtr;
2114
+ unsigned long long flags;
2115
+ enum cudaDriverEntryPointQueryResult *driverStatus;
2116
+ } cudaGetDriverEntryPoint_v11030_params;
2117
+
2118
+ typedef struct cudaGetDeviceProperties_v3020_params_st {
2119
+ struct cudaDeviceProp *prop;
2120
+ int device;
2121
+ } cudaGetDeviceProperties_v3020_params;
2122
+
2123
+ // Parameter trace structures for removed functions
2124
+
2125
+
2126
+ // End of parameter trace structures
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cuda_vdpau_interop.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaVDPAUGetDevice_v3020_params_st {
12
+ int *device;
13
+ VdpDevice vdpDevice;
14
+ VdpGetProcAddress *vdpGetProcAddress;
15
+ } cudaVDPAUGetDevice_v3020_params;
16
+
17
+ typedef struct cudaVDPAUSetVDPAUDevice_v3020_params_st {
18
+ int device;
19
+ VdpDevice vdpDevice;
20
+ VdpGetProcAddress *vdpGetProcAddress;
21
+ } cudaVDPAUSetVDPAUDevice_v3020_params;
22
+
23
+ typedef struct cudaGraphicsVDPAURegisterVideoSurface_v3020_params_st {
24
+ struct cudaGraphicsResource **resource;
25
+ VdpVideoSurface vdpSurface;
26
+ unsigned int flags;
27
+ } cudaGraphicsVDPAURegisterVideoSurface_v3020_params;
28
+
29
+ typedef struct cudaGraphicsVDPAURegisterOutputSurface_v3020_params_st {
30
+ struct cudaGraphicsResource **resource;
31
+ VdpOutputSurface vdpSurface;
32
+ unsigned int flags;
33
+ } cudaGraphicsVDPAURegisterOutputSurface_v3020_params;
34
+
35
+ // Parameter trace structures for removed functions
36
+
37
+
38
+ // End of parameter trace structures
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // This file is generated. Any changes you make will be lost during the next clean build.
2
+
3
+ // CUDA public interface, for type definitions and api function prototypes
4
+ #include "cudart_removed.h"
5
+
6
+ // *************************************************************************
7
+ // Definitions of structs to hold parameters for each function
8
+ // *************************************************************************
9
+
10
+ // Currently used parameter trace structures
11
+ typedef struct cudaStreamDestroy_v3020_params_st {
12
+ cudaStream_t stream;
13
+ } cudaStreamDestroy_v3020_params;
14
+
15
+ typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params_st {
16
+ int *numBlocks;
17
+ const void *func;
18
+ size_t numDynamicSmemBytes;
19
+ } cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params;
20
+
21
+ typedef struct cudaConfigureCall_v3020_params_st {
22
+ dim3 gridDim;
23
+ dim3 blockDim;
24
+ size_t sharedMem __dv;
25
+ cudaStream_t stream __dv;
26
+ } cudaConfigureCall_v3020_params;
27
+
28
+ typedef struct cudaSetupArgument_v3020_params_st {
29
+ const void *arg;
30
+ size_t size;
31
+ size_t offset;
32
+ } cudaSetupArgument_v3020_params;
33
+
34
+ typedef struct cudaLaunch_v3020_params_st {
35
+ const void *func;
36
+ } cudaLaunch_v3020_params;
37
+
38
+ typedef struct cudaLaunch_ptsz_v7000_params_st {
39
+ const void *func;
40
+ } cudaLaunch_ptsz_v7000_params;
41
+
42
+ typedef struct cudaStreamSetFlags_v10200_params_st {
43
+ cudaStream_t hStream;
44
+ unsigned int flags;
45
+ } cudaStreamSetFlags_v10200_params;
46
+
47
+ typedef struct cudaStreamSetFlags_ptsz_v10200_params_st {
48
+ cudaStream_t hStream;
49
+ unsigned int flags;
50
+ } cudaStreamSetFlags_ptsz_v10200_params;
51
+
52
+ typedef struct cudaProfilerInitialize_v4000_params_st {
53
+ const char *configFile;
54
+ const char *outputFile;
55
+ cudaOutputMode_t outputMode;
56
+ } cudaProfilerInitialize_v4000_params;
57
+
58
+ typedef struct cudaThreadSetLimit_v3020_params_st {
59
+ enum cudaLimit limit;
60
+ size_t value;
61
+ } cudaThreadSetLimit_v3020_params;
62
+
63
+ typedef struct cudaThreadGetLimit_v3020_params_st {
64
+ size_t *pValue;
65
+ enum cudaLimit limit;
66
+ } cudaThreadGetLimit_v3020_params;
67
+
68
+ typedef struct cudaThreadGetCacheConfig_v3020_params_st {
69
+ enum cudaFuncCache *pCacheConfig;
70
+ } cudaThreadGetCacheConfig_v3020_params;
71
+
72
+ typedef struct cudaThreadSetCacheConfig_v3020_params_st {
73
+ enum cudaFuncCache cacheConfig;
74
+ } cudaThreadSetCacheConfig_v3020_params;
75
+
76
+ typedef struct cudaSetDoubleForDevice_v3020_params_st {
77
+ double *d;
78
+ } cudaSetDoubleForDevice_v3020_params;
79
+
80
+ typedef struct cudaSetDoubleForHost_v3020_params_st {
81
+ double *d;
82
+ } cudaSetDoubleForHost_v3020_params;
83
+
84
+ typedef struct cudaCreateTextureObject_v2_v11080_params_st {
85
+ cudaTextureObject_t *pTexObject;
86
+ const struct cudaResourceDesc *pResDesc;
87
+ const struct cudaTextureDesc *pTexDesc;
88
+ const struct cudaResourceViewDesc *pResViewDesc;
89
+ } cudaCreateTextureObject_v2_v11080_params;
90
+
91
+ typedef struct cudaGetTextureObjectTextureDesc_v2_v11080_params_st {
92
+ struct cudaTextureDesc *pTexDesc;
93
+ cudaTextureObject_t texObject;
94
+ } cudaGetTextureObjectTextureDesc_v2_v11080_params;
95
+
96
+ typedef struct cudaBindTexture_v3020_params_st {
97
+ size_t *offset;
98
+ const struct textureReference *texref;
99
+ const void *devPtr;
100
+ const struct cudaChannelFormatDesc *desc;
101
+ size_t size __dv;
102
+ } cudaBindTexture_v3020_params;
103
+
104
+ typedef struct cudaBindTexture2D_v3020_params_st {
105
+ size_t *offset;
106
+ const struct textureReference *texref;
107
+ const void *devPtr;
108
+ const struct cudaChannelFormatDesc *desc;
109
+ size_t width;
110
+ size_t height;
111
+ size_t pitch;
112
+ } cudaBindTexture2D_v3020_params;
113
+
114
+ typedef struct cudaBindTextureToArray_v3020_params_st {
115
+ const struct textureReference *texref;
116
+ cudaArray_const_t array;
117
+ const struct cudaChannelFormatDesc *desc;
118
+ } cudaBindTextureToArray_v3020_params;
119
+
120
+ typedef struct cudaBindTextureToMipmappedArray_v5000_params_st {
121
+ const struct textureReference *texref;
122
+ cudaMipmappedArray_const_t mipmappedArray;
123
+ const struct cudaChannelFormatDesc *desc;
124
+ } cudaBindTextureToMipmappedArray_v5000_params;
125
+
126
+ typedef struct cudaUnbindTexture_v3020_params_st {
127
+ const struct textureReference *texref;
128
+ } cudaUnbindTexture_v3020_params;
129
+
130
+ typedef struct cudaGetTextureAlignmentOffset_v3020_params_st {
131
+ size_t *offset;
132
+ const struct textureReference *texref;
133
+ } cudaGetTextureAlignmentOffset_v3020_params;
134
+
135
+ typedef struct cudaGetTextureReference_v3020_params_st {
136
+ const struct textureReference **texref;
137
+ const void *symbol;
138
+ } cudaGetTextureReference_v3020_params;
139
+
140
+ typedef struct cudaBindSurfaceToArray_v3020_params_st {
141
+ const struct surfaceReference *surfref;
142
+ cudaArray_const_t array;
143
+ const struct cudaChannelFormatDesc *desc;
144
+ } cudaBindSurfaceToArray_v3020_params;
145
+
146
+ typedef struct cudaGetSurfaceReference_v3020_params_st {
147
+ const struct surfaceReference **surfref;
148
+ const void *symbol;
149
+ } cudaGetSurfaceReference_v3020_params;
150
+
151
+ typedef struct cudaGraphInstantiate_v10000_params_st {
152
+ cudaGraphExec_t *pGraphExec;
153
+ cudaGraph_t graph;
154
+ cudaGraphNode_t *pErrorNode;
155
+ char *pLogBuffer;
156
+ size_t bufferSize;
157
+ } cudaGraphInstantiate_v10000_params;
158
+
159
+ // Parameter trace structures for removed functions
160
+
161
+
162
+ // End of parameter trace structures
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2013-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
51
+ #pragma GCC visibility push(default)
52
+ #endif
53
+
54
+ // *************************************************************************
55
+ // Definitions of structs to hold parameters for each function
56
+ // *************************************************************************
57
+
58
+ typedef struct nvtxMarkEx_params_st {
59
+ const nvtxEventAttributes_t* eventAttrib;
60
+ } nvtxMarkEx_params;
61
+
62
+ typedef struct nvtxMarkA_params_st {
63
+ const char* message;
64
+ } nvtxMarkA_params;
65
+
66
+ typedef struct nvtxMarkW_params_st {
67
+ const wchar_t* message;
68
+ } nvtxMarkW_params;
69
+
70
+ typedef struct nvtxRangeStartEx_params_st {
71
+ const nvtxEventAttributes_t* eventAttrib;
72
+ } nvtxRangeStartEx_params;
73
+
74
+ typedef struct nvtxRangeStartA_params_st {
75
+ const char* message;
76
+ } nvtxRangeStartA_params;
77
+
78
+ typedef struct nvtxRangeStartW_params_st {
79
+ const wchar_t* message;
80
+ } nvtxRangeStartW_params;
81
+
82
+ typedef struct nvtxRangeEnd_params_st {
83
+ nvtxRangeId_t id;
84
+ } nvtxRangeEnd_params;
85
+
86
+ typedef struct nvtxRangePushEx_params_st {
87
+ const nvtxEventAttributes_t* eventAttrib;
88
+ } nvtxRangePushEx_params;
89
+
90
+ typedef struct nvtxRangePushA_params_st {
91
+ const char* message;
92
+ } nvtxRangePushA_params;
93
+
94
+ typedef struct nvtxRangePushW_params_st {
95
+ const wchar_t* message;
96
+ } nvtxRangePushW_params;
97
+
98
+ typedef struct nvtxRangePop_params_st {
99
+ /* WAR: Windows compiler doesn't allow empty structs */
100
+ /* This field shouldn't be used */
101
+ void *dummy;
102
+ } nvtxRangePop_params;
103
+
104
+ typedef struct nvtxNameCategoryA_params_st {
105
+ uint32_t category;
106
+ const char* name;
107
+ } nvtxNameCategoryA_params;
108
+
109
+ typedef struct nvtxNameCategoryW_params_st {
110
+ uint32_t category;
111
+ const wchar_t* name;
112
+ } nvtxNameCategoryW_params;
113
+
114
+ typedef struct nvtxNameOsThreadA_params_st {
115
+ uint32_t threadId;
116
+ const char* name;
117
+ } nvtxNameOsThreadA_params;
118
+
119
+ typedef struct nvtxNameOsThreadW_params_st {
120
+ uint32_t threadId;
121
+ const wchar_t* name;
122
+ } nvtxNameOsThreadW_params;
123
+
124
+ typedef struct nvtxNameCuDeviceA_params_st {
125
+ CUdevice device;
126
+ const char* name;
127
+ } nvtxNameCuDeviceA_params;
128
+
129
+ typedef struct nvtxNameCuDeviceW_params_st {
130
+ CUdevice device;
131
+ const wchar_t* name;
132
+ } nvtxNameCuDeviceW_params;
133
+
134
+ typedef struct nvtxNameCuContextA_params_st {
135
+ CUcontext context;
136
+ const char* name;
137
+ } nvtxNameCuContextA_params;
138
+
139
+ typedef struct nvtxNameCuContextW_params_st {
140
+ CUcontext context;
141
+ const wchar_t* name;
142
+ } nvtxNameCuContextW_params;
143
+
144
+ typedef struct nvtxNameCuStreamA_params_st {
145
+ CUstream stream;
146
+ const char* name;
147
+ } nvtxNameCuStreamA_params;
148
+
149
+ typedef struct nvtxNameCuStreamW_params_st {
150
+ CUstream stream;
151
+ const wchar_t* name;
152
+ } nvtxNameCuStreamW_params;
153
+
154
+ typedef struct nvtxNameCuEventA_params_st {
155
+ CUevent event;
156
+ const char* name;
157
+ } nvtxNameCuEventA_params;
158
+
159
+ typedef struct nvtxNameCuEventW_params_st {
160
+ CUevent event;
161
+ const wchar_t* name;
162
+ } nvtxNameCuEventW_params;
163
+
164
+ typedef struct nvtxNameCudaDeviceA_params_st {
165
+ int device;
166
+ const char* name;
167
+ } nvtxNameCudaDeviceA_params;
168
+
169
+ typedef struct nvtxNameCudaDeviceW_params_st {
170
+ int device;
171
+ const wchar_t* name;
172
+ } nvtxNameCudaDeviceW_params;
173
+
174
+ typedef struct nvtxNameCudaStreamA_params_st {
175
+ cudaStream_t stream;
176
+ const char* name;
177
+ } nvtxNameCudaStreamA_params;
178
+
179
+ typedef struct nvtxNameCudaStreamW_params_st {
180
+ cudaStream_t stream;
181
+ const wchar_t* name;
182
+ } nvtxNameCudaStreamW_params;
183
+
184
+ typedef struct nvtxNameCudaEventA_params_st {
185
+ cudaEvent_t event;
186
+ const char* name;
187
+ } nvtxNameCudaEventA_params;
188
+
189
+ typedef struct nvtxNameCudaEventW_params_st {
190
+ cudaEvent_t event;
191
+ const wchar_t* name;
192
+ } nvtxNameCudaEventW_params;
193
+
194
+ typedef struct nvtxDomainCreateA_params_st {
195
+ const char* name;
196
+ } nvtxDomainCreateA_params;
197
+
198
+ typedef struct nvtxDomainDestroy_params_st {
199
+ nvtxDomainHandle_t domain;
200
+ } nvtxDomainDestroy_params;
201
+
202
+ typedef struct nvtxDomainMarkEx_params_st {
203
+ nvtxDomainHandle_t domain;
204
+ nvtxMarkEx_params core;
205
+ } nvtxDomainMarkEx_params;
206
+
207
+ typedef struct nvtxDomainRangeStartEx_params_st {
208
+ nvtxDomainHandle_t domain;
209
+ nvtxRangeStartEx_params core;
210
+ } nvtxDomainRangeStartEx_params;
211
+
212
+ typedef struct nvtxDomainRangeEnd_params_st {
213
+ nvtxDomainHandle_t domain;
214
+ nvtxRangeEnd_params core;
215
+ } nvtxDomainRangeEnd_params;
216
+
217
+ typedef struct nvtxDomainRangePushEx_params_st {
218
+ nvtxDomainHandle_t domain;
219
+ nvtxRangePushEx_params core;
220
+ } nvtxDomainRangePushEx_params;
221
+
222
+ typedef struct nvtxDomainRangePop_params_st {
223
+ nvtxDomainHandle_t domain;
224
+ } nvtxDomainRangePop_params;
225
+
226
+ typedef struct nvtxSyncUserCreate_params_st {
227
+ nvtxDomainHandle_t domain;
228
+ const nvtxSyncUserAttributes_t* attribs;
229
+ } nvtxSyncUserCreate_params;
230
+
231
+ typedef struct nvtxSyncUserCommon_params_st {
232
+ nvtxSyncUser_t handle;
233
+ } nvtxSyncUserCommon_params;
234
+
235
+ typedef struct nvtxDomainRegisterStringA_params_st {
236
+ nvtxDomainHandle_t domain;
237
+ const char* string;
238
+ } nvtxDomainRegisterStringA_params;
239
+
240
+ typedef struct nvtxDomainRegisterStringW_params_st {
241
+ nvtxDomainHandle_t domain;
242
+ const char* string;
243
+ } nvtxDomainRegisterStringW_params;
244
+
245
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
246
+ #pragma GCC visibility pop
247
+ #endif
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_CUDA_HOST_H
2
+ #define NVPERF_CUDA_HOST_H
3
+
4
+ /*
5
+ * Copyright 2014-2022 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
+ #include <stddef.h>
42
+ #include <stdint.h>
43
+ #include "nvperf_common.h"
44
+ #include "nvperf_host.h"
45
+
46
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
47
+ #pragma GCC visibility push(default)
48
+ #if !defined(NVPW_LOCAL)
49
+ #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
50
+ #endif
51
+ #else
52
+ #if !defined(NVPW_LOCAL)
53
+ #define NVPW_LOCAL
54
+ #endif
55
+ #endif
56
+
57
+ #ifdef __cplusplus
58
+ extern "C" {
59
+ #endif
60
+
61
+ /**
62
+ * @file nvperf_cuda_host.h
63
+ */
64
+
65
+ /// 'NVPA_MetricsContext' and its APIs are deprecated, please use 'NVPW_MetricsEvaluator' and its APIs instead.
66
+ typedef struct NVPA_MetricsContext NVPA_MetricsContext;
67
+
68
+ typedef struct NVPW_CUDA_MetricsContext_Create_Params
69
+ {
70
+ /// [in]
71
+ size_t structSize;
72
+ /// [in] assign to NULL
73
+ void* pPriv;
74
+ /// [in]
75
+ const char* pChipName;
76
+ /// [out]
77
+ struct NVPA_MetricsContext* pMetricsContext;
78
+ } NVPW_CUDA_MetricsContext_Create_Params;
79
+ #define NVPW_CUDA_MetricsContext_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsContext_Create_Params, pMetricsContext)
80
+
81
+ NVPA_Status NVPW_CUDA_MetricsContext_Create(NVPW_CUDA_MetricsContext_Create_Params* pParams);
82
+
83
+ typedef struct NVPW_CUDA_RawMetricsConfig_Create_Params
84
+ {
85
+ /// [in]
86
+ size_t structSize;
87
+ /// [in] assign to NULL
88
+ void* pPriv;
89
+ /// [in]
90
+ NVPA_ActivityKind activityKind;
91
+ /// [in]
92
+ const char* pChipName;
93
+ /// [out] new NVPA_RawMetricsConfig object
94
+ struct NVPA_RawMetricsConfig* pRawMetricsConfig;
95
+ } NVPW_CUDA_RawMetricsConfig_Create_Params;
96
+ #define NVPW_CUDA_RawMetricsConfig_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_Params, pRawMetricsConfig)
97
+
98
+ NVPA_Status NVPW_CUDA_RawMetricsConfig_Create(NVPW_CUDA_RawMetricsConfig_Create_Params* pParams);
99
+
100
+ typedef struct NVPW_CUDA_RawMetricsConfig_Create_V2_Params
101
+ {
102
+ /// [in]
103
+ size_t structSize;
104
+ /// [in] assign to NULL
105
+ void* pPriv;
106
+ /// [in]
107
+ NVPA_ActivityKind activityKind;
108
+ /// [in] accepted for chips supported at the time-of-release.
109
+ const char* pChipName;
110
+ /// [in] buffer with counter availability image - required for future chip support
111
+ const uint8_t* pCounterAvailabilityImage;
112
+ /// [out] new NVPA_RawMetricsConfig object
113
+ struct NVPA_RawMetricsConfig* pRawMetricsConfig;
114
+ } NVPW_CUDA_RawMetricsConfig_Create_V2_Params;
115
+ #define NVPW_CUDA_RawMetricsConfig_Create_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_V2_Params, pRawMetricsConfig)
116
+
117
+ /// Use either 'pChipName' or 'pCounterAvailabilityImage'.
118
+ NVPA_Status NVPW_CUDA_RawMetricsConfig_Create_V2(NVPW_CUDA_RawMetricsConfig_Create_V2_Params* pParams);
119
+
120
+ typedef struct NVPW_CUDA_CounterDataBuilder_Create_Params
121
+ {
122
+ /// [in]
123
+ size_t structSize;
124
+ /// [in] assign to NULL
125
+ void* pPriv;
126
+ /// [in] accepted for chips supported at the time-of-release.
127
+ const char* pChipName;
128
+ /// [in] buffer with counter availability image - required for future chip support
129
+ const uint8_t* pCounterAvailabilityImage;
130
+ /// [out] new NVPA_CounterDataBuilder object
131
+ struct NVPA_CounterDataBuilder* pCounterDataBuilder;
132
+ } NVPW_CUDA_CounterDataBuilder_Create_Params;
133
+ #define NVPW_CUDA_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_CounterDataBuilder_Create_Params, pCounterDataBuilder)
134
+
135
+ /// Use either 'pChipName' or 'pCounterAvailabilityImage'.
136
+ NVPA_Status NVPW_CUDA_CounterDataBuilder_Create(NVPW_CUDA_CounterDataBuilder_Create_Params* pParams);
137
+
138
+ typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator;
139
+
140
+ typedef struct NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params
141
+ {
142
+ /// [in]
143
+ size_t structSize;
144
+ /// [in] assign to NULL
145
+ void* pPriv;
146
+ /// [in] accepted for chips supported at the time-of-release.
147
+ const char* pChipName;
148
+ /// [in] buffer with counter availability image - required for future chip support
149
+ const uint8_t* pCounterAvailabilityImage;
150
+ /// [out]
151
+ size_t scratchBufferSize;
152
+ } NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params;
153
+ #define NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params, scratchBufferSize)
154
+
155
+ /// Use either 'pChipName' or 'pCounterAvailabilityImage'.
156
+ NVPA_Status NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params* pParams);
157
+
158
+ typedef struct NVPW_CUDA_MetricsEvaluator_Initialize_Params
159
+ {
160
+ /// [in]
161
+ size_t structSize;
162
+ /// [in] assign to NULL
163
+ void* pPriv;
164
+ /// [in]
165
+ uint8_t* pScratchBuffer;
166
+ /// [in] the size of the 'pScratchBuffer' array, should be at least the size of the 'scratchBufferSize' returned
167
+ /// by 'NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize'
168
+ size_t scratchBufferSize;
169
+ /// [in] accepted for chips supported at the time-of-release.
170
+ const char* pChipName;
171
+ /// [in] buffer with counter availability image - required for future chip support
172
+ const uint8_t* pCounterAvailabilityImage;
173
+ /// [in]
174
+ const uint8_t* pCounterDataImage;
175
+ /// [in] must be provided if 'pCounterDataImage' is not NULL
176
+ size_t counterDataImageSize;
177
+ /// [out]
178
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
179
+ } NVPW_CUDA_MetricsEvaluator_Initialize_Params;
180
+ #define NVPW_CUDA_MetricsEvaluator_Initialize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_Initialize_Params, pMetricsEvaluator)
181
+
182
+ /// Use one of 'pChipName', 'pCounterAvailabilityImage', or 'pCounterDataImage'. 'pChipName' or
183
+ /// 'pCounterAvailabilityImage' will create a metrics evaluator based on a virtual device while 'pCounterDataImage'
184
+ /// will create a metrics evaluator based on the actual device.
185
+ NVPA_Status NVPW_CUDA_MetricsEvaluator_Initialize(NVPW_CUDA_MetricsEvaluator_Initialize_Params* pParams);
186
+
187
+
188
+
189
+ #ifdef __cplusplus
190
+ } // extern "C"
191
+ #endif
192
+
193
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
194
+ #pragma GCC visibility pop
195
+ #endif
196
+
197
+ #endif // NVPERF_CUDA_HOST_H
moondream/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_host.h ADDED
@@ -0,0 +1,1528 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_HOST_H
2
+ #define NVPERF_HOST_H
3
+
4
+ /*
5
+ * Copyright 2014-2022 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
+ #include <stddef.h>
42
+ #include <stdint.h>
43
+ #include "nvperf_common.h"
44
+
45
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
46
+ #pragma GCC visibility push(default)
47
+ #if !defined(NVPW_LOCAL)
48
+ #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
49
+ #endif
50
+ #else
51
+ #if !defined(NVPW_LOCAL)
52
+ #define NVPW_LOCAL
53
+ #endif
54
+ #endif
55
+
56
+ #ifdef __cplusplus
57
+ extern "C" {
58
+ #endif
59
+
60
+ /**
61
+ * @file nvperf_host.h
62
+ */
63
+
64
+
65
+ // Guard against multiple definition of NvPerf host types
66
+ #ifndef NVPERF_HOST_API_DEFINED
67
+ #define NVPERF_HOST_API_DEFINED
68
+
69
+
70
+ /***************************************************************************//**
71
+ * @name Host Configuration
72
+ * @{
73
+ */
74
+
75
+ typedef struct NVPW_InitializeHost_Params
76
+ {
77
+ /// [in]
78
+ size_t structSize;
79
+ /// [in] assign to NULL
80
+ void* pPriv;
81
+ } NVPW_InitializeHost_Params;
82
+ #define NVPW_InitializeHost_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_InitializeHost_Params, pPriv)
83
+
84
+ /// Load the host library.
85
+ NVPA_Status NVPW_InitializeHost(NVPW_InitializeHost_Params* pParams);
86
+
87
+ typedef struct NVPW_CounterData_CalculateCounterDataImageCopySize_Params
88
+ {
89
+ /// [in]
90
+ size_t structSize;
91
+ /// [in] assign to NULL
92
+ void* pPriv;
93
+ /// The CounterDataPrefix generated from e.g. nvperf2 initdata or
94
+ /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8).
95
+ const uint8_t* pCounterDataPrefix;
96
+ size_t counterDataPrefixSize;
97
+ /// max number of ranges that can be profiled
98
+ uint32_t maxNumRanges;
99
+ /// max number of RangeTree nodes; must be >= maxNumRanges
100
+ uint32_t maxNumRangeTreeNodes;
101
+ /// max string length of each RangeName, including the trailing NUL character
102
+ uint32_t maxRangeNameLength;
103
+ const uint8_t* pCounterDataSrc;
104
+ /// [out] required size of the copy buffer
105
+ size_t copyDataImageCounterSize;
106
+ } NVPW_CounterData_CalculateCounterDataImageCopySize_Params;
107
+ #define NVPW_CounterData_CalculateCounterDataImageCopySize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_CalculateCounterDataImageCopySize_Params, copyDataImageCounterSize)
108
+
109
+ NVPA_Status NVPW_CounterData_CalculateCounterDataImageCopySize(NVPW_CounterData_CalculateCounterDataImageCopySize_Params* pParams);
110
+
111
+ typedef struct NVPW_CounterData_InitializeCounterDataImageCopy_Params
112
+ {
113
+ /// [in]
114
+ size_t structSize;
115
+ /// [in] assign to NULL
116
+ void* pPriv;
117
+ /// The CounterDataPrefix generated from e.g. nvperf2 initdata or
118
+ /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8).
119
+ const uint8_t* pCounterDataPrefix;
120
+ size_t counterDataPrefixSize;
121
+ /// max number of ranges that can be profiled
122
+ uint32_t maxNumRanges;
123
+ /// max number of RangeTree nodes; must be >= maxNumRanges
124
+ uint32_t maxNumRangeTreeNodes;
125
+ /// max string length of each RangeName, including the trailing NUL character
126
+ uint32_t maxRangeNameLength;
127
+ const uint8_t* pCounterDataSrc;
128
+ uint8_t* pCounterDataDst;
129
+ } NVPW_CounterData_InitializeCounterDataImageCopy_Params;
130
+ #define NVPW_CounterData_InitializeCounterDataImageCopy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_InitializeCounterDataImageCopy_Params, pCounterDataDst)
131
+
132
+ NVPA_Status NVPW_CounterData_InitializeCounterDataImageCopy(NVPW_CounterData_InitializeCounterDataImageCopy_Params* pParams);
133
+
134
+ typedef struct NVPA_CounterDataCombiner NVPA_CounterDataCombiner;
135
+
136
+ typedef struct NVPW_CounterDataCombiner_Create_Params
137
+ {
138
+ /// [in]
139
+ size_t structSize;
140
+ /// [in] assign to NULL
141
+ void* pPriv;
142
+ /// The destination counter data into which the source datas will be combined
143
+ uint8_t* pCounterDataDst;
144
+ /// [out] The created counter data combiner
145
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
146
+ } NVPW_CounterDataCombiner_Create_Params;
147
+ #define NVPW_CounterDataCombiner_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Create_Params, pCounterDataCombiner)
148
+
149
+ NVPA_Status NVPW_CounterDataCombiner_Create(NVPW_CounterDataCombiner_Create_Params* pParams);
150
+
151
+ typedef struct NVPW_CounterDataCombiner_Destroy_Params
152
+ {
153
+ /// [in]
154
+ size_t structSize;
155
+ /// [in] assign to NULL
156
+ void* pPriv;
157
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
158
+ } NVPW_CounterDataCombiner_Destroy_Params;
159
+ #define NVPW_CounterDataCombiner_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Destroy_Params, pCounterDataCombiner)
160
+
161
+ NVPA_Status NVPW_CounterDataCombiner_Destroy(NVPW_CounterDataCombiner_Destroy_Params* pParams);
162
+
163
+ typedef struct NVPW_CounterDataCombiner_CreateRange_Params
164
+ {
165
+ /// [in]
166
+ size_t structSize;
167
+ /// [in] assign to NULL
168
+ void* pPriv;
169
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
170
+ size_t numDescriptions;
171
+ const char* const* ppDescriptions;
172
+ /// [out]
173
+ size_t rangeIndexDst;
174
+ } NVPW_CounterDataCombiner_CreateRange_Params;
175
+ #define NVPW_CounterDataCombiner_CreateRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CreateRange_Params, rangeIndexDst)
176
+
177
+ NVPA_Status NVPW_CounterDataCombiner_CreateRange(NVPW_CounterDataCombiner_CreateRange_Params* pParams);
178
+
179
+ typedef struct NVPW_CounterDataCombiner_CopyIntoRange_Params
180
+ {
181
+ /// [in]
182
+ size_t structSize;
183
+ /// [in] assign to NULL
184
+ void* pPriv;
185
+ /// [in]
186
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
187
+ /// [in]
188
+ size_t rangeIndexDst;
189
+ /// [in]
190
+ const uint8_t* pCounterDataSrc;
191
+ /// [in]
192
+ size_t rangeIndexSrc;
193
+ } NVPW_CounterDataCombiner_CopyIntoRange_Params;
194
+ #define NVPW_CounterDataCombiner_CopyIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CopyIntoRange_Params, rangeIndexSrc)
195
+
196
+ /// In order to use this API, the source counter data and the destination counter data must have identical counters
197
+ NVPA_Status NVPW_CounterDataCombiner_CopyIntoRange(NVPW_CounterDataCombiner_CopyIntoRange_Params* pParams);
198
+
199
+ typedef struct NVPW_CounterDataCombiner_AccumulateIntoRange_Params
200
+ {
201
+ /// [in]
202
+ size_t structSize;
203
+ /// [in] assign to NULL
204
+ void* pPriv;
205
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
206
+ size_t rangeIndexDst;
207
+ uint32_t dstMultiplier;
208
+ const uint8_t* pCounterDataSrc;
209
+ size_t rangeIndexSrc;
210
+ uint32_t srcMultiplier;
211
+ } NVPW_CounterDataCombiner_AccumulateIntoRange_Params;
212
+ #define NVPW_CounterDataCombiner_AccumulateIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_AccumulateIntoRange_Params, srcMultiplier)
213
+
214
+ NVPA_Status NVPW_CounterDataCombiner_AccumulateIntoRange(NVPW_CounterDataCombiner_AccumulateIntoRange_Params* pParams);
215
+
216
+ typedef struct NVPW_CounterDataCombiner_SumIntoRange_Params
217
+ {
218
+ /// [in]
219
+ size_t structSize;
220
+ /// [in] assign to NULL
221
+ void* pPriv;
222
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
223
+ size_t rangeIndexDst;
224
+ const uint8_t* pCounterDataSrc;
225
+ size_t rangeIndexSrc;
226
+ } NVPW_CounterDataCombiner_SumIntoRange_Params;
227
+ #define NVPW_CounterDataCombiner_SumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_SumIntoRange_Params, rangeIndexSrc)
228
+
229
+ NVPA_Status NVPW_CounterDataCombiner_SumIntoRange(NVPW_CounterDataCombiner_SumIntoRange_Params* pParams);
230
+
231
+ typedef struct NVPW_CounterDataCombiner_WeightedSumIntoRange_Params
232
+ {
233
+ /// [in]
234
+ size_t structSize;
235
+ /// [in] assign to NULL
236
+ void* pPriv;
237
+ NVPA_CounterDataCombiner* pCounterDataCombiner;
238
+ size_t rangeIndexDst;
239
+ double dstMultiplier;
240
+ const uint8_t* pCounterDataSrc;
241
+ size_t rangeIndexSrc;
242
+ double srcMultiplier;
243
+ } NVPW_CounterDataCombiner_WeightedSumIntoRange_Params;
244
+ #define NVPW_CounterDataCombiner_WeightedSumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params, srcMultiplier)
245
+
246
+ NVPA_Status NVPW_CounterDataCombiner_WeightedSumIntoRange(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params* pParams);
247
+
248
+ /**
249
+ * @}
250
+ ******************************************************************************/
251
+
252
+ /***************************************************************************//**
253
+ * @name Metrics Configuration
254
+ * @{
255
+ */
256
+
257
+ typedef struct NVPA_RawMetricsConfig NVPA_RawMetricsConfig;
258
+
259
+ typedef struct NVPA_RawMetricRequest
260
+ {
261
+ /// [in]
262
+ size_t structSize;
263
+ /// [in] assign to NULL
264
+ void* pPriv;
265
+ /// in
266
+ const char* pMetricName;
267
+ /// in
268
+ NVPA_Bool isolated;
269
+ /// in; ignored by AddMetric but observed by CounterData initialization
270
+ NVPA_Bool keepInstances;
271
+ } NVPA_RawMetricRequest;
272
+ #define NVPA_RAW_METRIC_REQUEST_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPA_RawMetricRequest, keepInstances)
273
+
274
+ typedef struct NVPW_GetSupportedChipNames_Params
275
+ {
276
+ /// [in]
277
+ size_t structSize;
278
+ /// [in] assign to NULL
279
+ void* pPriv;
280
+ /// [out]
281
+ const char* const* ppChipNames;
282
+ /// [out]
283
+ size_t numChipNames;
284
+ } NVPW_GetSupportedChipNames_Params;
285
+ #define NVPW_GetSupportedChipNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_GetSupportedChipNames_Params, numChipNames)
286
+
287
+ NVPA_Status NVPW_GetSupportedChipNames(NVPW_GetSupportedChipNames_Params* pParams);
288
+
289
+ typedef struct NVPW_RawMetricsConfig_Destroy_Params
290
+ {
291
+ /// [in]
292
+ size_t structSize;
293
+ /// [in] assign to NULL
294
+ void* pPriv;
295
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
296
+ } NVPW_RawMetricsConfig_Destroy_Params;
297
+ #define NVPW_RawMetricsConfig_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_Destroy_Params, pRawMetricsConfig)
298
+
299
+ NVPA_Status NVPW_RawMetricsConfig_Destroy(NVPW_RawMetricsConfig_Destroy_Params* pParams);
300
+
301
+ typedef struct NVPW_RawMetricsConfig_SetCounterAvailability_Params
302
+ {
303
+ /// [in]
304
+ size_t structSize;
305
+ /// [in] assign to NULL
306
+ void* pPriv;
307
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
308
+ /// [in] buffer with counter availability image
309
+ const uint8_t* pCounterAvailabilityImage;
310
+ } NVPW_RawMetricsConfig_SetCounterAvailability_Params;
311
+ #define NVPW_RawMetricsConfig_SetCounterAvailability_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_SetCounterAvailability_Params, pCounterAvailabilityImage)
312
+
313
+ NVPA_Status NVPW_RawMetricsConfig_SetCounterAvailability(NVPW_RawMetricsConfig_SetCounterAvailability_Params* pParams);
314
+
315
+ typedef struct NVPW_RawMetricsConfig_BeginPassGroup_Params
316
+ {
317
+ /// [in]
318
+ size_t structSize;
319
+ /// [in] assign to NULL
320
+ void* pPriv;
321
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
322
+ size_t maxPassCount;
323
+ } NVPW_RawMetricsConfig_BeginPassGroup_Params;
324
+ #define NVPW_RawMetricsConfig_BeginPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_BeginPassGroup_Params, maxPassCount)
325
+
326
+ NVPA_Status NVPW_RawMetricsConfig_BeginPassGroup(NVPW_RawMetricsConfig_BeginPassGroup_Params* pParams);
327
+
328
+ typedef struct NVPW_RawMetricsConfig_EndPassGroup_Params
329
+ {
330
+ /// [in]
331
+ size_t structSize;
332
+ /// [in] assign to NULL
333
+ void* pPriv;
334
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
335
+ } NVPW_RawMetricsConfig_EndPassGroup_Params;
336
+ #define NVPW_RawMetricsConfig_EndPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_EndPassGroup_Params, pRawMetricsConfig)
337
+
338
+ NVPA_Status NVPW_RawMetricsConfig_EndPassGroup(NVPW_RawMetricsConfig_EndPassGroup_Params* pParams);
339
+
340
+ typedef struct NVPW_RawMetricsConfig_GetNumMetrics_Params
341
+ {
342
+ /// [in]
343
+ size_t structSize;
344
+ /// [in] assign to NULL
345
+ void* pPriv;
346
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
347
+ /// [out]
348
+ size_t numMetrics;
349
+ } NVPW_RawMetricsConfig_GetNumMetrics_Params;
350
+ #define NVPW_RawMetricsConfig_GetNumMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumMetrics_Params, numMetrics)
351
+
352
+ NVPA_Status NVPW_RawMetricsConfig_GetNumMetrics(NVPW_RawMetricsConfig_GetNumMetrics_Params* pParams);
353
+
354
+ typedef struct NVPW_RawMetricsConfig_GetMetricProperties_Params
355
+ {
356
+ /// [in]
357
+ size_t structSize;
358
+ /// [in] assign to NULL
359
+ void* pPriv;
360
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
361
+ size_t metricIndex;
362
+ /// [out]
363
+ const char* pMetricName;
364
+ /// [out]
365
+ NVPA_Bool supportsPipelined;
366
+ /// [out]
367
+ NVPA_Bool supportsIsolated;
368
+ } NVPW_RawMetricsConfig_GetMetricProperties_Params;
369
+ #define NVPW_RawMetricsConfig_GetMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_Params, supportsIsolated)
370
+
371
+ NVPA_Status NVPW_RawMetricsConfig_GetMetricProperties(NVPW_RawMetricsConfig_GetMetricProperties_Params* pParams);
372
+
373
+ typedef struct NVPW_RawMetricsConfig_GetMetricProperties_V2_Params
374
+ {
375
+ /// [in]
376
+ size_t structSize;
377
+ /// [in] assign to NULL
378
+ void* pPriv;
379
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
380
+ size_t metricIndex;
381
+ /// [out]
382
+ const char* pMetricName;
383
+ } NVPW_RawMetricsConfig_GetMetricProperties_V2_Params;
384
+ #define NVPW_RawMetricsConfig_GetMetricProperties_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params, pMetricName)
385
+
386
+ NVPA_Status NVPW_RawMetricsConfig_GetMetricProperties_V2(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params* pParams);
387
+
388
+ typedef struct NVPW_RawMetricsConfig_AddMetrics_Params
389
+ {
390
+ /// [in]
391
+ size_t structSize;
392
+ /// [in] assign to NULL
393
+ void* pPriv;
394
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
395
+ const NVPA_RawMetricRequest* pRawMetricRequests;
396
+ size_t numMetricRequests;
397
+ } NVPW_RawMetricsConfig_AddMetrics_Params;
398
+ #define NVPW_RawMetricsConfig_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_AddMetrics_Params, numMetricRequests)
399
+
400
+ NVPA_Status NVPW_RawMetricsConfig_AddMetrics(NVPW_RawMetricsConfig_AddMetrics_Params* pParams);
401
+
402
+ typedef struct NVPW_RawMetricsConfig_IsAddMetricsPossible_Params
403
+ {
404
+ /// [in]
405
+ size_t structSize;
406
+ /// [in] assign to NULL
407
+ void* pPriv;
408
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
409
+ const NVPA_RawMetricRequest* pRawMetricRequests;
410
+ size_t numMetricRequests;
411
+ /// [out]
412
+ NVPA_Bool isPossible;
413
+ } NVPW_RawMetricsConfig_IsAddMetricsPossible_Params;
414
+ #define NVPW_RawMetricsConfig_IsAddMetricsPossible_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params, isPossible)
415
+
416
+ NVPA_Status NVPW_RawMetricsConfig_IsAddMetricsPossible(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params* pParams);
417
+
418
+ typedef struct NVPW_RawMetricsConfig_GenerateConfigImage_Params
419
+ {
420
+ /// [in]
421
+ size_t structSize;
422
+ /// [in] assign to NULL
423
+ void* pPriv;
424
+ NVPA_RawMetricsConfig* pRawMetricsConfig;
425
+ /// [in] If true, all existing pass groups may be merged to reduce number of passes.
426
+ /// If merge was successful, distribution of counters in passes may be updated as a side-effect. The effects
427
+ /// will be persistent in pRawMetricsConfig.
428
+ NVPA_Bool mergeAllPassGroups;
429
+ } NVPW_RawMetricsConfig_GenerateConfigImage_Params;
430
+ #define NVPW_RawMetricsConfig_GenerateConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GenerateConfigImage_Params, mergeAllPassGroups)
431
+
432
+ /// This API may fail if called inside a pass group with `mergeAllPassGroups` = true.
433
+ NVPA_Status NVPW_RawMetricsConfig_GenerateConfigImage(NVPW_RawMetricsConfig_GenerateConfigImage_Params* pParams);
434
+
435
+ typedef struct NVPW_RawMetricsConfig_GetConfigImage_Params
436
+ {
437
+ /// [in]
438
+ size_t structSize;
439
+ /// [in] assign to NULL
440
+ void* pPriv;
441
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
442
+ /// [in] Number of bytes allocated for pBuffer
443
+ size_t bytesAllocated;
444
+ /// [out] [optional] Buffer receiving the config image
445
+ uint8_t* pBuffer;
446
+ /// [out] Count of bytes that would be copied into pBuffer
447
+ size_t bytesCopied;
448
+ } NVPW_RawMetricsConfig_GetConfigImage_Params;
449
+ #define NVPW_RawMetricsConfig_GetConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetConfigImage_Params, bytesCopied)
450
+
451
+ NVPA_Status NVPW_RawMetricsConfig_GetConfigImage(NVPW_RawMetricsConfig_GetConfigImage_Params* pParams);
452
+
453
+ typedef struct NVPW_RawMetricsConfig_GetNumPasses_Params
454
+ {
455
+ /// [in]
456
+ size_t structSize;
457
+ /// [in] assign to NULL
458
+ void* pPriv;
459
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
460
+ /// [out]
461
+ size_t numPipelinedPasses;
462
+ /// [out]
463
+ size_t numIsolatedPasses;
464
+ } NVPW_RawMetricsConfig_GetNumPasses_Params;
465
+ #define NVPW_RawMetricsConfig_GetNumPasses_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_Params, numIsolatedPasses)
466
+
467
+ /// Total num passes = numPipelinedPasses + numIsolatedPasses * numNestingLevels
468
+ NVPA_Status NVPW_RawMetricsConfig_GetNumPasses(NVPW_RawMetricsConfig_GetNumPasses_Params* pParams);
469
+
470
+ typedef struct NVPW_RawMetricsConfig_GetNumPasses_V2_Params
471
+ {
472
+ /// [in]
473
+ size_t structSize;
474
+ /// [in] assign to NULL
475
+ void* pPriv;
476
+ /// [in]
477
+ const NVPA_RawMetricsConfig* pRawMetricsConfig;
478
+ /// [out]
479
+ size_t numPasses;
480
+ } NVPW_RawMetricsConfig_GetNumPasses_V2_Params;
481
+ #define NVPW_RawMetricsConfig_GetNumPasses_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_V2_Params, numPasses)
482
+
483
+ /// Total num passes = numPasses * numNestingLevels
484
+ NVPA_Status NVPW_RawMetricsConfig_GetNumPasses_V2(NVPW_RawMetricsConfig_GetNumPasses_V2_Params* pParams);
485
+
486
+ typedef struct NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params
487
+ {
488
+ /// [in]
489
+ size_t structSize;
490
+ /// [in] assign to NULL
491
+ void* pPriv;
492
+ /// [in] Typically created by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8).
493
+ const uint8_t* pConfig;
494
+ /// [in]
495
+ size_t configSize;
496
+ /// [out]
497
+ size_t sampleSize;
498
+ } NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params;
499
+ #define NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params, sampleSize)
500
+
501
+ /// Estimate per sample records size based on a virtual device
502
+ NVPA_Status NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params* pParams);
503
+
504
+ typedef struct NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params
505
+ {
506
+ /// [in]
507
+ size_t structSize;
508
+ /// [in] assign to NULL
509
+ void* pPriv;
510
+ /// [in] Typically created by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8).
511
+ const uint8_t* pConfig;
512
+ /// [in]
513
+ size_t configSize;
514
+ /// [out]
515
+ size_t sampleSize;
516
+ } NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params;
517
+ #define NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params, sampleSize)
518
+
519
+ /// Estimate per sample records size based on a virtual device
520
+ NVPA_Status NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params* pParams);
521
+
522
+ /**
523
+ * @}
524
+ ******************************************************************************/
525
+
526
+ /***************************************************************************//**
527
+ * @name CounterData Creation
528
+ * @{
529
+ */
530
+
531
+ typedef struct NVPA_CounterDataBuilder NVPA_CounterDataBuilder;
532
+
533
+ typedef struct NVPW_CounterDataBuilder_Create_Params
534
+ {
535
+ /// [in]
536
+ size_t structSize;
537
+ /// [in] assign to NULL
538
+ void* pPriv;
539
+ /// [out]
540
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
541
+ const char* pChipName;
542
+ } NVPW_CounterDataBuilder_Create_Params;
543
+ #define NVPW_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Create_Params, pChipName)
544
+
545
+ NVPA_Status NVPW_CounterDataBuilder_Create(NVPW_CounterDataBuilder_Create_Params* pParams);
546
+
547
+ typedef struct NVPW_CounterDataBuilder_Destroy_Params
548
+ {
549
+ /// [in]
550
+ size_t structSize;
551
+ /// [in] assign to NULL
552
+ void* pPriv;
553
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
554
+ } NVPW_CounterDataBuilder_Destroy_Params;
555
+ #define NVPW_CounterDataBuilder_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Destroy_Params, pCounterDataBuilder)
556
+
557
+ NVPA_Status NVPW_CounterDataBuilder_Destroy(NVPW_CounterDataBuilder_Destroy_Params* pParams);
558
+
559
+ typedef struct NVPW_CounterDataBuilder_AddMetrics_Params
560
+ {
561
+ /// [in]
562
+ size_t structSize;
563
+ /// [in] assign to NULL
564
+ void* pPriv;
565
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
566
+ const NVPA_RawMetricRequest* pRawMetricRequests;
567
+ size_t numMetricRequests;
568
+ } NVPW_CounterDataBuilder_AddMetrics_Params;
569
+ #define NVPW_CounterDataBuilder_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_AddMetrics_Params, numMetricRequests)
570
+
571
+ NVPA_Status NVPW_CounterDataBuilder_AddMetrics(NVPW_CounterDataBuilder_AddMetrics_Params* pParams);
572
+
573
+ typedef struct NVPW_CounterDataBuilder_GetCounterDataPrefix_Params
574
+ {
575
+ /// [in]
576
+ size_t structSize;
577
+ /// [in] assign to NULL
578
+ void* pPriv;
579
+ NVPA_CounterDataBuilder* pCounterDataBuilder;
580
+ /// [in] Number of bytes allocated for pBuffer
581
+ size_t bytesAllocated;
582
+ /// [out] [optional] Buffer receiving the counter data prefix
583
+ uint8_t* pBuffer;
584
+ /// [out] Count of bytes that would be copied to pBuffer
585
+ size_t bytesCopied;
586
+ } NVPW_CounterDataBuilder_GetCounterDataPrefix_Params;
587
+ #define NVPW_CounterDataBuilder_GetCounterDataPrefix_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params, bytesCopied)
588
+
589
+ NVPA_Status NVPW_CounterDataBuilder_GetCounterDataPrefix(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params* pParams);
590
+
591
+ /**
592
+ * @}
593
+ ******************************************************************************/
594
+
595
+ /***************************************************************************//**
596
+ * @name MetricsContext - metric configuration and evaluation
597
+ * @{
598
+ */
599
+
600
+ /// 'NVPA_MetricsContext' and its APIs are deprecated, please use 'NVPW_MetricsEvaluator' and its APIs instead.
601
+ typedef struct NVPA_MetricsContext NVPA_MetricsContext;
602
+
603
+ typedef enum NVPA_MetricDetailLevel
604
+ {
605
+ NVPA_METRIC_DETAIL_LEVEL_INVALID,
606
+ NVPA_METRIC_DETAIL_LEVEL_GPU,
607
+ NVPA_METRIC_DETAIL_LEVEL_ALL,
608
+ NVPA_METRIC_DETAIL_LEVEL_GPU_AND_LEAF_INSTANCES,
609
+ NVPA_METRIC_DETAIL_LEVEL__COUNT
610
+ } NVPA_MetricDetailLevel;
611
+
612
+ typedef struct NVPW_MetricsContext_Destroy_Params
613
+ {
614
+ /// [in]
615
+ size_t structSize;
616
+ /// [in] assign to NULL
617
+ void* pPriv;
618
+ NVPA_MetricsContext* pMetricsContext;
619
+ } NVPW_MetricsContext_Destroy_Params;
620
+ #define NVPW_MetricsContext_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_Destroy_Params, pMetricsContext)
621
+
622
+ NVPA_Status NVPW_MetricsContext_Destroy(NVPW_MetricsContext_Destroy_Params* pParams);
623
+
624
+ typedef struct NVPW_MetricsContext_RunScript_Params
625
+ {
626
+ /// [in]
627
+ size_t structSize;
628
+ /// [in] assign to NULL
629
+ void* pPriv;
630
+ NVPA_MetricsContext* pMetricsContext;
631
+ /// in : if true, upon error, calls PyErr_Print() which causes exceptions to be logged to stderr
632
+ NVPA_Bool printErrors;
633
+ /// in : the script source code
634
+ const char* pSource;
635
+ /// in : the filename reported in stack traces; if NULL, uses an auto-generated name
636
+ const char* pFileName;
637
+ } NVPW_MetricsContext_RunScript_Params;
638
+ #define NVPW_MetricsContext_RunScript_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_RunScript_Params, pFileName)
639
+
640
+ /// Runs code in the metrics module. Additional metrics can be added through this interface.
641
+ /// If printErrors is true, calls PyErr_Print() which causes exceptions to be logged to stderr.
642
+ /// Equivalent to:
643
+ /// exec(source, metrics.__dict__, metrics.__dict__)
644
+ NVPA_Status NVPW_MetricsContext_RunScript(NVPW_MetricsContext_RunScript_Params* pParams);
645
+
646
+ typedef struct NVPW_MetricsContext_ExecScript_Begin_Params
647
+ {
648
+ /// [in]
649
+ size_t structSize;
650
+ /// [in] assign to NULL
651
+ void* pPriv;
652
+ NVPA_MetricsContext* pMetricsContext;
653
+ /// in : if true, treats pSource as a statement to be eval'd; otherwise, calls exec.
654
+ NVPA_Bool isStatement;
655
+ /// in : if true, upon error, calls PyErr_Print() which causes exceptions to be logged to stderr
656
+ NVPA_Bool printErrors;
657
+ /// in : the script source code
658
+ const char* pSource;
659
+ /// in : the filename reported in stack traces; if NULL, uses an auto-generated name
660
+ const char* pFileName;
661
+ /// out: if isStatement, points at a string form of the evaluation; if !isStatement, points at
662
+ /// str(locals()['result'])
663
+ const char* pResultStr;
664
+ } NVPW_MetricsContext_ExecScript_Begin_Params;
665
+ #define NVPW_MetricsContext_ExecScript_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_ExecScript_Begin_Params, pResultStr)
666
+
667
+ /// Executes a script in the metrics module, but does not modify its contents (for ordinary queries).
668
+ /// Equivalent to one of:
669
+ /// eval(source, metrics.__dict__, {}) # isStatement true
670
+ /// exec(source, metrics.__dict__, {}) # isStatement false
671
+ NVPA_Status NVPW_MetricsContext_ExecScript_Begin(NVPW_MetricsContext_ExecScript_Begin_Params* pParams);
672
+
673
+ typedef struct NVPW_MetricsContext_ExecScript_End_Params
674
+ {
675
+ /// [in]
676
+ size_t structSize;
677
+ /// [in] assign to NULL
678
+ void* pPriv;
679
+ NVPA_MetricsContext* pMetricsContext;
680
+ } NVPW_MetricsContext_ExecScript_End_Params;
681
+ #define NVPW_MetricsContext_ExecScript_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_ExecScript_End_Params, pMetricsContext)
682
+
683
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_ExecScript_Begin.
684
+ NVPA_Status NVPW_MetricsContext_ExecScript_End(NVPW_MetricsContext_ExecScript_End_Params* pParams);
685
+
686
+ typedef struct NVPW_MetricsContext_GetCounterNames_Begin_Params
687
+ {
688
+ /// [in]
689
+ size_t structSize;
690
+ /// [in] assign to NULL
691
+ void* pPriv;
692
+ NVPA_MetricsContext* pMetricsContext;
693
+ /// [out]
694
+ size_t numCounters;
695
+ /// [out]
696
+ const char* const* ppCounterNames;
697
+ } NVPW_MetricsContext_GetCounterNames_Begin_Params;
698
+ #define NVPW_MetricsContext_GetCounterNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetCounterNames_Begin_Params, ppCounterNames)
699
+
700
+ /// Outputs (size, pointer) to an array of "const char* pCounterName". The lifetime of the array is tied to
701
+ /// MetricsContext. The names are sorted.
702
+ /// Impl: lazily creates list
703
+ NVPA_Status NVPW_MetricsContext_GetCounterNames_Begin(NVPW_MetricsContext_GetCounterNames_Begin_Params* pParams);
704
+
705
+ typedef struct NVPW_MetricsContext_GetCounterNames_End_Params
706
+ {
707
+ /// [in]
708
+ size_t structSize;
709
+ /// [in] assign to NULL
710
+ void* pPriv;
711
+ NVPA_MetricsContext* pMetricsContext;
712
+ } NVPW_MetricsContext_GetCounterNames_End_Params;
713
+ #define NVPW_MetricsContext_GetCounterNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetCounterNames_End_Params, pMetricsContext)
714
+
715
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetCounterNames_Begin.
716
+ NVPA_Status NVPW_MetricsContext_GetCounterNames_End(NVPW_MetricsContext_GetCounterNames_End_Params* pParams);
717
+
718
+ typedef struct NVPW_MetricsContext_GetThroughputNames_Begin_Params
719
+ {
720
+ /// [in]
721
+ size_t structSize;
722
+ /// [in] assign to NULL
723
+ void* pPriv;
724
+ NVPA_MetricsContext* pMetricsContext;
725
+ /// [out]
726
+ size_t numThroughputs;
727
+ /// [out]
728
+ const char* const* ppThroughputNames;
729
+ } NVPW_MetricsContext_GetThroughputNames_Begin_Params;
730
+ #define NVPW_MetricsContext_GetThroughputNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputNames_Begin_Params, ppThroughputNames)
731
+
732
+ /// Outputs (size, pointer) to an array of "const char* pThroughputName". The lifetime of the array is tied to
733
+ /// MetricsContext. The names are sorted.
734
+ /// Impl: lazily creates list
735
+ NVPA_Status NVPW_MetricsContext_GetThroughputNames_Begin(NVPW_MetricsContext_GetThroughputNames_Begin_Params* pParams);
736
+
737
+ typedef struct NVPW_MetricsContext_GetThroughputNames_End_Params
738
+ {
739
+ /// [in]
740
+ size_t structSize;
741
+ /// [in] assign to NULL
742
+ void* pPriv;
743
+ NVPA_MetricsContext* pMetricsContext;
744
+ } NVPW_MetricsContext_GetThroughputNames_End_Params;
745
+ #define NVPW_MetricsContext_GetThroughputNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputNames_End_Params, pMetricsContext)
746
+
747
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetThroughputNames_Begin.
748
+ NVPA_Status NVPW_MetricsContext_GetThroughputNames_End(NVPW_MetricsContext_GetThroughputNames_End_Params* pParams);
749
+
750
+ typedef struct NVPW_MetricsContext_GetRatioNames_Begin_Params
751
+ {
752
+ /// [in]
753
+ size_t structSize;
754
+ /// [in] assign to NULL
755
+ void* pPriv;
756
+ NVPA_MetricsContext* pMetricsContext;
757
+ /// [out]
758
+ size_t numRatios;
759
+ /// [out]
760
+ const char* const* ppRatioNames;
761
+ } NVPW_MetricsContext_GetRatioNames_Begin_Params;
762
+ #define NVPW_MetricsContext_GetRatioNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetRatioNames_Begin_Params, ppRatioNames)
763
+
764
+ /// Outputs (size, pointer) to an array of "const char* pRatioName". The lifetime of the array is tied to
765
+ /// MetricsContext. The names are sorted.
766
+ /// Impl: lazily creates list
767
+ NVPA_Status NVPW_MetricsContext_GetRatioNames_Begin(NVPW_MetricsContext_GetRatioNames_Begin_Params* pParams);
768
+
769
+ typedef struct NVPW_MetricsContext_GetRatioNames_End_Params
770
+ {
771
+ /// [in]
772
+ size_t structSize;
773
+ /// [in] assign to NULL
774
+ void* pPriv;
775
+ NVPA_MetricsContext* pMetricsContext;
776
+ } NVPW_MetricsContext_GetRatioNames_End_Params;
777
+ #define NVPW_MetricsContext_GetRatioNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetRatioNames_End_Params, pMetricsContext)
778
+
779
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetCounterNames_Begin.
780
+ NVPA_Status NVPW_MetricsContext_GetRatioNames_End(NVPW_MetricsContext_GetRatioNames_End_Params* pParams);
781
+
782
+ typedef struct NVPW_MetricsContext_GetMetricNames_Begin_Params
783
+ {
784
+ /// [in]
785
+ size_t structSize;
786
+ /// [in] assign to NULL
787
+ void* pPriv;
788
+ NVPA_MetricsContext* pMetricsContext;
789
+ /// out: number of elements in array ppMetricNames
790
+ size_t numMetrics;
791
+ /// out: pointer to array of 'const char* pMetricName'
792
+ const char* const* ppMetricNames;
793
+ /// in : if true, doesn't enumerate \<metric\>.peak_{burst, sustained}
794
+ NVPA_Bool hidePeakSubMetrics;
795
+ /// in : if true, doesn't enumerate \<metric\>.per_{active,elapsed,region,frame}_cycle
796
+ NVPA_Bool hidePerCycleSubMetrics;
797
+ /// in : if true, doesn't enumerate \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
798
+ NVPA_Bool hidePctOfPeakSubMetrics;
799
+ /// in : if false, enumerate \<unit\>__throughput.pct_of_peak_sustained_elapsed even if hidePctOfPeakSubMetrics
800
+ /// is true
801
+ NVPA_Bool hidePctOfPeakSubMetricsOnThroughputs;
802
+ } NVPW_MetricsContext_GetMetricNames_Begin_Params;
803
+ #define NVPW_MetricsContext_GetMetricNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricNames_Begin_Params, hidePctOfPeakSubMetricsOnThroughputs)
804
+
805
+ /// Outputs (size, pointer) to an array of "const char* pMetricName". The lifetime of the array is tied to
806
+ /// MetricsContext. The names are sorted.
807
+ /// Enumerates all metrics at all levels. Includes:
808
+ /// * counter.{sum,avg,min,max}
809
+ /// * throughput.{avg,min,max}
810
+ /// * \<metric\>.peak_{burst, sustained}
811
+ /// * \<metric\>.per_{active,elapsed,region,frame}_cycle
812
+ /// * \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
813
+ /// * \<metric\>.per.{other, other_pct}
814
+ NVPA_Status NVPW_MetricsContext_GetMetricNames_Begin(NVPW_MetricsContext_GetMetricNames_Begin_Params* pParams);
815
+
816
+ typedef struct NVPW_MetricsContext_GetMetricNames_End_Params
817
+ {
818
+ /// [in]
819
+ size_t structSize;
820
+ /// [in] assign to NULL
821
+ void* pPriv;
822
+ NVPA_MetricsContext* pMetricsContext;
823
+ } NVPW_MetricsContext_GetMetricNames_End_Params;
824
+ #define NVPW_MetricsContext_GetMetricNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricNames_End_Params, pMetricsContext)
825
+
826
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricNames_Begin.
827
+ NVPA_Status NVPW_MetricsContext_GetMetricNames_End(NVPW_MetricsContext_GetMetricNames_End_Params* pParams);
828
+
829
+ typedef struct NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params
830
+ {
831
+ /// [in]
832
+ size_t structSize;
833
+ /// [in] assign to NULL
834
+ void* pPriv;
835
+ NVPA_MetricsContext* pMetricsContext;
836
+ const char* pThroughputName;
837
+ const char* const* ppCounterNames;
838
+ const char* const* ppSubThroughputNames;
839
+ } NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params;
840
+ #define NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params, ppSubThroughputNames)
841
+
842
+ /// After this function returns, the lifetimes of strings pointed to by {ppCounterNames, ppSubThroughputNames,
843
+ /// ppSubMetricNames} are guaranteed until NVPW_MetricsContext_GetThroughputBreakdown_End, or until pMetricsContext
844
+ /// is destroyed
845
+ NVPA_Status NVPW_MetricsContext_GetThroughputBreakdown_Begin(NVPW_MetricsContext_GetThroughputBreakdown_Begin_Params* pParams);
846
+
847
+ typedef struct NVPW_MetricsContext_GetThroughputBreakdown_End_Params
848
+ {
849
+ /// [in]
850
+ size_t structSize;
851
+ /// [in] assign to NULL
852
+ void* pPriv;
853
+ NVPA_MetricsContext* pMetricsContext;
854
+ } NVPW_MetricsContext_GetThroughputBreakdown_End_Params;
855
+ #define NVPW_MetricsContext_GetThroughputBreakdown_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetThroughputBreakdown_End_Params, pMetricsContext)
856
+
857
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetThroughputBreakdown_Begin.
858
+ NVPA_Status NVPW_MetricsContext_GetThroughputBreakdown_End(NVPW_MetricsContext_GetThroughputBreakdown_End_Params* pParams);
859
+
860
+ typedef struct NVPW_MetricsContext_GetMetricProperties_Begin_Params
861
+ {
862
+ /// [in]
863
+ size_t structSize;
864
+ /// [in] assign to NULL
865
+ void* pPriv;
866
+ NVPA_MetricsContext* pMetricsContext;
867
+ const char* pMetricName;
868
+ /// out
869
+ const char* pDescription;
870
+ /// out
871
+ const char* pDimUnits;
872
+ /// out: a NULL-terminated array of pointers to RawMetric names that can be passed to
873
+ /// NVPW_RawMetricsConfig_AddMetrics()
874
+ const char** ppRawMetricDependencies;
875
+ /// out: metric.peak_burst.value.gpu
876
+ double gpuBurstRate;
877
+ /// out: metric.peak_sustained.value.gpu
878
+ double gpuSustainedRate;
879
+ /// out: a NULL-terminated array of pointers to RawMetric names that can be passed to
880
+ /// NVPW_RawMetricsConfig_AddMetrics().
881
+ const char** ppOptionalRawMetricDependencies;
882
+ } NVPW_MetricsContext_GetMetricProperties_Begin_Params;
883
+ #define NVPW_MetricsContext_GetMetricProperties_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricProperties_Begin_Params, ppOptionalRawMetricDependencies)
884
+
885
+ /// After this function returns, the lifetimes of strings pointed to by pMetricProperties or
886
+ /// ppOptionalRawMetricDependencies are guaranteed until NVPW_MetricsContext_GetMetricProperties_End, or until
887
+ /// pMetricsContext is destroyed.
888
+ NVPA_Status NVPW_MetricsContext_GetMetricProperties_Begin(NVPW_MetricsContext_GetMetricProperties_Begin_Params* pParams);
889
+
890
+ typedef struct NVPW_MetricsContext_GetMetricProperties_End_Params
891
+ {
892
+ /// [in]
893
+ size_t structSize;
894
+ /// [in] assign to NULL
895
+ void* pPriv;
896
+ NVPA_MetricsContext* pMetricsContext;
897
+ } NVPW_MetricsContext_GetMetricProperties_End_Params;
898
+ #define NVPW_MetricsContext_GetMetricProperties_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricProperties_End_Params, pMetricsContext)
899
+
900
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricProperties_Begin.
901
+ NVPA_Status NVPW_MetricsContext_GetMetricProperties_End(NVPW_MetricsContext_GetMetricProperties_End_Params* pParams);
902
+
903
+ typedef struct NVPW_MetricsContext_SetCounterData_Params
904
+ {
905
+ /// [in]
906
+ size_t structSize;
907
+ /// [in] assign to NULL
908
+ void* pPriv;
909
+ NVPA_MetricsContext* pMetricsContext;
910
+ const uint8_t* pCounterDataImage;
911
+ size_t rangeIndex;
912
+ NVPA_Bool isolated;
913
+ } NVPW_MetricsContext_SetCounterData_Params;
914
+ #define NVPW_MetricsContext_SetCounterData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_SetCounterData_Params, isolated)
915
+
916
+ /// Sets data for subsequent evaluation calls.
917
+ /// Only one (CounterData, range, isolated) set of counters can be active at a time; subsequent calls will overwrite
918
+ /// previous calls' data.
919
+ NVPA_Status NVPW_MetricsContext_SetCounterData(NVPW_MetricsContext_SetCounterData_Params* pParams);
920
+
921
+ typedef struct NVPW_MetricsContext_SetUserData_Params
922
+ {
923
+ /// [in]
924
+ size_t structSize;
925
+ /// [in] assign to NULL
926
+ void* pPriv;
927
+ NVPA_MetricsContext* pMetricsContext;
928
+ /// duration in ns of user defined frame
929
+ double frameDuration;
930
+ /// duration in ns of user defined region
931
+ double regionDuration;
932
+ } NVPW_MetricsContext_SetUserData_Params;
933
+ #define NVPW_MetricsContext_SetUserData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_SetUserData_Params, regionDuration)
934
+
935
+ /// Sets user data for subsequent evaluation calls.
936
+ NVPA_Status NVPW_MetricsContext_SetUserData(NVPW_MetricsContext_SetUserData_Params* pParams);
937
+
938
+ typedef struct NVPW_MetricsContext_EvaluateToGpuValues_Params
939
+ {
940
+ /// [in]
941
+ size_t structSize;
942
+ /// [in] assign to NULL
943
+ void* pPriv;
944
+ NVPA_MetricsContext* pMetricsContext;
945
+ size_t numMetrics;
946
+ const char* const* ppMetricNames;
947
+ /// [out]
948
+ double* pMetricValues;
949
+ } NVPW_MetricsContext_EvaluateToGpuValues_Params;
950
+ #define NVPW_MetricsContext_EvaluateToGpuValues_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_EvaluateToGpuValues_Params, pMetricValues)
951
+
952
+ /// Evaluate multiple metrics to retrieve their GPU values.
953
+ NVPA_Status NVPW_MetricsContext_EvaluateToGpuValues(NVPW_MetricsContext_EvaluateToGpuValues_Params* pParams);
954
+
955
+ typedef struct NVPW_MetricsContext_GetMetricSuffix_Begin_Params
956
+ {
957
+ /// [in]
958
+ size_t structSize;
959
+ /// [in] assign to NULL
960
+ void* pPriv;
961
+ NVPA_MetricsContext* pMetricsContext;
962
+ /// in: pointer to the metric name
963
+ const char* pMetricName;
964
+ /// out: number of elements in array ppSuffixes
965
+ size_t numSuffixes;
966
+ /// out: pointer to array of 'const char* pSuffixes'
967
+ const char* const* ppSuffixes;
968
+ /// in : if true, doesn't enumerate \<metric\>.peak_{burst, sustained}
969
+ NVPA_Bool hidePeakSubMetrics;
970
+ /// in : if true, doesn't enumerate \<metric\>.per_{active,elapsed,region,frame}_cycle
971
+ NVPA_Bool hidePerCycleSubMetrics;
972
+ /// in : if true, doesn't enumerate \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
973
+ NVPA_Bool hidePctOfPeakSubMetrics;
974
+ /// in : if false, enumerate \<unit\>__throughput.pct_of_peak_sustained_elapsed even if hidePctOfPeakSubMetrics
975
+ /// is true
976
+ NVPA_Bool hidePctOfPeakSubMetricsOnThroughputs;
977
+ } NVPW_MetricsContext_GetMetricSuffix_Begin_Params;
978
+ #define NVPW_MetricsContext_GetMetricSuffix_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricSuffix_Begin_Params, hidePctOfPeakSubMetricsOnThroughputs)
979
+
980
+ /// Outputs (size, pointer) to an array of "const char* pSuffixes". The lifetime of the array is tied to
981
+ /// MetricsContext.
982
+ /// return all the suffixes the metric has. the possible suffixes include:
983
+ /// * counter.{sum,avg,min,max}
984
+ /// * throughput.{avg,min,max}
985
+ /// * \<metric\>.peak_{burst, sustained}
986
+ /// * \<metric\>.per_{active,elapsed,region,frame}_cycle
987
+ /// * \<metric\>.pct_of_peak_{burst,sustained}_{active,elapsed,region,frame}
988
+ /// * \<metric\>.per.{other, other_pct}
989
+ NVPA_Status NVPW_MetricsContext_GetMetricSuffix_Begin(NVPW_MetricsContext_GetMetricSuffix_Begin_Params* pParams);
990
+
991
+ typedef struct NVPW_MetricsContext_GetMetricSuffix_End_Params
992
+ {
993
+ /// [in]
994
+ size_t structSize;
995
+ /// [in] assign to NULL
996
+ void* pPriv;
997
+ NVPA_MetricsContext* pMetricsContext;
998
+ } NVPW_MetricsContext_GetMetricSuffix_End_Params;
999
+ #define NVPW_MetricsContext_GetMetricSuffix_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricSuffix_End_Params, pMetricsContext)
1000
+
1001
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricSuffix_Begin.
1002
+ NVPA_Status NVPW_MetricsContext_GetMetricSuffix_End(NVPW_MetricsContext_GetMetricSuffix_End_Params* pParams);
1003
+
1004
+ typedef struct NVPW_MetricsContext_GetMetricBaseNames_Begin_Params
1005
+ {
1006
+ /// [in]
1007
+ size_t structSize;
1008
+ /// [in] assign to NULL
1009
+ void* pPriv;
1010
+ NVPA_MetricsContext* pMetricsContext;
1011
+ /// out: number of elements in array pMetricsBaseNames
1012
+ size_t numMetricBaseNames;
1013
+ /// out: pointer to array of 'const char* pMetricsBaseName'
1014
+ const char* const* ppMetricBaseNames;
1015
+ } NVPW_MetricsContext_GetMetricBaseNames_Begin_Params;
1016
+ #define NVPW_MetricsContext_GetMetricBaseNames_Begin_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricBaseNames_Begin_Params, ppMetricBaseNames)
1017
+
1018
+ /// Outputs (size, pointer) to an array of "const char* ppMetricBaseNames". The lifetime of the array is tied to
1019
+ /// MetricsContext.
1020
+ /// return all the metric base names.
1021
+ NVPA_Status NVPW_MetricsContext_GetMetricBaseNames_Begin(NVPW_MetricsContext_GetMetricBaseNames_Begin_Params* pParams);
1022
+
1023
+ typedef struct NVPW_MetricsContext_GetMetricBaseNames_End_Params
1024
+ {
1025
+ /// [in]
1026
+ size_t structSize;
1027
+ /// [in] assign to NULL
1028
+ void* pPriv;
1029
+ NVPA_MetricsContext* pMetricsContext;
1030
+ } NVPW_MetricsContext_GetMetricBaseNames_End_Params;
1031
+ #define NVPW_MetricsContext_GetMetricBaseNames_End_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsContext_GetMetricBaseNames_End_Params, pMetricsContext)
1032
+
1033
+ /// Cleans up memory internally allocated by NVPW_MetricsContext_GetMetricBaseNames_Begin.
1034
+ NVPA_Status NVPW_MetricsContext_GetMetricBaseNames_End(NVPW_MetricsContext_GetMetricBaseNames_End_Params* pParams);
1035
+
1036
+ /**
1037
+ * @}
1038
+ ******************************************************************************/
1039
+
1040
+ /***************************************************************************//**
1041
+ * @name Metrics Evaluator
1042
+ * @{
1043
+ */
1044
+
1045
+ typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator;
1046
+
1047
+ #ifndef NVPW_DIM_UNIT_DEFINED
1048
+ #define NVPW_DIM_UNIT_DEFINED
1049
+ typedef enum NVPW_DimUnitName
1050
+ {
1051
+ NVPW_DIM_UNIT_INVALID = 3518299157,
1052
+ NVPW_DIM_UNIT_UNITLESS = 2126137902,
1053
+ NVPW_DIM_UNIT_ATTRIBUTES = 3776338729,
1054
+ NVPW_DIM_UNIT_BYTES = 3797850191,
1055
+ NVPW_DIM_UNIT_CTAS = 1960564139,
1056
+ NVPW_DIM_UNIT_DRAM_CYCLES = 2650981327,
1057
+ NVPW_DIM_UNIT_FBP_CYCLES = 1785238957,
1058
+ NVPW_DIM_UNIT_FE_OPS = 2919159083,
1059
+ NVPW_DIM_UNIT_GPC_CYCLES = 1222631184,
1060
+ NVPW_DIM_UNIT_IDC_REQUESTS = 2012649669,
1061
+ NVPW_DIM_UNIT_INSTRUCTIONS = 1418625543,
1062
+ NVPW_DIM_UNIT_KILOBYTES = 1335980302,
1063
+ NVPW_DIM_UNIT_L1DATA_BANK_ACCESSES = 1479493682,
1064
+ NVPW_DIM_UNIT_L1DATA_BANK_CONFLICTS = 3433170787,
1065
+ NVPW_DIM_UNIT_L1TEX_REQUESTS = 1306473767,
1066
+ NVPW_DIM_UNIT_L1TEX_TAGS = 26573010,
1067
+ NVPW_DIM_UNIT_L1TEX_WAVEFRONTS = 129373765,
1068
+ NVPW_DIM_UNIT_L2_REQUESTS = 1143695106,
1069
+ NVPW_DIM_UNIT_L2_SECTORS = 3424101564,
1070
+ NVPW_DIM_UNIT_L2_TAGS = 3755612781,
1071
+ NVPW_DIM_UNIT_NANOSECONDS = 3047500672,
1072
+ NVPW_DIM_UNIT_NVLRX_CYCLES = 4059934930,
1073
+ NVPW_DIM_UNIT_NVLTX_CYCLES = 1814350488,
1074
+ NVPW_DIM_UNIT_PCIE_CYCLES = 1230450943,
1075
+ NVPW_DIM_UNIT_PERCENT = 1284354694,
1076
+ NVPW_DIM_UNIT_PIXELS = 4227616663,
1077
+ NVPW_DIM_UNIT_PIXEL_SHADER_BARRIERS = 3705502518,
1078
+ NVPW_DIM_UNIT_PRIMITIVES = 2373084002,
1079
+ NVPW_DIM_UNIT_QUADS = 1539753497,
1080
+ NVPW_DIM_UNIT_REGISTERS = 2837260947,
1081
+ NVPW_DIM_UNIT_SAMPLES = 746046551,
1082
+ NVPW_DIM_UNIT_SECONDS = 1164825258,
1083
+ NVPW_DIM_UNIT_SYS_CYCLES = 3310821688,
1084
+ NVPW_DIM_UNIT_TEXELS = 1293214069,
1085
+ NVPW_DIM_UNIT_THREADS = 164261907,
1086
+ NVPW_DIM_UNIT_VERTICES = 1873662209,
1087
+ NVPW_DIM_UNIT_WARPS = 97951949,
1088
+ NVPW_DIM_UNIT_WORKLOADS = 1728142656
1089
+ } NVPW_DimUnitName;
1090
+ #endif //NVPW_DIM_UNIT_DEFINED
1091
+
1092
+ #ifndef NVPW_HW_UNIT_DEFINED
1093
+ #define NVPW_HW_UNIT_DEFINED
1094
+ typedef enum NVPW_HwUnit
1095
+ {
1096
+ NVPW_HW_UNIT_INVALID = 3498035701,
1097
+ NVPW_HW_UNIT_CROP = 2872137846,
1098
+ NVPW_HW_UNIT_DRAM = 1662616918,
1099
+ NVPW_HW_UNIT_DRAMC = 1401232876,
1100
+ NVPW_HW_UNIT_FBP = 2947194306,
1101
+ NVPW_HW_UNIT_FBPA = 690045803,
1102
+ NVPW_HW_UNIT_FE = 2204924321,
1103
+ NVPW_HW_UNIT_GPC = 1911735839,
1104
+ NVPW_HW_UNIT_GPU = 1014363534,
1105
+ NVPW_HW_UNIT_GR = 2933618517,
1106
+ NVPW_HW_UNIT_IDC = 842765289,
1107
+ NVPW_HW_UNIT_L1TEX = 893940957,
1108
+ NVPW_HW_UNIT_LTS = 2333266697,
1109
+ NVPW_HW_UNIT_NVLRX = 3091684901,
1110
+ NVPW_HW_UNIT_NVLTX = 869679659,
1111
+ NVPW_HW_UNIT_PCIE = 3433264174,
1112
+ NVPW_HW_UNIT_PDA = 345193251,
1113
+ NVPW_HW_UNIT_PES = 804128425,
1114
+ NVPW_HW_UNIT_PROP = 3339255507,
1115
+ NVPW_HW_UNIT_RASTER = 187932504,
1116
+ NVPW_HW_UNIT_SM = 724224710,
1117
+ NVPW_HW_UNIT_SMSP = 2837616917,
1118
+ NVPW_HW_UNIT_SYS = 768990063,
1119
+ NVPW_HW_UNIT_TPC = 1889024613,
1120
+ NVPW_HW_UNIT_VAF = 753670509,
1121
+ NVPW_HW_UNIT_VPC = 275561583,
1122
+ NVPW_HW_UNIT_ZROP = 979500456
1123
+ } NVPW_HwUnit;
1124
+ #endif //NVPW_HW_UNIT_DEFINED
1125
+
1126
+ typedef enum NVPW_RollupOp
1127
+ {
1128
+ NVPW_ROLLUP_OP_AVG = 0,
1129
+ NVPW_ROLLUP_OP_MAX,
1130
+ NVPW_ROLLUP_OP_MIN,
1131
+ NVPW_ROLLUP_OP_SUM,
1132
+ NVPW_ROLLUP_OP__COUNT
1133
+ } NVPW_RollupOp;
1134
+
1135
+ typedef enum NVPW_MetricType
1136
+ {
1137
+ NVPW_METRIC_TYPE_COUNTER = 0,
1138
+ NVPW_METRIC_TYPE_RATIO,
1139
+ NVPW_METRIC_TYPE_THROUGHPUT,
1140
+ NVPW_METRIC_TYPE__COUNT
1141
+ } NVPW_MetricType;
1142
+
1143
+ typedef enum NVPW_Submetric
1144
+ {
1145
+ NVPW_SUBMETRIC_NONE = 0,
1146
+ NVPW_SUBMETRIC_PEAK_SUSTAINED = 1,
1147
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE = 2,
1148
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE_PER_SECOND = 3,
1149
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED = 4,
1150
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED_PER_SECOND = 5,
1151
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME = 6,
1152
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME_PER_SECOND = 7,
1153
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION = 8,
1154
+ NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION_PER_SECOND = 9,
1155
+ NVPW_SUBMETRIC_PER_CYCLE_ACTIVE = 10,
1156
+ NVPW_SUBMETRIC_PER_CYCLE_ELAPSED = 11,
1157
+ NVPW_SUBMETRIC_PER_CYCLE_IN_FRAME = 12,
1158
+ NVPW_SUBMETRIC_PER_CYCLE_IN_REGION = 13,
1159
+ NVPW_SUBMETRIC_PER_SECOND = 14,
1160
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ACTIVE = 15,
1161
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ELAPSED = 16,
1162
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_FRAME = 17,
1163
+ NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_REGION = 18,
1164
+ NVPW_SUBMETRIC_MAX_RATE = 19,
1165
+ NVPW_SUBMETRIC_PCT = 20,
1166
+ NVPW_SUBMETRIC_RATIO = 21,
1167
+ NVPW_SUBMETRIC__COUNT
1168
+ } NVPW_Submetric;
1169
+
1170
+ typedef struct NVPW_MetricEvalRequest
1171
+ {
1172
+ /// the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1173
+ size_t metricIndex;
1174
+ /// one of 'NVPW_MetricType'
1175
+ uint8_t metricType;
1176
+ /// one of 'NVPW_RollupOp', required for Counter and Throughput, doesn't apply to Ratio
1177
+ uint8_t rollupOp;
1178
+ /// one of 'NVPW_Submetric', required for Ratio and Throughput, optional for Counter
1179
+ uint16_t submetric;
1180
+ } NVPW_MetricEvalRequest;
1181
+ #define NVPW_MetricEvalRequest_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricEvalRequest, submetric)
1182
+
1183
+ typedef struct NVPW_DimUnitFactor
1184
+ {
1185
+ /// one of 'NVPW_DimUnitName'
1186
+ uint32_t dimUnit;
1187
+ int8_t exponent;
1188
+ } NVPW_DimUnitFactor;
1189
+ #define NVPW_DimUnitFactor_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_DimUnitFactor, exponent)
1190
+
1191
+ typedef struct NVPW_MetricsEvaluator_Destroy_Params
1192
+ {
1193
+ /// [in]
1194
+ size_t structSize;
1195
+ /// [in] assign to NULL
1196
+ void* pPriv;
1197
+ /// [in]
1198
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1199
+ } NVPW_MetricsEvaluator_Destroy_Params;
1200
+ #define NVPW_MetricsEvaluator_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_Destroy_Params, pMetricsEvaluator)
1201
+
1202
+ NVPA_Status NVPW_MetricsEvaluator_Destroy(NVPW_MetricsEvaluator_Destroy_Params* pParams);
1203
+
1204
+ typedef struct NVPW_MetricsEvaluator_GetMetricNames_Params
1205
+ {
1206
+ /// [in]
1207
+ size_t structSize;
1208
+ /// [in] assign to NULL
1209
+ void* pPriv;
1210
+ /// [in]
1211
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1212
+ /// [in] one of 'NVPW_MetricType'
1213
+ uint8_t metricType;
1214
+ /// [out]
1215
+ const char* pMetricNames;
1216
+ /// [out]
1217
+ const size_t* pMetricNameBeginIndices;
1218
+ /// [out]
1219
+ size_t numMetrics;
1220
+ } NVPW_MetricsEvaluator_GetMetricNames_Params;
1221
+ #define NVPW_MetricsEvaluator_GetMetricNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricNames_Params, numMetrics)
1222
+
1223
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricNames(NVPW_MetricsEvaluator_GetMetricNames_Params* pParams);
1224
+
1225
+ typedef struct NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params
1226
+ {
1227
+ /// [in]
1228
+ size_t structSize;
1229
+ /// [in] assign to NULL
1230
+ void* pPriv;
1231
+ /// [in]
1232
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1233
+ /// [in] can be either a base metric or a metric
1234
+ const char* pMetricName;
1235
+ /// [out] one of 'NVPW_MetricType'
1236
+ uint8_t metricType;
1237
+ /// [out] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1238
+ size_t metricIndex;
1239
+ } NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params;
1240
+ #define NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params, metricIndex)
1241
+
1242
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricTypeAndIndex(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params* pParams);
1243
+
1244
+ typedef struct NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params
1245
+ {
1246
+ /// [in]
1247
+ size_t structSize;
1248
+ /// [in] assign to NULL
1249
+ void* pPriv;
1250
+ /// [in]
1251
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1252
+ /// [in]
1253
+ const char* pMetricName;
1254
+ /// [inout] 'pMetricEvalRequest' is in, '*pMetricEvalRequest' is out
1255
+ struct NVPW_MetricEvalRequest* pMetricEvalRequest;
1256
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1257
+ size_t metricEvalRequestStructSize;
1258
+ } NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params;
1259
+ #define NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params, metricEvalRequestStructSize)
1260
+
1261
+ NVPA_Status NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params* pParams);
1262
+
1263
+ typedef struct NVPW_MetricsEvaluator_HwUnitToString_Params
1264
+ {
1265
+ /// [in]
1266
+ size_t structSize;
1267
+ /// [in] assign to NULL
1268
+ void* pPriv;
1269
+ /// [in]
1270
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1271
+ /// [in] one of 'NVPW_HwUnit'
1272
+ uint32_t hwUnit;
1273
+ /// [out]
1274
+ const char* pHwUnitName;
1275
+ } NVPW_MetricsEvaluator_HwUnitToString_Params;
1276
+ #define NVPW_MetricsEvaluator_HwUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_HwUnitToString_Params, pHwUnitName)
1277
+
1278
+ NVPA_Status NVPW_MetricsEvaluator_HwUnitToString(NVPW_MetricsEvaluator_HwUnitToString_Params* pParams);
1279
+
1280
+ typedef struct NVPW_MetricsEvaluator_GetCounterProperties_Params
1281
+ {
1282
+ /// [in]
1283
+ size_t structSize;
1284
+ /// [in] assign to NULL
1285
+ void* pPriv;
1286
+ /// [in]
1287
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1288
+ /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1289
+ size_t counterIndex;
1290
+ /// [out]
1291
+ const char* pDescription;
1292
+ /// [out] one of 'NVPW_HwUnit'
1293
+ uint32_t hwUnit;
1294
+ } NVPW_MetricsEvaluator_GetCounterProperties_Params;
1295
+ #define NVPW_MetricsEvaluator_GetCounterProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetCounterProperties_Params, hwUnit)
1296
+
1297
+ NVPA_Status NVPW_MetricsEvaluator_GetCounterProperties(NVPW_MetricsEvaluator_GetCounterProperties_Params* pParams);
1298
+
1299
+ typedef struct NVPW_MetricsEvaluator_GetRatioMetricProperties_Params
1300
+ {
1301
+ /// [in]
1302
+ size_t structSize;
1303
+ /// [in] assign to NULL
1304
+ void* pPriv;
1305
+ /// [in]
1306
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1307
+ /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1308
+ size_t ratioMetricIndex;
1309
+ /// [out]
1310
+ const char* pDescription;
1311
+ /// [out]
1312
+ uint64_t hwUnit;
1313
+ } NVPW_MetricsEvaluator_GetRatioMetricProperties_Params;
1314
+ #define NVPW_MetricsEvaluator_GetRatioMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params, hwUnit)
1315
+
1316
+ NVPA_Status NVPW_MetricsEvaluator_GetRatioMetricProperties(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params* pParams);
1317
+
1318
+ typedef struct NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params
1319
+ {
1320
+ /// [in]
1321
+ size_t structSize;
1322
+ /// [in] assign to NULL
1323
+ void* pPriv;
1324
+ /// [in]
1325
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1326
+ /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames'
1327
+ size_t throughputMetricIndex;
1328
+ /// [out]
1329
+ const char* pDescription;
1330
+ /// [out]
1331
+ uint32_t hwUnit;
1332
+ /// [out] number of constituent counters for the throughput metric
1333
+ size_t numCounters;
1334
+ /// [out] metric indices as in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numCounters' > 0, otherwise
1335
+ /// returned as nullptr
1336
+ const size_t* pCounterIndices;
1337
+ /// [out] number of constituent sub-throughputs for the throughput metric
1338
+ size_t numSubThroughputs;
1339
+ /// [out] metric indices as in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numSubThroughputs' > 0,
1340
+ /// otherwise returned as nullptr
1341
+ const size_t* pSubThroughputIndices;
1342
+ } NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params;
1343
+ #define NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params, pSubThroughputIndices)
1344
+
1345
+ NVPA_Status NVPW_MetricsEvaluator_GetThroughputMetricProperties(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params* pParams);
1346
+
1347
+ typedef struct NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params
1348
+ {
1349
+ /// [in]
1350
+ size_t structSize;
1351
+ /// [in] assign to NULL
1352
+ void* pPriv;
1353
+ /// [in]
1354
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1355
+ /// [in] one of 'NVPW_MetricType'
1356
+ uint8_t metricType;
1357
+ /// [out] an array of 'NVPW_Submetric'
1358
+ const uint16_t* pSupportedSubmetrics;
1359
+ /// [out]
1360
+ size_t numSupportedSubmetrics;
1361
+ } NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params;
1362
+ #define NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params, numSupportedSubmetrics)
1363
+
1364
+ NVPA_Status NVPW_MetricsEvaluator_GetSupportedSubmetrics(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params* pParams);
1365
+
1366
+ typedef struct NVPW_MetricsEvaluator_GetMetricRawDependencies_Params
1367
+ {
1368
+ /// [in]
1369
+ size_t structSize;
1370
+ /// [in] assign to NULL
1371
+ void* pPriv;
1372
+ /// [in]
1373
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1374
+ /// [in]
1375
+ const struct NVPW_MetricEvalRequest* pMetricEvalRequests;
1376
+ /// [in]
1377
+ size_t numMetricEvalRequests;
1378
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1379
+ size_t metricEvalRequestStructSize;
1380
+ /// [in] set to sizeof('NVPW_MetricEvalRequest')
1381
+ size_t metricEvalRequestStrideSize;
1382
+ /// [inout] 'ppRawDependencies' is in, '*ppRawDependencies' is out
1383
+ const char** ppRawDependencies;
1384
+ /// [inout] if 'ppRawDependencies' is NULL, number of raw dependencies available will be returned; otherwise it
1385
+ /// should be set to the number of elements allocated for 'ppRawDependencies', and on return, it will be
1386
+ /// overwritten by number of elements copied to 'ppRawDependencies'
1387
+ size_t numRawDependencies;
1388
+ /// [inout] 'ppOptionalRawDependencies' is in, '*ppOptionalRawDependencies' is out
1389
+ const char** ppOptionalRawDependencies;
1390
+ /// [inout] if 'ppOptionalRawDependencies' is NULL, number of optional raw dependencies available will be
1391
+ /// returned; otherwise it should be set to the number of elements allocated for 'ppOptionalRawDependencies',
1392
+ /// and on return, it will be overwritten by number of elements copied to 'ppOptionalRawDependencies'
1393
+ size_t numOptionalRawDependencies;
1394
+ } NVPW_MetricsEvaluator_GetMetricRawDependencies_Params;
1395
+ #define NVPW_MetricsEvaluator_GetMetricRawDependencies_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params, numOptionalRawDependencies)
1396
+
1397
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricRawDependencies(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params* pParams);
1398
+
1399
+ typedef struct NVPW_MetricsEvaluator_DimUnitToString_Params
1400
+ {
1401
+ /// [in]
1402
+ size_t structSize;
1403
+ /// [in] assign to NULL
1404
+ void* pPriv;
1405
+ /// [in]
1406
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1407
+ /// [in] one of 'NVPW_DimUnitName'
1408
+ uint32_t dimUnit;
1409
+ /// [out]
1410
+ const char* pSingularName;
1411
+ /// [out]
1412
+ const char* pPluralName;
1413
+ } NVPW_MetricsEvaluator_DimUnitToString_Params;
1414
+ #define NVPW_MetricsEvaluator_DimUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_DimUnitToString_Params, pPluralName)
1415
+
1416
+ NVPA_Status NVPW_MetricsEvaluator_DimUnitToString(NVPW_MetricsEvaluator_DimUnitToString_Params* pParams);
1417
+
1418
+ typedef struct NVPW_MetricsEvaluator_GetMetricDimUnits_Params
1419
+ {
1420
+ /// [in]
1421
+ size_t structSize;
1422
+ /// [in] assign to NULL
1423
+ void* pPriv;
1424
+ /// [in]
1425
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1426
+ /// [in]
1427
+ const struct NVPW_MetricEvalRequest* pMetricEvalRequest;
1428
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1429
+ size_t metricEvalRequestStructSize;
1430
+ /// [inout] 'pDimUnits' is in, '*pDimUnits' is out
1431
+ NVPW_DimUnitFactor* pDimUnits;
1432
+ /// [inout] if 'pDimUnits' is NULL, number of dim-units available will be returned; otherwise it should be set
1433
+ /// to the number of elements allocated for 'pDimUnits', and on return, it will be overwritten by number of
1434
+ /// elements copied to 'pDimUnits'
1435
+ size_t numDimUnits;
1436
+ /// [in] set to 'NVPW_DimUnitFactor_STRUCT_SIZE'
1437
+ size_t dimUnitFactorStructSize;
1438
+ } NVPW_MetricsEvaluator_GetMetricDimUnits_Params;
1439
+ #define NVPW_MetricsEvaluator_GetMetricDimUnits_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricDimUnits_Params, dimUnitFactorStructSize)
1440
+
1441
+ NVPA_Status NVPW_MetricsEvaluator_GetMetricDimUnits(NVPW_MetricsEvaluator_GetMetricDimUnits_Params* pParams);
1442
+
1443
+ typedef struct NVPW_MetricsEvaluator_SetUserData_Params
1444
+ {
1445
+ /// [in]
1446
+ size_t structSize;
1447
+ /// [in] assign to NULL
1448
+ void* pPriv;
1449
+ /// [in]
1450
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1451
+ /// [in] duration in ns of user defined frame
1452
+ double frameDuration;
1453
+ /// [in] duration in ns of user defined region
1454
+ double regionDuration;
1455
+ /// [in]
1456
+ NVPA_Bool isolated;
1457
+ } NVPW_MetricsEvaluator_SetUserData_Params;
1458
+ #define NVPW_MetricsEvaluator_SetUserData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetUserData_Params, isolated)
1459
+
1460
+ NVPA_Status NVPW_MetricsEvaluator_SetUserData(NVPW_MetricsEvaluator_SetUserData_Params* pParams);
1461
+
1462
+ typedef struct NVPW_MetricsEvaluator_EvaluateToGpuValues_Params
1463
+ {
1464
+ /// [in]
1465
+ size_t structSize;
1466
+ /// [in] assign to NULL
1467
+ void* pPriv;
1468
+ /// [in]
1469
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1470
+ /// [in]
1471
+ const struct NVPW_MetricEvalRequest* pMetricEvalRequests;
1472
+ /// [in]
1473
+ size_t numMetricEvalRequests;
1474
+ /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE'
1475
+ size_t metricEvalRequestStructSize;
1476
+ /// [in] set to sizeof('NVPW_MetricEvalRequest')
1477
+ size_t metricEvalRequestStrideSize;
1478
+ /// [in]
1479
+ const uint8_t* pCounterDataImage;
1480
+ /// [in]
1481
+ size_t counterDataImageSize;
1482
+ /// [in]
1483
+ size_t rangeIndex;
1484
+ /// [in]
1485
+ NVPA_Bool isolated;
1486
+ /// [inout] 'pMetricValues' is in, '*pMetricValues' is out
1487
+ double* pMetricValues;
1488
+ } NVPW_MetricsEvaluator_EvaluateToGpuValues_Params;
1489
+ #define NVPW_MetricsEvaluator_EvaluateToGpuValues_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params, pMetricValues)
1490
+
1491
+ NVPA_Status NVPW_MetricsEvaluator_EvaluateToGpuValues(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params* pParams);
1492
+
1493
+ typedef struct NVPW_MetricsEvaluator_SetDeviceAttributes_Params
1494
+ {
1495
+ /// [in]
1496
+ size_t structSize;
1497
+ /// [in] assign to NULL
1498
+ void* pPriv;
1499
+ /// [in]
1500
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
1501
+ /// [in]
1502
+ const uint8_t* pCounterDataImage;
1503
+ /// [in]
1504
+ size_t counterDataImageSize;
1505
+ } NVPW_MetricsEvaluator_SetDeviceAttributes_Params;
1506
+ #define NVPW_MetricsEvaluator_SetDeviceAttributes_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetDeviceAttributes_Params, counterDataImageSize)
1507
+
1508
+ NVPA_Status NVPW_MetricsEvaluator_SetDeviceAttributes(NVPW_MetricsEvaluator_SetDeviceAttributes_Params* pParams);
1509
+
1510
+ /**
1511
+ * @}
1512
+ ******************************************************************************/
1513
+
1514
+
1515
+ #endif // NVPERF_HOST_API_DEFINED
1516
+
1517
+
1518
+
1519
+
1520
+ #ifdef __cplusplus
1521
+ } // extern "C"
1522
+ #endif
1523
+
1524
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
1525
+ #pragma GCC visibility pop
1526
+ #endif
1527
+
1528
+ #endif // NVPERF_HOST_H