ZTWHHH commited on
Commit
99a6fc5
·
verified ·
1 Parent(s): 60a47b6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst +28 -0
  2. llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/METADATA +93 -0
  3. llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/RECORD +15 -0
  4. llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL +6 -0
  5. llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt +1 -0
  6. llava_next/lib/python3.10/site-packages/jaraco.context-5.3.0.dist-info/METADATA +75 -0
  7. llava_next/lib/python3.10/site-packages/jaraco.context-5.3.0.dist-info/top_level.txt +1 -0
  8. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py +0 -0
  9. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc +0 -0
  10. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py +0 -0
  11. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h +1371 -0
  12. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h +589 -0
  13. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h +197 -0
  14. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py +0 -0
  15. llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  16. llava_next/lib/python3.10/site-packages/nvidia/cudnn/__init__.py +0 -0
  17. llava_next/lib/python3.10/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-310.pyc +0 -0
  18. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/__init__.py +0 -0
  19. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc +0 -0
  20. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn.h +78 -0
  21. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h +658 -0
  22. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h +658 -0
  23. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h +540 -0
  24. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train_v8.h +540 -0
  25. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h +608 -0
  26. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h +608 -0
  27. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h +571 -0
  28. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h +571 -0
  29. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h +219 -0
  30. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h +219 -0
  31. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer.h +1183 -0
  32. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h +1183 -0
  33. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h +501 -0
  34. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h +501 -0
  35. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_v8.h +78 -0
  36. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version.h +109 -0
  37. llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version_v8.h +109 -0
  38. llava_next/lib/python3.10/site-packages/nvidia/cudnn/lib/__init__.py +0 -0
  39. llava_next/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/include/__init__.py +0 -0
  41. llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc +0 -0
  42. llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h +531 -0
  43. llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__init__.py +0 -0
  44. llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llava_next/lib/python3.10/site-packages/nvidia/nvtx/__init__.py +0 -0
  46. llava_next/lib/python3.10/site-packages/nvidia/nvtx/__pycache__/__init__.cpython-310.pyc +0 -0
  47. llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/__init__.py +0 -0
  48. llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/__pycache__/__init__.cpython-310.pyc +0 -0
  49. llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/nvToolsExt.h +1561 -0
  50. llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/nvToolsExtCuda.h +164 -0
llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2010 Pallets
2
+
3
+ Redistribution and use in source and binary forms, with or without
4
+ modification, are permitted provided that the following conditions are
5
+ met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright
8
+ notice, this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+
14
+ 3. Neither the name of the copyright holder nor the names of its
15
+ contributors may be used to endorse or promote products derived from
16
+ this software without specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
21
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
24
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/METADATA ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: MarkupSafe
3
+ Version: 2.1.5
4
+ Summary: Safely add untrusted strings to HTML/XML markup.
5
+ Home-page: https://palletsprojects.com/p/markupsafe/
6
+ Maintainer: Pallets
7
+ Maintainer-email: contact@palletsprojects.com
8
+ License: BSD-3-Clause
9
+ Project-URL: Donate, https://palletsprojects.com/donate
10
+ Project-URL: Documentation, https://markupsafe.palletsprojects.com/
11
+ Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
12
+ Project-URL: Source Code, https://github.com/pallets/markupsafe/
13
+ Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
14
+ Project-URL: Chat, https://discord.gg/pallets
15
+ Classifier: Development Status :: 5 - Production/Stable
16
+ Classifier: Environment :: Web Environment
17
+ Classifier: Intended Audience :: Developers
18
+ Classifier: License :: OSI Approved :: BSD License
19
+ Classifier: Operating System :: OS Independent
20
+ Classifier: Programming Language :: Python
21
+ Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
22
+ Classifier: Topic :: Text Processing :: Markup :: HTML
23
+ Requires-Python: >=3.7
24
+ Description-Content-Type: text/x-rst
25
+ License-File: LICENSE.rst
26
+
27
+ MarkupSafe
28
+ ==========
29
+
30
+ MarkupSafe implements a text object that escapes characters so it is
31
+ safe to use in HTML and XML. Characters that have special meanings are
32
+ replaced so that they display as the actual characters. This mitigates
33
+ injection attacks, meaning untrusted user input can safely be displayed
34
+ on a page.
35
+
36
+
37
+ Installing
38
+ ----------
39
+
40
+ Install and update using `pip`_:
41
+
42
+ .. code-block:: text
43
+
44
+ pip install -U MarkupSafe
45
+
46
+ .. _pip: https://pip.pypa.io/en/stable/getting-started/
47
+
48
+
49
+ Examples
50
+ --------
51
+
52
+ .. code-block:: pycon
53
+
54
+ >>> from markupsafe import Markup, escape
55
+
56
+ >>> # escape replaces special characters and wraps in Markup
57
+ >>> escape("<script>alert(document.cookie);</script>")
58
+ Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
59
+
60
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
61
+ >>> Markup("<strong>Hello</strong>")
62
+ Markup('<strong>hello</strong>')
63
+
64
+ >>> escape(Markup("<strong>Hello</strong>"))
65
+ Markup('<strong>hello</strong>')
66
+
67
+ >>> # Markup is a str subclass
68
+ >>> # methods and operators escape their arguments
69
+ >>> template = Markup("Hello <em>{name}</em>")
70
+ >>> template.format(name='"World"')
71
+ Markup('Hello <em>&#34;World&#34;</em>')
72
+
73
+
74
+ Donate
75
+ ------
76
+
77
+ The Pallets organization develops and supports MarkupSafe and other
78
+ popular packages. In order to grow the community of contributors and
79
+ users, and allow the maintainers to devote more time to the projects,
80
+ `please donate today`_.
81
+
82
+ .. _please donate today: https://palletsprojects.com/donate
83
+
84
+
85
+ Links
86
+ -----
87
+
88
+ - Documentation: https://markupsafe.palletsprojects.com/
89
+ - Changes: https://markupsafe.palletsprojects.com/changes/
90
+ - PyPI Releases: https://pypi.org/project/MarkupSafe/
91
+ - Source Code: https://github.com/pallets/markupsafe/
92
+ - Issue Tracker: https://github.com/pallets/markupsafe/issues/
93
+ - Chat: https://discord.gg/pallets
llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
3
+ MarkupSafe-2.1.5.dist-info/METADATA,sha256=2dRDPam6OZLfpX0wg1JN5P3u9arqACxVSfdGmsJU7o8,3003
4
+ MarkupSafe-2.1.5.dist-info/RECORD,,
5
+ MarkupSafe-2.1.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ MarkupSafe-2.1.5.dist-info/WHEEL,sha256=1FEjxEYgybphwh9S0FO9IcZ0B-NIeM2ko8OzhFZeOeQ,152
7
+ MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
8
+ markupsafe/__init__.py,sha256=r7VOTjUq7EMQ4v3p4R1LoVOGJg6ysfYRncLr34laRBs,10958
9
+ markupsafe/__pycache__/__init__.cpython-310.pyc,,
10
+ markupsafe/__pycache__/_native.cpython-310.pyc,,
11
+ markupsafe/_native.py,sha256=GR86Qvo_GcgKmKreA1WmYN9ud17OFwkww8E-fiW-57s,1713
12
+ markupsafe/_speedups.c,sha256=X2XvQVtIdcK4Usz70BvkzoOfjTCmQlDkkjYSn-swE0g,7083
13
+ markupsafe/_speedups.cpython-310-x86_64-linux-gnu.so,sha256=kPt-fhZ_RG7PUbDvwmyC26ZvRJ9DvUlF3hszBIB6_xs,44240
14
+ markupsafe/_speedups.pyi,sha256=vfMCsOgbAXRNLUXkyuyonG8uEWKYU4PDqNuMaDELAYw,229
15
+ markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.42.0)
3
+ Root-Is-Purelib: false
4
+ Tag: cp310-cp310-manylinux_2_17_x86_64
5
+ Tag: cp310-cp310-manylinux2014_x86_64
6
+
llava_next/lib/python3.10/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ markupsafe
llava_next/lib/python3.10/site-packages/jaraco.context-5.3.0.dist-info/METADATA ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: jaraco.context
3
+ Version: 5.3.0
4
+ Summary: Useful decorators and context managers
5
+ Home-page: https://github.com/jaraco/jaraco.context
6
+ Author: Jason R. Coombs
7
+ Author-email: jaraco@jaraco.com
8
+ Classifier: Development Status :: 5 - Production/Stable
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3 :: Only
13
+ Requires-Python: >=3.8
14
+ License-File: LICENSE
15
+ Requires-Dist: backports.tarfile ; python_version < "3.12"
16
+ Provides-Extra: docs
17
+ Requires-Dist: sphinx >=3.5 ; extra == 'docs'
18
+ Requires-Dist: jaraco.packaging >=9.3 ; extra == 'docs'
19
+ Requires-Dist: rst.linker >=1.9 ; extra == 'docs'
20
+ Requires-Dist: furo ; extra == 'docs'
21
+ Requires-Dist: sphinx-lint ; extra == 'docs'
22
+ Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'docs'
23
+ Provides-Extra: testing
24
+ Requires-Dist: pytest !=8.1.1,>=6 ; extra == 'testing'
25
+ Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'testing'
26
+ Requires-Dist: pytest-cov ; extra == 'testing'
27
+ Requires-Dist: pytest-mypy ; extra == 'testing'
28
+ Requires-Dist: pytest-enabler >=2.2 ; extra == 'testing'
29
+ Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'testing'
30
+ Requires-Dist: portend ; extra == 'testing'
31
+
32
+ .. image:: https://img.shields.io/pypi/v/jaraco.context.svg
33
+ :target: https://pypi.org/project/jaraco.context
34
+
35
+ .. image:: https://img.shields.io/pypi/pyversions/jaraco.context.svg
36
+
37
+ .. image:: https://github.com/jaraco/jaraco.context/actions/workflows/main.yml/badge.svg
38
+ :target: https://github.com/jaraco/jaraco.context/actions?query=workflow%3A%22tests%22
39
+ :alt: tests
40
+
41
+ .. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
42
+ :target: https://github.com/astral-sh/ruff
43
+ :alt: Ruff
44
+
45
+ .. image:: https://readthedocs.org/projects/jaracocontext/badge/?version=latest
46
+ :target: https://jaracocontext.readthedocs.io/en/latest/?badge=latest
47
+
48
+ .. image:: https://img.shields.io/badge/skeleton-2024-informational
49
+ :target: https://blog.jaraco.com/skeleton
50
+
51
+ .. image:: https://tidelift.com/badges/package/pypi/jaraco.context
52
+ :target: https://tidelift.com/subscription/pkg/pypi-jaraco.context?utm_source=pypi-jaraco.context&utm_medium=readme
53
+
54
+
55
+ Highlights
56
+ ==========
57
+
58
+ See the docs linked from the badge above for the full details, but here are some features that may be of interest.
59
+
60
+ - ``ExceptionTrap`` provides a general-purpose wrapper for trapping exceptions and then acting on the outcome. Includes ``passes`` and ``raises`` decorators to replace the result of a wrapped function by a boolean indicating the outcome of the exception trap. See `this keyring commit <https://github.com/jaraco/keyring/commit/a85a7cbc6c909f8121660ed1f7b487f99a1c2bf7>`_ for an example of it in production.
61
+ - ``suppress`` simply enables ``contextlib.suppress`` as a decorator.
62
+ - ``on_interrupt`` is a decorator used by CLI entry points to affect the handling of a ``KeyboardInterrupt``. Inspired by `Lucretiel/autocommand#18 <https://github.com/Lucretiel/autocommand/issues/18>`_.
63
+ - ``pushd`` is similar to pytest's ``monkeypatch.chdir`` or path's `default context <https://path.readthedocs.io/en/latest/api.html>`_, changes the current working directory for the duration of the context.
64
+ - ``tarball`` will download a tarball, extract it, change directory, yield, then clean up after. Convenient when working with web assets.
65
+ - ``null`` is there for those times when one code branch needs a context and the other doesn't; this null context provides symmetry across those branches.
66
+
67
+
68
+ For Enterprise
69
+ ==============
70
+
71
+ Available as part of the Tidelift Subscription.
72
+
73
+ This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
74
+
75
+ `Learn more <https://tidelift.com/subscription/pkg/pypi-jaraco.context?utm_source=pypi-jaraco.context&utm_medium=referral&utm_campaign=github>`_.
llava_next/lib/python3.10/site-packages/jaraco.context-5.3.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ jaraco
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (173 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_events.h ADDED
@@ -0,0 +1,1371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2010-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_EVENTS_H_)
51
+ #define _CUPTI_EVENTS_H_
52
+
53
+ #include <cuda.h>
54
+ #include <string.h>
55
+ #include <cuda_stdint.h>
56
+ #include <cupti_result.h>
57
+
58
+ #ifndef CUPTIAPI
59
+ #ifdef _WIN32
60
+ #define CUPTIAPI __stdcall
61
+ #else
62
+ #define CUPTIAPI
63
+ #endif
64
+ #endif
65
+
66
+ #if defined(__cplusplus)
67
+ extern "C" {
68
+ #endif
69
+
70
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
71
+ #pragma GCC visibility push(default)
72
+ #endif
73
+
74
+ /**
75
+ * \defgroup CUPTI_EVENT_API CUPTI Event API
76
+ * Functions, types, and enums that implement the CUPTI Event API.
77
+ *
78
+ * \note CUPTI event API from the header cupti_events.h are not supported on devices
79
+ * with compute capability 7.5 and higher (i.e. Turing and later GPU architectures).
80
+ * These API will be deprecated in a future CUDA release. These are replaced by
81
+ * Profiling API in the header cupti_profiler_target.h and Perfworks metrics API
82
+ * in the headers nvperf_host.h and nvperf_target.h which are supported on
83
+ * devices with compute capability 7.0 and higher (i.e. Volta and later GPU
84
+ * architectures).
85
+ *
86
+ * @{
87
+ */
88
+
89
+ /**
90
+ * \brief ID for an event.
91
+ *
92
+ * An event represents a countable activity, action, or occurrence on
93
+ * the device.
94
+ */
95
+ typedef uint32_t CUpti_EventID;
96
+
97
+ /**
98
+ * \brief ID for an event domain.
99
+ *
100
+ * ID for an event domain. An event domain represents a group of
101
+ * related events. A device may have multiple instances of a domain,
102
+ * indicating that the device can simultaneously record multiple
103
+ * instances of each event within that domain.
104
+ */
105
+ typedef uint32_t CUpti_EventDomainID;
106
+
107
+ /**
108
+ * \brief A group of events.
109
+ *
110
+ * An event group is a collection of events that are managed
111
+ * together. All events in an event group must belong to the same
112
+ * domain.
113
+ */
114
+ typedef void *CUpti_EventGroup;
115
+
116
+ /**
117
+ * \brief Device class.
118
+ *
119
+ * Enumeration of device classes for device attribute
120
+ * CUPTI_DEVICE_ATTR_DEVICE_CLASS.
121
+ */
122
+ typedef enum {
123
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TESLA = 0,
124
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_QUADRO = 1,
125
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_GEFORCE = 2,
126
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS_TEGRA = 3,
127
+ } CUpti_DeviceAttributeDeviceClass;
128
+
129
+ /**
130
+ * \brief Device attributes.
131
+ *
132
+ * CUPTI device attributes. These attributes can be read using \ref
133
+ * cuptiDeviceGetAttribute.
134
+ */
135
+ typedef enum {
136
+ /**
137
+ * Number of event IDs for a device. Value is a uint32_t.
138
+ */
139
+ CUPTI_DEVICE_ATTR_MAX_EVENT_ID = 1,
140
+ /**
141
+ * Number of event domain IDs for a device. Value is a uint32_t.
142
+ */
143
+ CUPTI_DEVICE_ATTR_MAX_EVENT_DOMAIN_ID = 2,
144
+ /**
145
+ * Get global memory bandwidth in Kbytes/sec. Value is a uint64_t.
146
+ */
147
+ CUPTI_DEVICE_ATTR_GLOBAL_MEMORY_BANDWIDTH = 3,
148
+ /**
149
+ * Get theoretical maximum number of instructions per cycle. Value
150
+ * is a uint32_t.
151
+ */
152
+ CUPTI_DEVICE_ATTR_INSTRUCTION_PER_CYCLE = 4,
153
+ /**
154
+ * Get theoretical maximum number of single precision instructions
155
+ * that can be executed per second. Value is a uint64_t.
156
+ */
157
+ CUPTI_DEVICE_ATTR_INSTRUCTION_THROUGHPUT_SINGLE_PRECISION = 5,
158
+ /**
159
+ * Get number of frame buffers for device. Value is a uint64_t.
160
+ */
161
+ CUPTI_DEVICE_ATTR_MAX_FRAME_BUFFERS = 6,
162
+ /**
163
+ * Get PCIE link rate in Mega bits/sec for device. Return 0 if bus-type
164
+ * is non-PCIE. Value is a uint64_t.
165
+ */
166
+ CUPTI_DEVICE_ATTR_PCIE_LINK_RATE = 7,
167
+ /**
168
+ * Get PCIE link width for device. Return 0 if bus-type
169
+ * is non-PCIE. Value is a uint64_t.
170
+ */
171
+ CUPTI_DEVICE_ATTR_PCIE_LINK_WIDTH = 8,
172
+ /**
173
+ * Get PCIE generation for device. Return 0 if bus-type
174
+ * is non-PCIE. Value is a uint64_t.
175
+ */
176
+ CUPTI_DEVICE_ATTR_PCIE_GEN = 9,
177
+ /**
178
+ * Get the class for the device. Value is a
179
+ * CUpti_DeviceAttributeDeviceClass.
180
+ */
181
+ CUPTI_DEVICE_ATTR_DEVICE_CLASS = 10,
182
+ /**
183
+ * Get the peak single precision flop per cycle. Value is a uint64_t.
184
+ */
185
+ CUPTI_DEVICE_ATTR_FLOP_SP_PER_CYCLE = 11,
186
+ /**
187
+ * Get the peak double precision flop per cycle. Value is a uint64_t.
188
+ */
189
+ CUPTI_DEVICE_ATTR_FLOP_DP_PER_CYCLE = 12,
190
+ /**
191
+ * Get number of L2 units. Value is a uint64_t.
192
+ */
193
+ CUPTI_DEVICE_ATTR_MAX_L2_UNITS = 13,
194
+ /**
195
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_SHARED
196
+ * preference. Value is a uint64_t.
197
+ */
198
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_SHARED = 14,
199
+ /**
200
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_L1
201
+ * preference. Value is a uint64_t.
202
+ */
203
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_L1 = 15,
204
+ /**
205
+ * Get the maximum shared memory for the CU_FUNC_CACHE_PREFER_EQUAL
206
+ * preference. Value is a uint64_t.
207
+ */
208
+ CUPTI_DEVICE_ATTR_MAX_SHARED_MEMORY_CACHE_CONFIG_PREFER_EQUAL = 16,
209
+ /**
210
+ * Get the peak half precision flop per cycle. Value is a uint64_t.
211
+ */
212
+ CUPTI_DEVICE_ATTR_FLOP_HP_PER_CYCLE = 17,
213
+ /**
214
+ * Check if Nvlink is connected to device. Returns 1, if at least one
215
+ * Nvlink is connected to the device, returns 0 otherwise.
216
+ * Value is a uint32_t.
217
+ */
218
+ CUPTI_DEVICE_ATTR_NVLINK_PRESENT = 18,
219
+ /**
220
+ * Check if Nvlink is present between GPU and CPU. Returns Bandwidth,
221
+ * in Bytes/sec, if Nvlink is present, returns 0 otherwise.
222
+ * Value is a uint64_t.
223
+ */
224
+ CUPTI_DEVICE_ATTR_GPU_CPU_NVLINK_BW = 19,
225
+ /**
226
+ * Check if NVSwitch is present in the underlying topology.
227
+ * Returns 1, if present, returns 0 otherwise.
228
+ * Value is a uint32_t.
229
+ */
230
+ CUPTI_DEVICE_ATTR_NVSWITCH_PRESENT = 20,
231
+ CUPTI_DEVICE_ATTR_FORCE_INT = 0x7fffffff,
232
+ } CUpti_DeviceAttribute;
233
+
234
+ /**
235
+ * \brief Event domain attributes.
236
+ *
237
+ * Event domain attributes. Except where noted, all the attributes can
238
+ * be read using either \ref cuptiDeviceGetEventDomainAttribute or
239
+ * \ref cuptiEventDomainGetAttribute.
240
+ */
241
+ typedef enum {
242
+ /**
243
+ * Event domain name. Value is a null terminated const c-string.
244
+ */
245
+ CUPTI_EVENT_DOMAIN_ATTR_NAME = 0,
246
+ /**
247
+ * Number of instances of the domain for which event counts will be
248
+ * collected. The domain may have additional instances that cannot
249
+ * be profiled (see CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT).
250
+ * Can be read only with \ref
251
+ * cuptiDeviceGetEventDomainAttribute. Value is a uint32_t.
252
+ */
253
+ CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT = 1,
254
+ /**
255
+ * Total number of instances of the domain, including instances that
256
+ * cannot be profiled. Use CUPTI_EVENT_DOMAIN_ATTR_INSTANCE_COUNT
257
+ * to get the number of instances that can be profiled. Can be read
258
+ * only with \ref cuptiDeviceGetEventDomainAttribute. Value is a
259
+ * uint32_t.
260
+ */
261
+ CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT = 3,
262
+ /**
263
+ * Collection method used for events contained in the event domain.
264
+ * Value is a \ref CUpti_EventCollectionMethod.
265
+ */
266
+ CUPTI_EVENT_DOMAIN_ATTR_COLLECTION_METHOD = 4,
267
+
268
+ CUPTI_EVENT_DOMAIN_ATTR_FORCE_INT = 0x7fffffff,
269
+ } CUpti_EventDomainAttribute;
270
+
271
+ /**
272
+ * \brief The collection method used for an event.
273
+ *
274
+ * The collection method indicates how an event is collected.
275
+ */
276
+ typedef enum {
277
+ /**
278
+ * Event is collected using a hardware global performance monitor.
279
+ */
280
+ CUPTI_EVENT_COLLECTION_METHOD_PM = 0,
281
+ /**
282
+ * Event is collected using a hardware SM performance monitor.
283
+ */
284
+ CUPTI_EVENT_COLLECTION_METHOD_SM = 1,
285
+ /**
286
+ * Event is collected using software instrumentation.
287
+ */
288
+ CUPTI_EVENT_COLLECTION_METHOD_INSTRUMENTED = 2,
289
+ /**
290
+ * Event is collected using NvLink throughput counter method.
291
+ */
292
+ CUPTI_EVENT_COLLECTION_METHOD_NVLINK_TC = 3,
293
+ CUPTI_EVENT_COLLECTION_METHOD_FORCE_INT = 0x7fffffff
294
+ } CUpti_EventCollectionMethod;
295
+
296
/**
 * \brief Event group attributes.
 *
 * Event group attributes. These attributes can be read using \ref
 * cuptiEventGroupGetAttribute. Attributes marked [rw] can also be
 * written using \ref cuptiEventGroupSetAttribute.
 */
typedef enum {
  /**
   * The domain to which the event group is bound. This attribute is
   * set when the first event is added to the group. Value is a
   * CUpti_EventDomainID.
   */
  CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID = 0,
  /**
   * [rw] Profile all the instances of the domain for this
   * eventgroup. This feature can be used to get load balancing
   * across all instances of a domain. Value is an integer.
   */
  CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES = 1,
  /**
   * [rw] Reserved for user data.
   */
  CUPTI_EVENT_GROUP_ATTR_USER_DATA = 2,
  /**
   * Number of events in the group. Value is a uint32_t.
   */
  CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS = 3,
  /**
   * Enumerates events in the group. Value is a pointer to buffer of
   * size sizeof(CUpti_EventID) * num_of_events in the eventgroup.
   * num_of_events can be queried using
   * CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS.
   */
  CUPTI_EVENT_GROUP_ATTR_EVENTS = 4,
  /**
   * Number of instances of the domain bound to this event group that
   * will be counted. Value is a uint32_t.
   */
  CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT = 5,
  /**
   * Event group scope can be set to CUPTI_EVENT_PROFILING_SCOPE_DEVICE or
   * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT for an eventGroup, before
   * adding any event.
   * Sets the scope of eventgroup as CUPTI_EVENT_PROFILING_SCOPE_DEVICE or
   * CUPTI_EVENT_PROFILING_SCOPE_CONTEXT when the scope of the events
   * that will be added is CUPTI_EVENT_PROFILING_SCOPE_BOTH.
   * If profiling scope of event is either
   * CUPTI_EVENT_PROFILING_SCOPE_DEVICE or CUPTI_EVENT_PROFILING_SCOPE_CONTEXT
   * then setting this attribute will not affect the default scope.
   * It is not allowed to add events of different scope to same eventgroup.
   * Value is a uint32_t.
   */
  CUPTI_EVENT_GROUP_ATTR_PROFILING_SCOPE = 6,
  /* Forces the enum to be backed by a (32-bit) signed int. */
  CUPTI_EVENT_GROUP_ATTR_FORCE_INT = 0x7fffffff,
} CUpti_EventGroupAttribute;
352
+
353
/**
 * \brief Profiling scope for event.
 *
 * Profiling scope of event indicates if the event can be collected at context
 * scope or device scope or both i.e. it can be collected at any of context or
 * device scope.
 */
typedef enum {
  /**
   * Event is collected at context scope.
   */
  CUPTI_EVENT_PROFILING_SCOPE_CONTEXT = 0,
  /**
   * Event is collected at device scope.
   */
  CUPTI_EVENT_PROFILING_SCOPE_DEVICE = 1,
  /**
   * Event can be collected at device or context scope.
   * The scope can be set using \ref cuptiEventGroupSetAttribute API.
   */
  CUPTI_EVENT_PROFILING_SCOPE_BOTH = 2,
  /* Forces the enum to be backed by a (32-bit) signed int. */
  CUPTI_EVENT_PROFILING_SCOPE_FORCE_INT = 0x7fffffff
} CUpti_EventProfilingScope;
376
+
377
/**
 * \brief Event attributes.
 *
 * Event attributes. These attributes can be read using \ref
 * cuptiEventGetAttribute.
 */
typedef enum {
  /**
   * Event name. Value is a null terminated const c-string.
   */
  CUPTI_EVENT_ATTR_NAME = 0,
  /**
   * Short description of event. Value is a null terminated const
   * c-string.
   */
  CUPTI_EVENT_ATTR_SHORT_DESCRIPTION = 1,
  /**
   * Long description of event. Value is a null terminated const
   * c-string.
   */
  CUPTI_EVENT_ATTR_LONG_DESCRIPTION = 2,
  /**
   * Category of event. Value is CUpti_EventCategory.
   */
  CUPTI_EVENT_ATTR_CATEGORY = 3,
  /**
   * Profiling scope of the events. It can be either device or context or both.
   * Value is a \ref CUpti_EventProfilingScope.
   * Note: enumerator value 4 is deliberately skipped here.
   */
  CUPTI_EVENT_ATTR_PROFILING_SCOPE = 5,

  /* Forces the enum to be backed by a (32-bit) signed int. */
  CUPTI_EVENT_ATTR_FORCE_INT = 0x7fffffff,
} CUpti_EventAttribute;
410
+
411
/**
 * \brief Event collection modes.
 *
 * The event collection mode determines the period over which the
 * events within the enabled event groups will be collected.
 */
typedef enum {
  /**
   * Events are collected for the entire duration between the
   * cuptiEventGroupEnable and cuptiEventGroupDisable calls.
   * Event values are reset when the events are read.
   * For CUDA toolkit v6.0 and older this was the default mode.
   */
  CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS = 0,
  /**
   * Events are collected only for the durations of kernel executions
   * that occur between the cuptiEventGroupEnable and
   * cuptiEventGroupDisable calls. Event collection begins when a
   * kernel execution begins, and stops when kernel execution
   * completes. Event values are reset to zero when each kernel
   * execution begins. If multiple kernel executions occur between the
   * cuptiEventGroupEnable and cuptiEventGroupDisable calls then the
   * event values must be read after each kernel launch if those
   * events need to be associated with the specific kernel launch.
   * Note that collection in this mode may significantly change the
   * overall performance characteristics of the application because
   * kernel executions that occur between the cuptiEventGroupEnable and
   * cuptiEventGroupDisable calls are serialized on the GPU.
   * This is the default mode from CUDA toolkit v6.5
   */
  CUPTI_EVENT_COLLECTION_MODE_KERNEL = 1,
  /* Forces the enum to be backed by a (32-bit) signed int. */
  CUPTI_EVENT_COLLECTION_MODE_FORCE_INT = 0x7fffffff
} CUpti_EventCollectionMode;
444
+
445
/**
 * \brief An event category.
 *
 * Each event is assigned to a category that represents the general
 * type of the event. A event's category is accessed using \ref
 * cuptiEventGetAttribute and the CUPTI_EVENT_ATTR_CATEGORY attribute.
 */
typedef enum {
  /**
   * An instruction related event.
   */
  CUPTI_EVENT_CATEGORY_INSTRUCTION = 0,
  /**
   * A memory related event.
   */
  CUPTI_EVENT_CATEGORY_MEMORY = 1,
  /**
   * A cache related event.
   */
  CUPTI_EVENT_CATEGORY_CACHE = 2,
  /**
   * A profile-trigger event.
   */
  CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = 3,
  /**
   * A system event.
   */
  CUPTI_EVENT_CATEGORY_SYSTEM = 4,
  /* Forces the enum to be backed by a (32-bit) signed int. */
  CUPTI_EVENT_CATEGORY_FORCE_INT = 0x7fffffff
} CUpti_EventCategory;
475
+
476
/**
 * \brief The overflow value for a CUPTI event.
 *
 * The CUPTI event value that indicates an overflow.
 */
#define CUPTI_EVENT_OVERFLOW ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/**
 * \brief The value that indicates the event value is invalid.
 */
#define CUPTI_EVENT_INVALID ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
487
+
488
/**
 * \brief Flags for cuptiEventGroupReadEvent and
 * cuptiEventGroupReadAllEvents.
 *
 * Flags for \ref cuptiEventGroupReadEvent and \ref
 * cuptiEventGroupReadAllEvents.
 */
typedef enum {
  /**
   * No flags.
   */
  CUPTI_EVENT_READ_FLAG_NONE = 0,
  /* Forces the enum to be backed by a (32-bit) signed int. */
  CUPTI_EVENT_READ_FLAG_FORCE_INT = 0x7fffffff,
} CUpti_ReadEventFlags;
502
+
503
+
504
/**
 * \brief A set of event groups.
 *
 * A set of event groups. When returned by \ref
 * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
 * a set indicates that event groups that can be enabled at the same
 * time (i.e. all the events in the set can be collected
 * simultaneously).
 */
typedef struct {
  /**
   * The number of event groups in the set.
   */
  uint32_t numEventGroups;
  /**
   * An array of \p numEventGroups event groups.
   */
  CUpti_EventGroup *eventGroups;
} CUpti_EventGroupSet;
523
+
524
/**
 * \brief A set of event group sets.
 *
 * A set of event group sets. When returned by \ref
 * cuptiEventGroupSetsCreate and \ref cuptiMetricCreateEventGroupSets
 * a CUpti_EventGroupSets indicates the number of passes required to
 * collect all the events, and the event groups that should be
 * collected during each pass.
 */
typedef struct {
  /**
   * Number of event group sets.
   */
  uint32_t numSets;
  /**
   * An array of \p numSets event group sets.
   */
  CUpti_EventGroupSet *sets;
} CUpti_EventGroupSets;
543
+
544
/**
 * \brief Set the event collection mode.
 *
 * Set the event collection mode for a \p context. The \p mode
 * controls the event collection behavior of all events in event
 * groups created in the \p context. This API is invalid in kernel
 * replay mode.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param context The context
 * \param mode The event collection mode
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_CONTEXT
 * \retval CUPTI_ERROR_INVALID_OPERATION if called when replay mode is enabled
 * \retval CUPTI_ERROR_NOT_SUPPORTED if mode is not supported on the device
 */
CUptiResult CUPTIAPI cuptiSetEventCollectionMode(CUcontext context,
                                                 CUpti_EventCollectionMode mode);
565
+
566
/**
 * \brief Read a device attribute.
 *
 * Read a device attribute and return it in \p *value.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param device The CUDA device
 * \param attrib The attribute to read
 * \param valueSize Size of buffer pointed by the value, and
 * returns the number of bytes written to \p value
 * \param value Returns the value of the attribute
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_DEVICE
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
 * is NULL, or if \p attrib is not a device attribute
 * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
 * attribute values, indicates that the \p value buffer is too small
 * to hold the attribute value.
 */
CUptiResult CUPTIAPI cuptiDeviceGetAttribute(CUdevice device,
                                             CUpti_DeviceAttribute attrib,
                                             size_t *valueSize,
                                             void *value);

/**
 * \brief Read a device timestamp.
 *
 * Returns the device timestamp in \p *timestamp. The timestamp is
 * reported in nanoseconds and indicates the time since the device was
 * last reset.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param context A context on the device from which to get the timestamp
 * \param timestamp Returns the device timestamp
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_CONTEXT
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p timestamp is NULL
 *
 * **DEPRECATED** This API is deprecated as of CUDA 11.3
 */
CUptiResult CUPTIAPI cuptiDeviceGetTimestamp(CUcontext context,
                                             uint64_t *timestamp);
612
+
613
/**
 * \brief Get the number of domains for a device.
 *
 * Returns the number of domains in \p numDomains for a device.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param device The CUDA device
 * \param numDomains Returns the number of domains
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_DEVICE
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
 */
CUptiResult CUPTIAPI cuptiDeviceGetNumEventDomains(CUdevice device,
                                                   uint32_t *numDomains);

/**
 * \brief Get the event domains for a device.
 *
 * Returns the event domains IDs in \p domainArray for a device. The
 * size of the \p domainArray buffer is given by \p
 * *arraySizeBytes. The size of the \p domainArray buffer must be at
 * least \p numdomains * sizeof(CUpti_EventDomainID) or else all
 * domains will not be returned. The value returned in \p
 * *arraySizeBytes contains the number of bytes returned in \p
 * domainArray.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param device The CUDA device
 * \param arraySizeBytes The size of \p domainArray in bytes, and
 * returns the number of bytes written to \p domainArray
 * \param domainArray Returns the IDs of the event domains for the device
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_DEVICE
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
 * \p domainArray are NULL
 */
CUptiResult CUPTIAPI cuptiDeviceEnumEventDomains(CUdevice device,
                                                 size_t *arraySizeBytes,
                                                 CUpti_EventDomainID *domainArray);
656
+
657
/**
 * \brief Read an event domain attribute.
 *
 * Returns an event domain attribute in \p *value. The size of the \p
 * value buffer is given by \p *valueSize. The value returned in \p
 * *valueSize contains the number of bytes returned in \p value.
 *
 * If the attribute value is a c-string that is longer than \p
 * *valueSize, then only the first \p *valueSize characters will be
 * returned and there will be no terminating null byte.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param device The CUDA device
 * \param eventDomain ID of the event domain
 * \param attrib The event domain attribute to read
 * \param valueSize The size of the \p value buffer in bytes, and
 * returns the number of bytes written to \p value
 * \param value Returns the attribute's value
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_DEVICE
 * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
 * is NULL, or if \p attrib is not an event domain attribute
 * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
 * attribute values, indicates that the \p value buffer is too small
 * to hold the attribute value.
 */
CUptiResult CUPTIAPI cuptiDeviceGetEventDomainAttribute(CUdevice device,
                                                        CUpti_EventDomainID eventDomain,
                                                        CUpti_EventDomainAttribute attrib,
                                                        size_t *valueSize,
                                                        void *value);
691
+
692
/**
 * \brief Get the number of event domains available on any device.
 *
 * Returns the total number of event domains available on any
 * CUDA-capable device.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param numDomains Returns the number of domains
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numDomains is NULL
 */
CUptiResult CUPTIAPI cuptiGetNumEventDomains(uint32_t *numDomains);

/**
 * \brief Get the event domains available on any device.
 *
 * Returns all the event domains available on any CUDA-capable device.
 * Event domain IDs are returned in \p domainArray. The size of the \p
 * domainArray buffer is given by \p *arraySizeBytes. The size of the
 * \p domainArray buffer must be at least \p numDomains *
 * sizeof(CUpti_EventDomainID) or all domains will not be
 * returned. The value returned in \p *arraySizeBytes contains the
 * number of bytes returned in \p domainArray.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param arraySizeBytes The size of \p domainArray in bytes, and
 * returns the number of bytes written to \p domainArray
 * \param domainArray Returns all the event domains
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or
 * \p domainArray are NULL
 */
CUptiResult CUPTIAPI cuptiEnumEventDomains(size_t *arraySizeBytes,
                                           CUpti_EventDomainID *domainArray);
728
+
729
/**
 * \brief Read an event domain attribute.
 *
 * Returns an event domain attribute in \p *value. The size of the \p
 * value buffer is given by \p *valueSize. The value returned in \p
 * *valueSize contains the number of bytes returned in \p value.
 *
 * If the attribute value is a c-string that is longer than \p
 * *valueSize, then only the first \p *valueSize characters will be
 * returned and there will be no terminating null byte.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventDomain ID of the event domain
 * \param attrib The event domain attribute to read
 * \param valueSize The size of the \p value buffer in bytes, and
 * returns the number of bytes written to \p value
 * \param value Returns the attribute's value
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
 * is NULL, or if \p attrib is not an event domain attribute
 * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
 * attribute values, indicates that the \p value buffer is too small
 * to hold the attribute value.
 */
CUptiResult CUPTIAPI cuptiEventDomainGetAttribute(CUpti_EventDomainID eventDomain,
                                                  CUpti_EventDomainAttribute attrib,
                                                  size_t *valueSize,
                                                  void *value);

/**
 * \brief Get number of events in a domain.
 *
 * Returns the number of events in \p numEvents for a domain.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventDomain ID of the event domain
 * \param numEvents Returns the number of events in the domain
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p numEvents is NULL
 */
CUptiResult CUPTIAPI cuptiEventDomainGetNumEvents(CUpti_EventDomainID eventDomain,
                                                  uint32_t *numEvents);

/**
 * \brief Get the events in a domain.
 *
 * Returns the event IDs in \p eventArray for a domain. The size of
 * the \p eventArray buffer is given by \p *arraySizeBytes. The size
 * of the \p eventArray buffer must be at least \p numdomainevents *
 * sizeof(CUpti_EventID) or else all events will not be returned. The
 * value returned in \p *arraySizeBytes contains the number of bytes
 * returned in \p eventArray.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventDomain ID of the event domain
 * \param arraySizeBytes The size of \p eventArray in bytes, and
 * returns the number of bytes written to \p eventArray
 * \param eventArray Returns the IDs of the events in the domain
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_EVENT_DOMAIN_ID
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p arraySizeBytes or \p
 * eventArray are NULL
 */
CUptiResult CUPTIAPI cuptiEventDomainEnumEvents(CUpti_EventDomainID eventDomain,
                                                size_t *arraySizeBytes,
                                                CUpti_EventID *eventArray);
803
+
804
/**
 * \brief Get an event attribute.
 *
 * Returns an event attribute in \p *value. The size of the \p
 * value buffer is given by \p *valueSize. The value returned in \p
 * *valueSize contains the number of bytes returned in \p value.
 *
 * If the attribute value is a c-string that is longer than \p
 * *valueSize, then only the first \p *valueSize characters will be
 * returned and there will be no terminating null byte.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param event ID of the event
 * \param attrib The event attribute to read
 * \param valueSize The size of the \p value buffer in bytes, and
 * returns the number of bytes written to \p value
 * \param value Returns the attribute's value
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_EVENT_ID
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
 * is NULL, or if \p attrib is not an event attribute
 * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
 * attribute values, indicates that the \p value buffer is too small
 * to hold the attribute value.
 */
CUptiResult CUPTIAPI cuptiEventGetAttribute(CUpti_EventID event,
                                            CUpti_EventAttribute attrib,
                                            size_t *valueSize,
                                            void *value);

/**
 * \brief Find an event by name.
 *
 * Find an event by name and return the event ID in \p *event.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param device The CUDA device
 * \param eventName The name of the event to find
 * \param event Returns the ID of the found event or undefined if
 * unable to find the event
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_DEVICE
 * \retval CUPTI_ERROR_INVALID_EVENT_NAME if unable to find an event
 * with name \p eventName. In this case \p *event is undefined
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventName or \p event are NULL
 */
CUptiResult CUPTIAPI cuptiEventGetIdFromName(CUdevice device,
                                             const char *eventName,
                                             CUpti_EventID *event);
857
+
858
/**
 * \brief Create a new event group for a context.
 *
 * Creates a new event group for \p context and returns the new group
 * in \p *eventGroup.
 * \note \p flags are reserved for future use and should be set to zero.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param context The context for the event group
 * \param eventGroup Returns the new event group
 * \param flags Reserved - must be zero
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_CONTEXT
 * \retval CUPTI_ERROR_OUT_OF_MEMORY
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
 */
CUptiResult CUPTIAPI cuptiEventGroupCreate(CUcontext context,
                                           CUpti_EventGroup *eventGroup,
                                           uint32_t flags);

/**
 * \brief Destroy an event group.
 *
 * Destroy an \p eventGroup and free its resources. An event group
 * cannot be destroyed if it is enabled.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventGroup The event group to destroy
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_OPERATION if the event group is enabled
 * \retval CUPTI_ERROR_INVALID_PARAMETER if eventGroup is NULL
 */
CUptiResult CUPTIAPI cuptiEventGroupDestroy(CUpti_EventGroup eventGroup);
895
+
896
/**
 * \brief Read an event group attribute.
 *
 * Read an event group attribute and return it in \p *value.
 * \note \b Thread-safety: this function is thread safe but client
 * must guard against simultaneous destruction or modification of \p
 * eventGroup (for example, client must guard against simultaneous
 * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
 * etc.), and must guard against simultaneous destruction of the
 * context in which \p eventGroup was created (for example, client
 * must guard against simultaneous calls to cudaDeviceReset,
 * cuCtxDestroy, etc.).
 *
 * \param eventGroup The event group
 * \param attrib The attribute to read
 * \param valueSize Size of buffer pointed by the value, and
 * returns the number of bytes written to \p value
 * \param value Returns the value of the attribute
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
 * is NULL, or if \p attrib is not an eventgroup attribute
 * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT For non-c-string
 * attribute values, indicates that the \p value buffer is too small
 * to hold the attribute value.
 */
CUptiResult CUPTIAPI cuptiEventGroupGetAttribute(CUpti_EventGroup eventGroup,
                                                 CUpti_EventGroupAttribute attrib,
                                                 size_t *valueSize,
                                                 void *value);

/**
 * \brief Write an event group attribute.
 *
 * Write an event group attribute.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventGroup The event group
 * \param attrib The attribute to write
 * \param valueSize The size, in bytes, of the value
 * \param value The attribute value to write
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p valueSize or \p value
 * is NULL, or if \p attrib is not an event group attribute, or if
 * \p attrib is not a writable attribute
 * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT Indicates that
 * the \p value buffer is too small to hold the attribute value.
 */
CUptiResult CUPTIAPI cuptiEventGroupSetAttribute(CUpti_EventGroup eventGroup,
                                                 CUpti_EventGroupAttribute attrib,
                                                 size_t valueSize,
                                                 void *value);
951
+
952
/**
 * \brief Add an event to an event group.
 *
 * Add an event to an event group. The event add can fail for a number of reasons:
 * \li The event group is enabled
 * \li The event does not belong to the same event domain as the
 * events that are already in the event group
 * \li Device limitations on the events that can belong to the same group
 * \li The event group is full
 *
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventGroup The event group
 * \param event The event to add to the group
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_EVENT_ID
 * \retval CUPTI_ERROR_OUT_OF_MEMORY
 * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
 * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p event belongs to a
 * different event domain than the events already in \p eventGroup, or
 * if a device limitation prevents \p event from being collected at
 * the same time as the events already in \p eventGroup
 * \retval CUPTI_ERROR_MAX_LIMIT_REACHED if \p eventGroup is full
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
 */
CUptiResult CUPTIAPI cuptiEventGroupAddEvent(CUpti_EventGroup eventGroup,
                                             CUpti_EventID event);

/**
 * \brief Remove an event from an event group.
 *
 * Remove \p event from the an event group. The event cannot be
 * removed if the event group is enabled.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventGroup The event group
 * \param event The event to remove from the group
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_EVENT_ID
 * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
 */
CUptiResult CUPTIAPI cuptiEventGroupRemoveEvent(CUpti_EventGroup eventGroup,
                                                CUpti_EventID event);

/**
 * \brief Remove all events from an event group.
 *
 * Remove all events from an event group. Events cannot be removed if
 * the event group is enabled.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventGroup The event group
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is enabled
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
 */
CUptiResult CUPTIAPI cuptiEventGroupRemoveAllEvents(CUpti_EventGroup eventGroup);
1016
+
1017
/**
 * \brief Zero all the event counts in an event group.
 *
 * Zero all the event counts in an event group.
 * \note \b Thread-safety: this function is thread safe but client
 * must guard against simultaneous destruction or modification of \p
 * eventGroup (for example, client must guard against simultaneous
 * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
 * etc.), and must guard against simultaneous destruction of the
 * context in which \p eventGroup was created (for example, client
 * must guard against simultaneous calls to cudaDeviceReset,
 * cuCtxDestroy, etc.).
 *
 * \param eventGroup The event group
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_HARDWARE
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
 */
CUptiResult CUPTIAPI cuptiEventGroupResetAllEvents(CUpti_EventGroup eventGroup);

/**
 * \brief Enable an event group.
 *
 * Enable an event group. Enabling an event group zeros the value of
 * all the events in the group and then starts collection of those
 * events.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventGroup The event group
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_HARDWARE
 * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
 * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
 * enabled due to other already enabled event groups
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
 * \retval CUPTI_ERROR_HARDWARE_BUSY if another client is profiling
 * and hardware is busy
 */
CUptiResult CUPTIAPI cuptiEventGroupEnable(CUpti_EventGroup eventGroup);

/**
 * \brief Disable an event group.
 *
 * Disable an event group. Disabling an event group stops collection
 * of events contained in the group.
 * \note \b Thread-safety: this function is thread safe.
 *
 * \param eventGroup The event group
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_HARDWARE
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup is NULL
 */
CUptiResult CUPTIAPI cuptiEventGroupDisable(CUpti_EventGroup eventGroup);
1076
+
1077
/**
 * \brief Read the value for an event in an event group.
 *
 * Read the value for an event in an event group. The event value is
 * returned in the \p eventValueBuffer buffer. \p
 * eventValueBufferSizeBytes indicates the size of the \p
 * eventValueBuffer buffer. The buffer must be at least sizeof(uint64)
 * if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set
 * on the group containing the event. The buffer must be at least
 * (sizeof(uint64) * number of domain instances) if
 * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is set on the
 * group.
 *
 * If any instance of an event counter overflows, the value returned
 * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
 *
 * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
 *
 * Reading an event from a disabled event group is not allowed. After
 * being read, an event's value is reset to zero.
 * \note \b Thread-safety: this function is thread safe but client
 * must guard against simultaneous destruction or modification of \p
 * eventGroup (for example, client must guard against simultaneous
 * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
 * etc.), and must guard against simultaneous destruction of the
 * context in which \p eventGroup was created (for example, client
 * must guard against simultaneous calls to cudaDeviceReset,
 * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
 * called simultaneously with this function, then returned event
 * values are undefined.
 *
 * \param eventGroup The event group
 * \param flags Flags controlling the reading mode
 * \param event The event to read
 * \param eventValueBufferSizeBytes The size of \p eventValueBuffer
 * in bytes, and returns the number of bytes written to \p
 * eventValueBuffer
 * \param eventValueBuffer Returns the event value(s)
 *
 * \retval CUPTI_SUCCESS
 * \retval CUPTI_ERROR_NOT_INITIALIZED
 * \retval CUPTI_ERROR_INVALID_EVENT_ID
 * \retval CUPTI_ERROR_HARDWARE
 * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
 * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
 * eventValueBufferSizeBytes or \p eventValueBuffer is NULL
 * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
 * is not sufficient
 */
CUptiResult CUPTIAPI cuptiEventGroupReadEvent(CUpti_EventGroup eventGroup,
                                              CUpti_ReadEventFlags flags,
                                              CUpti_EventID event,
                                              size_t *eventValueBufferSizeBytes,
                                              uint64_t *eventValueBuffer);
1131
+
1132
+ /**
1133
+ * \brief Read the values for all the events in an event group.
1134
+ *
1135
+ * Read the values for all the events in an event group. The event
1136
+ * values are returned in the \p eventValueBuffer buffer. \p
1137
+ * eventValueBufferSizeBytes indicates the size of \p
1138
+ * eventValueBuffer. The buffer must be at least (sizeof(uint64) *
1139
+ * number of events in group) if
1140
+ * ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is not set on
1141
+ * the group containing the events. The buffer must be at least
1142
+ * (sizeof(uint64) * number of domain instances * number of events in
1143
+ * group) if ::CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES is
1144
+ * set on the group.
1145
+ *
1146
+ * The data format returned in \p eventValueBuffer is:
1147
+ * - domain instance 0: event0 event1 ... eventN
1148
+ * - domain instance 1: event0 event1 ... eventN
1149
+ * - ...
1150
+ * - domain instance M: event0 event1 ... eventN
1151
+ *
1152
+ * The event order in \p eventValueBuffer is returned in \p
1153
+ * eventIdArray. The size of \p eventIdArray is specified in \p
1154
+ * eventIdArraySizeBytes. The size should be at least
1155
+ * (sizeof(CUpti_EventID) * number of events in group).
1156
+ *
1157
+ * If any instance of any event counter overflows, the value returned
1158
+ * for that event instance will be ::CUPTI_EVENT_OVERFLOW.
1159
+ *
1160
+ * The only allowed value for \p flags is ::CUPTI_EVENT_READ_FLAG_NONE.
1161
+ *
1162
+ * Reading events from a disabled event group is not allowed. After
1163
+ * being read, an event's value is reset to zero.
1164
+ * \note \b Thread-safety: this function is thread safe but client
1165
+ * must guard against simultaneous destruction or modification of \p
1166
+ * eventGroup (for example, client must guard against simultaneous
1167
+ * calls to \ref cuptiEventGroupDestroy, \ref cuptiEventGroupAddEvent,
1168
+ * etc.), and must guard against simultaneous destruction of the
1169
+ * context in which \p eventGroup was created (for example, client
1170
+ * must guard against simultaneous calls to cudaDeviceReset,
1171
+ * cuCtxDestroy, etc.). If \ref cuptiEventGroupResetAllEvents is
1172
+ * called simultaneously with this function, then returned event
1173
+ * values are undefined.
1174
+ *
1175
+ * \param eventGroup The event group
1176
+ * \param flags Flags controlling the reading mode
1177
+ * \param eventValueBufferSizeBytes The size of \p eventValueBuffer in
1178
+ * bytes, and returns the number of bytes written to \p
1179
+ * eventValueBuffer
1180
+ * \param eventValueBuffer Returns the event values
1181
+ * \param eventIdArraySizeBytes The size of \p eventIdArray in bytes,
1182
+ * and returns the number of bytes written to \p eventIdArray
1183
+ * \param eventIdArray Returns the IDs of the events in the same order
1184
+ * as the values return in eventValueBuffer.
1185
+ * \param numEventIdsRead Returns the number of event IDs returned
1186
+ * in \p eventIdArray
1187
+ *
1188
+ * \retval CUPTI_SUCCESS
1189
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1190
+ * \retval CUPTI_ERROR_HARDWARE
1191
+ * \retval CUPTI_ERROR_INVALID_OPERATION if \p eventGroup is disabled
1192
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroup, \p
1193
+ * eventValueBufferSizeBytes, \p eventValueBuffer, \p
1194
+ * eventIdArraySizeBytes, \p eventIdArray or \p numEventIdsRead is
1195
+ * NULL
1196
+ * \retval CUPTI_ERROR_PARAMETER_SIZE_NOT_SUFFICIENT if size of \p eventValueBuffer
1197
+ * or \p eventIdArray is not sufficient
1198
+ */
1199
+ CUptiResult CUPTIAPI cuptiEventGroupReadAllEvents(CUpti_EventGroup eventGroup,
1200
+ CUpti_ReadEventFlags flags,
1201
+ size_t *eventValueBufferSizeBytes,
1202
+ uint64_t *eventValueBuffer,
1203
+ size_t *eventIdArraySizeBytes,
1204
+ CUpti_EventID *eventIdArray,
1205
+ size_t *numEventIdsRead);
1206
+
1207
+ /**
1208
+ * \brief For a set of events, get the grouping that indicates the
1209
+ * number of passes and the event groups necessary to collect the
1210
+ * events.
1211
+ *
1212
+ * The number of events that can be collected simultaneously varies by
1213
+ * device and by the type of the events. When events can be collected
1214
+ * simultaneously, they may need to be grouped into multiple event
1215
+ * groups because they are from different event domains. This function
1216
+ * takes a set of events and determines how many passes are required
1217
+ * to collect all those events, and which events can be collected
1218
+ * simultaneously in each pass.
1219
+ *
1220
+ * The CUpti_EventGroupSets returned in \p eventGroupPasses indicates
1221
+ * how many passes are required to collect the events with the \p
1222
+ * numSets field. Within each event group set, the \p sets array
1223
+ * indicates the event groups that should be collected on each pass.
1224
+ * \note \b Thread-safety: this function is thread safe, but client
1225
+ * must guard against another thread simultaneously destroying \p
1226
+ * context.
1227
+ *
1228
+ * \param context The context for event collection
1229
+ * \param eventIdArraySizeBytes Size of \p eventIdArray in bytes
1230
+ * \param eventIdArray Array of event IDs that need to be grouped
1231
+ * \param eventGroupPasses Returns a CUpti_EventGroupSets object that
1232
+ * indicates the number of passes required to collect the events and
1233
+ * the events to collect on each pass
1234
+ *
1235
+ * \retval CUPTI_SUCCESS
1236
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1237
+ * \retval CUPTI_ERROR_INVALID_CONTEXT
1238
+ * \retval CUPTI_ERROR_INVALID_EVENT_ID
1239
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventIdArray or
1240
+ * \p eventGroupPasses is NULL
1241
+ */
1242
+ CUptiResult CUPTIAPI cuptiEventGroupSetsCreate(CUcontext context,
1243
+ size_t eventIdArraySizeBytes,
1244
+ CUpti_EventID *eventIdArray,
1245
+ CUpti_EventGroupSets **eventGroupPasses);
1246
+
1247
+ /**
1248
+ * \brief Destroy a event group sets object.
1249
+ *
1250
+ * Destroy a CUpti_EventGroupSets object.
1251
+ * \note \b Thread-safety: this function is thread safe.
1252
+ *
1253
+ * \param eventGroupSets The object to destroy
1254
+ *
1255
+ * \retval CUPTI_SUCCESS
1256
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1257
+ * \retval CUPTI_ERROR_INVALID_OPERATION if any of the event groups
1258
+ * contained in the sets is enabled
1259
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSets is NULL
1260
+ */
1261
+ CUptiResult CUPTIAPI cuptiEventGroupSetsDestroy(CUpti_EventGroupSets *eventGroupSets);
1262
+
1263
+
1264
+ /**
1265
+ * \brief Enable an event group set.
1266
+ *
1267
+ * Enable a set of event groups. Enabling a set of event groups zeros the value of
1268
+ * all the events in all the groups and then starts collection of those events.
1269
+ * \note \b Thread-safety: this function is thread safe.
1270
+ *
1271
+ * \param eventGroupSet The pointer to the event group set
1272
+ *
1273
+ * \retval CUPTI_SUCCESS
1274
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1275
+ * \retval CUPTI_ERROR_HARDWARE
1276
+ * \retval CUPTI_ERROR_NOT_READY if \p eventGroup does not contain any events
1277
+ * \retval CUPTI_ERROR_NOT_COMPATIBLE if \p eventGroup cannot be
1278
+ * enabled due to other already enabled event groups
1279
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1280
+ * \retval CUPTI_ERROR_HARDWARE_BUSY if other client is profiling and hardware is
1281
+ * busy
1282
+ */
1283
+ CUptiResult CUPTIAPI cuptiEventGroupSetEnable(CUpti_EventGroupSet *eventGroupSet);
1284
+
1285
+ /**
1286
+ * \brief Disable an event group set.
1287
+ *
1288
+ * Disable a set of event groups. Disabling a set of event groups
1289
+ * stops collection of events contained in the groups.
1290
+ * \note \b Thread-safety: this function is thread safe.
1291
+ * \note \b If this call fails, some of the event groups in the set may be disabled
1292
+ * and other event groups may remain enabled.
1293
+ *
1294
+ * \param eventGroupSet The pointer to the event group set
1295
+ * \retval CUPTI_SUCCESS
1296
+ * \retval CUPTI_ERROR_NOT_INITIALIZED
1297
+ * \retval CUPTI_ERROR_HARDWARE
1298
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p eventGroupSet is NULL
1299
+ */
1300
+ CUptiResult CUPTIAPI cuptiEventGroupSetDisable(CUpti_EventGroupSet *eventGroupSet);
1301
+
1302
+ /**
1303
+ * \brief Enable kernel replay mode.
1304
+ *
1305
+ * Set profiling mode for the context to replay mode. In this mode,
1306
+ * any number of events can be collected in one run of the kernel. The
1307
+ * event collection mode will automatically switch to
1308
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. In this mode, \ref
1309
+ * cuptiSetEventCollectionMode will return
1310
+ * CUPTI_ERROR_INVALID_OPERATION.
1311
+ * \note \b Kernels might take longer to run if many events are enabled.
1312
+ * \note \b Thread-safety: this function is thread safe.
1313
+ *
1314
+ * \param context The context
1315
+ * \retval CUPTI_SUCCESS
1316
+ */
1317
+ CUptiResult CUPTIAPI cuptiEnableKernelReplayMode(CUcontext context);
1318
+
1319
+ /**
1320
+ * \brief Disable kernel replay mode.
1321
+ *
1322
+ * Set profiling mode for the context to non-replay (default)
1323
+ * mode. Event collection mode will be set to
1324
+ * CUPTI_EVENT_COLLECTION_MODE_KERNEL. All previously enabled
1325
+ * event groups and event group sets will be disabled.
1326
+ * \note \b Thread-safety: this function is thread safe.
1327
+ *
1328
+ * \param context The context
1329
+ * \retval CUPTI_SUCCESS
1330
+ */
1331
+ CUptiResult CUPTIAPI cuptiDisableKernelReplayMode(CUcontext context);
1332
+
1333
+ /**
1334
+ * \brief Function type for getting updates on kernel replay.
1335
+ *
1336
+ * \param kernelName The mangled kernel name
1337
+ * \param numReplaysDone Number of replays done so far
1338
+ * \param customData Pointer of any custom data passed in when subscribing
1339
+ */
1340
+ typedef void (CUPTIAPI *CUpti_KernelReplayUpdateFunc)(
1341
+ const char *kernelName,
1342
+ int numReplaysDone,
1343
+ void *customData);
1344
+
1345
+ /**
1346
+ * \brief Subscribe to kernel replay updates.
1347
+ *
1348
+ * When subscribed, the function pointer passed in will be called each time a
1349
+ * kernel run is finished during kernel replay. Previously subscribed function
1350
+ * pointer will be replaced. Pass in NULL as the function pointer unsubscribes
1351
+ * the update.
1352
+ *
1353
+ * \param updateFunc The update function pointer
1354
+ * \param customData Pointer to any custom data
1355
+ * \retval CUPTI_SUCCESS
1356
+ */
1357
+ CUptiResult CUPTIAPI cuptiKernelReplaySubscribeUpdate(CUpti_KernelReplayUpdateFunc updateFunc, void *customData);
1358
+
1359
+ /** @} */ /* END CUPTI_EVENT_API */
1360
+
1361
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
1362
+ #pragma GCC visibility pop
1363
+ #endif
1364
+
1365
+ #if defined(__cplusplus)
1366
+ }
1367
+ #endif
1368
+
1369
+ #endif /*_CUPTI_EVENTS_H_*/
1370
+
1371
+
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/cupti_profiler_target.h ADDED
@@ -0,0 +1,589 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2011-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(_CUPTI_PROFILER_TARGET_H_)
51
+ #define _CUPTI_PROFILER_TARGET_H_
52
+
53
+ #include <cuda.h>
54
+ #include <cupti_result.h>
55
+ #include <stddef.h>
56
+ #include <stdint.h>
57
+
58
+ #ifdef __cplusplus
59
+ extern "C" {
60
+ #endif
61
+
62
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
63
+ #pragma GCC visibility push(default)
64
+ #endif
65
+
66
+ /**
67
+ * \defgroup CUPTI_PROFILER_API CUPTI Profiling API
68
+ * Functions, types, and enums that implement the CUPTI Profiling API.
69
+ * @{
70
+ */
71
+ #ifndef CUPTI_PROFILER_STRUCT_SIZE
72
+ #define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
73
+ #endif
74
+
75
+ /**
76
+ * \brief Profiler range attribute
77
+ *
78
+ * A metric enabled in the session's configuration is collected separately per unique range-stack in the pass.
79
+ * This is an attribute to collect metrics around each kernel in a profiling session or in an user defined range.
80
+ */
81
+ typedef enum
82
+ {
83
+ /**
84
+ * Invalid value
85
+ */
86
+ CUPTI_Range_INVALID,
87
+ /**
88
+ * Ranges are auto defined around each kernel in a profiling session
89
+ */
90
+ CUPTI_AutoRange,
91
+ /**
92
+ * A range in which metric data to be collected is defined by the user
93
+ */
94
+ CUPTI_UserRange,
95
+ /**
96
+ * Range count
97
+ */
98
+ CUPTI_Range_COUNT,
99
+ } CUpti_ProfilerRange;
100
+
101
+ /**
102
+ * \brief Profiler replay attribute
103
+ *
104
+ * For metrics which require multipass collection, a replay of the GPU kernel(s) is required.
105
+ * This is an attribute which specify how the replay of the kernel(s) to be measured is done.
106
+ */
107
+ typedef enum
108
+ {
109
+ /**
110
+ * Invalid Value
111
+ */
112
+ CUPTI_Replay_INVALID,
113
+ /**
114
+ * Replay is done by CUPTI user around the process
115
+ */
116
+ CUPTI_ApplicationReplay,
117
+ /**
118
+ * Replay is done around kernel implicitly by CUPTI
119
+ */
120
+ CUPTI_KernelReplay,
121
+ /**
122
+ * Replay is done by CUPTI user within a process
123
+ */
124
+ CUPTI_UserReplay,
125
+ /**
126
+ * Replay count
127
+ */
128
+ CUPTI_Replay_COUNT,
129
+ } CUpti_ProfilerReplayMode;
130
+
131
+ /**
132
+ * \brief Default parameter for cuptiProfilerInitialize
133
+ */
134
+ typedef struct CUpti_Profiler_Initialize_Params
135
+ {
136
+ size_t structSize; //!< [in] CUpti_Profiler_Initialize_Params_STRUCT_SIZE
137
+ void* pPriv; //!< [in] assign to NULL
138
+
139
+ } CUpti_Profiler_Initialize_Params;
140
+ #define CUpti_Profiler_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_Initialize_Params, pPriv)
141
+
142
+ /**
143
+ * \brief Default parameter for cuptiProfilerDeInitialize
144
+ */
145
+ typedef struct CUpti_Profiler_DeInitialize_Params
146
+ {
147
+ size_t structSize; //!< [in] CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE
148
+ void* pPriv; //!< [in] assign to NULL
149
+
150
+ } CUpti_Profiler_DeInitialize_Params;
151
+ #define CUpti_Profiler_DeInitialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeInitialize_Params, pPriv)
152
+
153
+ /**
154
+ * \brief Initializes the profiler interface
155
+ *
156
+ * Loads the required libraries in the process address space.
157
+ * Sets up the hooks with the CUDA driver.
158
+ */
159
+ CUptiResult CUPTIAPI cuptiProfilerInitialize(CUpti_Profiler_Initialize_Params *pParams);
160
+
161
+ /**
162
+ * \brief DeInitializes the profiler interface
163
+ */
164
+ CUptiResult CUPTIAPI cuptiProfilerDeInitialize(CUpti_Profiler_DeInitialize_Params *pParams);
165
+
166
+ /**
167
+ * \brief Input parameter to define the counterDataImage
168
+ */
169
+ typedef struct CUpti_Profiler_CounterDataImageOptions
170
+ {
171
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImageOptions_Params_STRUCT_SIZE
172
+ void* pPriv; //!< [in] assign to NULL
173
+
174
+ const uint8_t* pCounterDataPrefix; /**< [in] Address of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix().
175
+ Must be align(8).*/
176
+ size_t counterDataPrefixSize; //!< [in] Size of CounterDataPrefix generated from NVPW_CounterDataBuilder_GetCounterDataPrefix().
177
+ uint32_t maxNumRanges; //!< [in] Maximum number of ranges that can be profiled
178
+ uint32_t maxNumRangeTreeNodes; //!< [in] Maximum number of RangeTree nodes; must be >= maxNumRanges
179
+ uint32_t maxRangeNameLength; //!< [in] Maximum string length of each RangeName, including the trailing NULL character
180
+ } CUpti_Profiler_CounterDataImageOptions;
181
+ #define CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImageOptions, maxRangeNameLength)
182
+
183
+ /**
184
+ * \brief Params for cuptiProfilerCounterDataImageCalculateSize
185
+ */
186
+ typedef struct CUpti_Profiler_CounterDataImage_CalculateSize_Params
187
+ {
188
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE
189
+ void* pPriv; //!< [in] assign to NULL
190
+
191
+ size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE
192
+ const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options
193
+ size_t counterDataImageSize; //!< [out]
194
+ } CUpti_Profiler_CounterDataImage_CalculateSize_Params;
195
+ #define CUpti_Profiler_CounterDataImage_CalculateSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateSize_Params, counterDataImageSize)
196
+
197
+ /**
198
+ * \brief Params for cuptiProfilerCounterDataImageInitialize
199
+ */
200
+ typedef struct CUpti_Profiler_CounterDataImage_Initialize_Params
201
+ {
202
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE
203
+ void* pPriv; //!< [in] assign to NULL
204
+
205
+ size_t sizeofCounterDataImageOptions; //!< [in] CUpti_Profiler_CounterDataImageOptions_STRUCT_SIZE
206
+ const CUpti_Profiler_CounterDataImageOptions* pOptions; //!< [in] Pointer to Counter Data Image Options
207
+ size_t counterDataImageSize; //!< [in] Size calculated from cuptiProfilerCounterDataImageCalculateSize
208
+ uint8_t* pCounterDataImage; //!< [in] The buffer to be initialized.
209
+ } CUpti_Profiler_CounterDataImage_Initialize_Params;
210
+ #define CUpti_Profiler_CounterDataImage_Initialize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_Initialize_Params, pCounterDataImage)
211
+
212
+ /**
213
+ * \brief A CounterData image allocates space for values for each counter for each range.
214
+ *
215
+ * User borne the resposibility of managing the counterDataImage allocations.
216
+ * CounterDataPrefix contains meta data about the metrics that will be stored in counterDataImage.
217
+ * Use these APIs to calculate the allocation size and initialize counterData image.
218
+ */
219
+ CUptiResult cuptiProfilerCounterDataImageCalculateSize(CUpti_Profiler_CounterDataImage_CalculateSize_Params* pParams);
220
+ CUptiResult cuptiProfilerCounterDataImageInitialize(CUpti_Profiler_CounterDataImage_Initialize_Params* pParams);
221
+
222
+ /**
223
+ * \brief Params for cuptiProfilerCounterDataImageCalculateScratchBufferSize
224
+ */
225
+ typedef struct CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params
226
+ {
227
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE
228
+ void* pPriv; //!< [in] assign to NULL
229
+
230
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
231
+ uint8_t* pCounterDataImage; //!< [in]
232
+ size_t counterDataScratchBufferSize; //!< [out]
233
+ } CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params;
234
+ #define CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params, counterDataScratchBufferSize)
235
+
236
+ /**
237
+ * \brief Params for cuptiProfilerCounterDataImageInitializeScratchBuffer
238
+ */
239
+ typedef struct CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params
240
+ {
241
+ size_t structSize; //!< [in] CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE
242
+ void* pPriv; //!< [in] assign to NULL
243
+
244
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
245
+ uint8_t* pCounterDataImage; //!< [in]
246
+ size_t counterDataScratchBufferSize; //!< [in] size calculated using cuptiProfilerCounterDataImageCalculateScratchBufferSize
247
+ uint8_t* pCounterDataScratchBuffer; //!< [in] the scratch buffer to be initialized.
248
+ } CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params;
249
+ #define CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params, pCounterDataScratchBuffer)
250
+
251
+ /**
252
+ * \brief A temporary storage for CounterData image needed for internal operations
253
+ *
254
+ * Use these APIs to calculate the allocation size and initialize counterData image scratch buffer.
255
+ */
256
+ CUptiResult cuptiProfilerCounterDataImageCalculateScratchBufferSize(CUpti_Profiler_CounterDataImage_CalculateScratchBufferSize_Params* pParams);
257
+ CUptiResult cuptiProfilerCounterDataImageInitializeScratchBuffer(CUpti_Profiler_CounterDataImage_InitializeScratchBuffer_Params* pParams);
258
+
259
+ /**
260
+ * \brief Params for cuptiProfilerBeginSession
261
+ */
262
+ typedef struct CUpti_Profiler_BeginSession_Params
263
+ {
264
+ size_t structSize; //!< [in] CUpti_Profiler_BeginSession_Params_STRUCT_SIZE
265
+ void* pPriv; //!< [in] assign to NULL
266
+
267
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
268
+ size_t counterDataImageSize; //!< [in] size calculated from cuptiProfilerCounterDataImageCalculateSize
269
+ uint8_t* pCounterDataImage; //!< [in] address of CounterDataImage
270
+ size_t counterDataScratchBufferSize; //!< [in] size calculated from cuptiProfilerCounterDataImageInitializeScratchBuffer
271
+ uint8_t* pCounterDataScratchBuffer; //!< [in] address of CounterDataImage scratch buffer
272
+ uint8_t bDumpCounterDataInFile; //!< [in] [optional]
273
+ const char* pCounterDataFilePath; //!< [in] [optional]
274
+ CUpti_ProfilerRange range; //!< [in] CUpti_ProfilerRange
275
+ CUpti_ProfilerReplayMode replayMode; //!< [in] CUpti_ProfilerReplayMode
276
+ /* Replay options, required when replay is done by cupti user */
277
+ size_t maxRangesPerPass; //!< [in] Maximum number of ranges that can be recorded in a single pass.
278
+ size_t maxLaunchesPerPass; //!< [in] Maximum number of kernel launches that can be recorded in a single pass; must be >= maxRangesPerPass.
279
+
280
+ } CUpti_Profiler_BeginSession_Params;
281
+ #define CUpti_Profiler_BeginSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginSession_Params, maxLaunchesPerPass)
282
+ /**
283
+ * \brief Params for cuptiProfilerEndSession
284
+ */
285
+ typedef struct CUpti_Profiler_EndSession_Params
286
+ {
287
+ size_t structSize; //!< [in] CUpti_Profiler_EndSession_Params_STRUCT_SIZE
288
+ void* pPriv; //!< [in] assign to NULL
289
+
290
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
291
+ } CUpti_Profiler_EndSession_Params;
292
+ #define CUpti_Profiler_EndSession_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndSession_Params, ctx)
293
+
294
+ /**
295
+ * \brief Begin profiling session sets up the profiling on the device
296
+ *
297
+ * Although, it doesn't start the profiling but GPU resources needed for profiling are allocated.
298
+ * Outside of a session, the GPU will return to its normal operating state.
299
+ */
300
+ CUptiResult CUPTIAPI cuptiProfilerBeginSession(CUpti_Profiler_BeginSession_Params* pParams);
301
+ /**
302
+ * \brief Ends profiling session
303
+ *
304
+ * Frees up the GPU resources acquired for profiling.
305
+ * Outside of a session, the GPU will return to it's normal operating state.
306
+ */
307
+ CUptiResult CUPTIAPI cuptiProfilerEndSession(CUpti_Profiler_EndSession_Params* pParams);
308
+
309
+ /**
310
+ * \brief Params for cuptiProfilerSetConfig
311
+ */
312
+ typedef struct CUpti_Profiler_SetConfig_Params
313
+ {
314
+ size_t structSize; //!< [in] CUpti_Profiler_SetConfig_Params_STRUCT_SIZE
315
+ void* pPriv; //!< [in] assign to NULL
316
+
317
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
318
+ const uint8_t* pConfig; //!< [in] Config created by NVPW_RawMetricsConfig_GetConfigImage(). Must be align(8).
319
+ size_t configSize; //!< [in] size of config
320
+ uint16_t minNestingLevel; //!< [in] the lowest nesting level to be profiled; must be >= 1
321
+ uint16_t numNestingLevels; //!< [in] the number of nesting levels to profile; must be >= 1
322
+ size_t passIndex; //!< [in] Set this to zero for in-app replay; set this to the output of EndPass() for application replay
323
+ uint16_t targetNestingLevel; //!< [in] Set this to minNestingLevel for in-app replay; set this to the output of EndPass() for application
324
+ } CUpti_Profiler_SetConfig_Params;
325
+
326
+ #define CUpti_Profiler_SetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_SetConfig_Params, targetNestingLevel)
327
+
328
+ /**
329
+ * \brief Params for cuptiProfilerUnsetConfig
330
+ */
331
+ typedef struct CUpti_Profiler_UnsetConfig_Params
332
+ {
333
+ size_t structSize; //!< [in] CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE
334
+ void* pPriv; //!< [in] assign to NULL
335
+
336
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
337
+ } CUpti_Profiler_UnsetConfig_Params;
338
+ #define CUpti_Profiler_UnsetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_UnsetConfig_Params, ctx)
339
+
340
+ /**
341
+ * \brief Set metrics configuration to be profiled
342
+ *
343
+ * Use these APIs to set the config to profile in a session. It can be used for advanced cases such as where multiple
344
+ * configurations are collected into a single CounterData Image on the need basis, without restarting the session.
345
+ */
346
+ CUptiResult CUPTIAPI cuptiProfilerSetConfig(CUpti_Profiler_SetConfig_Params* pParams);
347
+ /**
348
+ * \brief Unset metrics configuration profiled
349
+ *
350
+ */
351
+ CUptiResult CUPTIAPI cuptiProfilerUnsetConfig(CUpti_Profiler_UnsetConfig_Params* pParams);
352
+
353
+ /**
354
+ * \brief Params for cuptiProfilerBeginPass
355
+ */
356
+ typedef struct CUpti_Profiler_BeginPass_Params
357
+ {
358
+ size_t structSize; //!< [in] CUpti_Profiler_BeginPass_Params_STRUCT_SIZE
359
+ void* pPriv; //!< [in] assign to NULL
360
+
361
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
362
+ } CUpti_Profiler_BeginPass_Params;
363
+ #define CUpti_Profiler_BeginPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_BeginPass_Params, ctx)
364
+
365
+ /**
366
+ * \brief Params for cuptiProfilerEndPass
367
+ */
368
+ typedef struct CUpti_Profiler_EndPass_Params
369
+ {
370
+ size_t structSize; //!< [in] CUpti_Profiler_EndPass_Params_STRUCT_SIZE
371
+ void* pPriv; //!< [in] assign to NULL
372
+
373
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
374
+ uint16_t targetNestingLevel; //! [out] The targetNestingLevel that will be collected by the *next* BeginPass.
375
+ size_t passIndex; //!< [out] The passIndex that will be collected by the *next* BeginPass
376
+ uint8_t allPassesSubmitted; //!< [out] becomes true when the last pass has been queued to the GPU
377
+ } CUpti_Profiler_EndPass_Params;
378
+ #define CUpti_Profiler_EndPass_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EndPass_Params, allPassesSubmitted)
379
+
380
+ /**
381
+ * \brief Replay API: used for multipass collection.
382
+
383
+ * These APIs are used if user chooses to replay by itself \ref CUPTI_UserReplay or \ref CUPTI_ApplicationReplay
384
+ * for multipass collection of the metrics configurations.
385
+ * It's a no-op in case of \ref CUPTI_KernelReplay.
386
+ */
387
+ CUptiResult cuptiProfilerBeginPass(CUpti_Profiler_BeginPass_Params* pParams);
388
+
389
+ /**
390
+ * \brief Replay API: used for multipass collection.
391
+
392
+ * These APIs are used if user chooses to replay by itself \ref CUPTI_UserReplay or \ref CUPTI_ApplicationReplay
393
+ * for multipass collection of the metrics configurations.
394
+ * Its a no-op in case of \ref CUPTI_KernelReplay.
395
+ * Returns information for next pass.
396
+ */
397
+ CUptiResult cuptiProfilerEndPass(CUpti_Profiler_EndPass_Params* pParams);
398
+
399
+ /**
400
+ * \brief Params for cuptiProfilerEnableProfiling
401
+ */
402
+ typedef struct CUpti_Profiler_EnableProfiling_Params
403
+ {
404
+ size_t structSize; //!< [in] CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE
405
+ void* pPriv; //!< [in] assign to NULL
406
+
407
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
408
+ } CUpti_Profiler_EnableProfiling_Params;
409
+ #define CUpti_Profiler_EnableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_EnableProfiling_Params, ctx)
410
+
411
+ /**
412
+ * \brief Params for cuptiProfilerDisableProfiling
413
+ */
414
+ typedef struct CUpti_Profiler_DisableProfiling_Params
415
+ {
416
+ size_t structSize; //!< [in] CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE
417
+ void* pPriv; //!< [in] assign to NULL
418
+
419
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
420
+ } CUpti_Profiler_DisableProfiling_Params;
421
+ #define CUpti_Profiler_DisableProfiling_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DisableProfiling_Params, ctx)
422
+
423
+ /**
424
+ * \brief Enables Profiling
425
+ *
426
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
427
+ * a profiling session.
428
+ */
429
+ CUptiResult CUPTIAPI cuptiProfilerEnableProfiling(CUpti_Profiler_EnableProfiling_Params* pParams);
430
+
431
+ /**
432
+ * \brief Disable Profiling
433
+ *
434
+ * In \ref CUPTI_AutoRange, these APIs are used to enable/disable profiling for the kernels to be executed in
435
+ * a profiling session.
436
+ */
437
+ CUptiResult CUPTIAPI cuptiProfilerDisableProfiling(CUpti_Profiler_DisableProfiling_Params* pParams);
438
+
439
+ /**
440
+ * \brief Params for cuptiProfilerIsPassCollected
441
+ */
442
+ typedef struct CUpti_Profiler_IsPassCollected_Params
443
+ {
444
+ size_t structSize; //!< [in] CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE
445
+ void* pPriv; //!< [in] assign to NULL
446
+
447
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
448
+ size_t numRangesDropped; //!< [out] number of ranges whose data was dropped in the processed pass
449
+ size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
450
+ uint8_t onePassCollected; //!< [out] true if a pass was successfully decoded
451
+ uint8_t allPassesCollected; //!< [out] becomes true when the last pass has been decoded
452
+ } CUpti_Profiler_IsPassCollected_Params;
453
+ #define CUpti_Profiler_IsPassCollected_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_IsPassCollected_Params, allPassesCollected)
454
+
455
+ /**
456
+ * \brief Asynchronous call to query if the submitted pass to GPU is collected
457
+ *
458
+ */
459
+ CUptiResult CUPTIAPI cuptiProfilerIsPassCollected(CUpti_Profiler_IsPassCollected_Params* pParams);
460
+
461
+ /**
462
+ * \brief Params for cuptiProfilerFlushCounterData
463
+ */
464
+ typedef struct CUpti_Profiler_FlushCounterData_Params
465
+ {
466
+ size_t structSize; //!< [in] CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE
467
+ void* pPriv; //!< [in] assign to NULL
468
+
469
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
470
+ size_t numRangesDropped; //!< [out] number of ranges whose data was dropped in the processed passes
471
+ size_t numTraceBytesDropped; //!< [out] number of bytes not written to TraceBuffer due to buffer full
472
+ } CUpti_Profiler_FlushCounterData_Params;
473
+ #define CUpti_Profiler_FlushCounterData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_FlushCounterData_Params, numTraceBytesDropped)
474
+
475
+ /**
476
+ * \brief Decode all the submitted passes
477
+ *
478
+ * Flush Counter data API to ensure every pass is decoded into the counterDataImage passed at beginSession.
479
+ * This will cause the CPU/GPU sync to collect all the undecoded pass.
480
+ */
481
+ CUptiResult CUPTIAPI cuptiProfilerFlushCounterData(CUpti_Profiler_FlushCounterData_Params* pParams);
482
+
483
+ typedef struct CUpti_Profiler_PushRange_Params
484
+ {
485
+ size_t structSize; //!< [in] CUpti_Profiler_PushRange_Params_STRUCT_SIZE
486
+ void* pPriv; //!< [in] assign to NULL
487
+
488
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
489
+ const char* pRangeName; //!< [in] specifies the range for subsequent launches; must not be NULL
490
+ size_t rangeNameLength; //!< [in] assign to strlen(pRangeName) if known; if set to zero, the library will call strlen()
491
+ } CUpti_Profiler_PushRange_Params;
492
+ #define CUpti_Profiler_PushRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PushRange_Params, rangeNameLength)
493
+
494
+ typedef struct CUpti_Profiler_PopRange_Params
495
+ {
496
+ size_t structSize; //!< [in] CUpti_Profiler_PopRange_Params_STRUCT_SIZE
497
+ void* pPriv; //!< [in] assign to NULL
498
+
499
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
500
+ } CUpti_Profiler_PopRange_Params;
501
+ #define CUpti_Profiler_PopRange_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_PopRange_Params, ctx)
502
+
503
+
504
+ /**
505
+ * \brief Range API's : Push user range
506
+ *
507
+ * Counter data is collected per unique range-stack. Identified by a string label passsed by the user.
508
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
509
+ */
510
+ CUptiResult CUPTIAPI cuptiProfilerPushRange(CUpti_Profiler_PushRange_Params *pParams);
511
+
512
+ /**
513
+ * \brief Range API's : Pop user range
514
+ *
515
+ * Counter data is collected per unique range-stack. Identified by a string label passsed by the user.
516
+ * It's an invalid operation in case of \ref CUPTI_AutoRange.
517
+ */
518
+ CUptiResult CUPTIAPI cuptiProfilerPopRange(CUpti_Profiler_PopRange_Params *pParams);
519
+
520
+ /**
521
+ * \brief Params for cuptiProfilerGetCounterAvailability
522
+ */
523
+ typedef struct CUpti_Profiler_GetCounterAvailability_Params
524
+ {
525
+ size_t structSize; //!< [in] CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE
526
+ void* pPriv; //!< [in] assign to NULL
527
+ CUcontext ctx; //!< [in] if NULL, the current CUcontext is used
528
+ size_t counterAvailabilityImageSize; //!< [in/out] If `pCounterAvailabilityImage` is NULL, then the required size is returned in
529
+ //!< `counterAvailabilityImageSize`, otherwise `counterAvailabilityImageSize` should be set to the size of
530
+ //!< `pCounterAvailabilityImage`, and on return it would be overwritten with number of actual bytes copied
531
+ uint8_t* pCounterAvailabilityImage; //!< [in] buffer receiving counter availability image, may be NULL
532
+ } CUpti_Profiler_GetCounterAvailability_Params;
533
+ #define CUpti_Profiler_GetCounterAvailability_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_GetCounterAvailability_Params, pCounterAvailabilityImage)
534
+
535
+ /**
536
+ * \brief Query counter availibility
537
+ *
538
+ * Use this API to query counter availability information in a buffer which can be used to filter unavailable raw metrics on host.
539
+ * Note: This API may fail, if any profiling or sampling session is active on the specified context or its device.
540
+ */
541
+ CUptiResult CUPTIAPI cuptiProfilerGetCounterAvailability(CUpti_Profiler_GetCounterAvailability_Params *pParams);
542
+
543
+ /// Generic support level enum for CUPTI
544
+ typedef enum
545
+ {
546
+ CUPTI_PROFILER_CONFIGURATION_UNKNOWN = 0, //!< Configuration support level unknown - either detection code errored out before setting this value, or unable to determine it
547
+ CUPTI_PROFILER_CONFIGURATION_UNSUPPORTED, //!< Profiling is unavailable. For specific feature fields, this means that the current configuration of this feature does not work with profiling. For instance, SLI-enabled devices do not support profiling, and this value would be returned for SLI on an SLI-enabled device.
548
+ CUPTI_PROFILER_CONFIGURATION_DISABLED, //!< Profiling would be available for this configuration, but was disabled by the system
549
+ CUPTI_PROFILER_CONFIGURATION_SUPPORTED //!< Profiling is supported. For specific feature fields, this means that the current configuration of this feature works with profiling. For instance, SLI-enabled devices do not support profiling, and this value would only be returned for devices which are not SLI-enabled.
550
+ } CUpti_Profiler_Support_Level;
551
+
552
+ /**
553
+ * \brief Params for cuptiProfilerDeviceSupported
554
+ */
555
+ typedef struct
556
+ {
557
+ size_t structSize; //!< [in] Must be CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE
558
+ void *pPriv; //!< [in] assign to NULL
559
+ CUdevice cuDevice; //!< [in] if NULL, the current CUcontext is used
560
+
561
+ CUpti_Profiler_Support_Level isSupported; //!< [out] overall SUPPORTED / UNSUPPORTED flag representing whether Profiling and PC Sampling APIs work on the given device and configuration. SUPPORTED if all following flags are SUPPORTED, UNSUPPORTED otherwise.
562
+
563
+ CUpti_Profiler_Support_Level architecture; //!< [out] SUPPORTED if the device architecture level supports the Profiling API (Compute Capability >= 7.0), UNSUPPORTED otherwise
564
+ CUpti_Profiler_Support_Level sli; //!< [out] SUPPORTED if SLI is not enabled, UNSUPPORTED otherwise
565
+ CUpti_Profiler_Support_Level vGpu; //!< [out] SUPPORTED if vGPU is supported and profiling is enabled, DISABLED if profiling is supported but not enabled, UNSUPPORTED otherwise
566
+ CUpti_Profiler_Support_Level confidentialCompute; //!< [out] SUPPORTED if confidential compute is not enabled, UNSUPPORTED otherwise
567
+ CUpti_Profiler_Support_Level cmp; //!< [out] SUPPORTED if not NVIDIA Crypto Mining Processors (CMP), UNSUPPORTED otherwise
568
+ CUpti_Profiler_Support_Level wsl; //!< [out] SUPPORTED if WSL supported, UNSUPPORTED otherwise
569
+ } CUpti_Profiler_DeviceSupported_Params;
570
+ #define CUpti_Profiler_DeviceSupported_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Profiler_DeviceSupported_Params, confidentialCompute)
571
+
572
+ /**
573
+ * \brief Query device compatibility with Profiling API
574
+ *
575
+ * Use this call to determine whether a compute device and configuration are compatible with the Profiling API.
576
+ * If the configuration does not support profiling, one of several flags will indicate why.
577
+ */
578
+ CUptiResult CUPTIAPI cuptiProfilerDeviceSupported(CUpti_Profiler_DeviceSupported_Params *pParams);
579
+
580
+ /** @} */ /* END CUPTI_METRIC_API */
581
+ #if defined(__GNUC__) && defined(CUPTI_LIB)
582
+ #pragma GCC visibility pop
583
+ #endif
584
+
585
+ #ifdef __cplusplus
586
+ } /* extern "C" */
587
+ #endif
588
+
589
+ #endif /*_CUPTI_PROFILER_TARGET_H_*/
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef NVPERF_CUDA_HOST_H
2
+ #define NVPERF_CUDA_HOST_H
3
+
4
+ /*
5
+ * Copyright 2014-2022 NVIDIA Corporation. All rights reserved.
6
+ *
7
+ * NOTICE TO USER:
8
+ *
9
+ * This source code is subject to NVIDIA ownership rights under U.S. and
10
+ * international Copyright laws.
11
+ *
12
+ * This software and the information contained herein is PROPRIETARY and
13
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
14
+ * of a form of NVIDIA software license agreement.
15
+ *
16
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
17
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
18
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
19
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
20
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
21
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
22
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
24
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
25
+ * OR PERFORMANCE OF THIS SOURCE CODE.
26
+ *
27
+ * U.S. Government End Users. This source code is a "commercial item" as
28
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
29
+ * "commercial computer software" and "commercial computer software
30
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
31
+ * and is provided to the U.S. Government only as a commercial end item.
32
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
33
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
34
+ * source code with only those rights set forth herein.
35
+ *
36
+ * Any use of this source code in individual and commercial software must
37
+ * include, in the user documentation and internal comments to the code,
38
+ * the above Disclaimer and U.S. Government End Users Notice.
39
+ */
40
+
41
+ #include <stddef.h>
42
+ #include <stdint.h>
43
+ #include "nvperf_common.h"
44
+ #include "nvperf_host.h"
45
+
46
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
47
+ #pragma GCC visibility push(default)
48
+ #if !defined(NVPW_LOCAL)
49
+ #define NVPW_LOCAL __attribute__ ((visibility ("hidden")))
50
+ #endif
51
+ #else
52
+ #if !defined(NVPW_LOCAL)
53
+ #define NVPW_LOCAL
54
+ #endif
55
+ #endif
56
+
57
+ #ifdef __cplusplus
58
+ extern "C" {
59
+ #endif
60
+
61
+ /**
62
+ * @file nvperf_cuda_host.h
63
+ */
64
+
65
+ /// 'NVPA_MetricsContext' and its APIs are deprecated, please use 'NVPW_MetricsEvaluator' and its APIs instead.
66
+ typedef struct NVPA_MetricsContext NVPA_MetricsContext;
67
+
68
+ typedef struct NVPW_CUDA_MetricsContext_Create_Params
69
+ {
70
+ /// [in]
71
+ size_t structSize;
72
+ /// [in] assign to NULL
73
+ void* pPriv;
74
+ /// [in]
75
+ const char* pChipName;
76
+ /// [out]
77
+ struct NVPA_MetricsContext* pMetricsContext;
78
+ } NVPW_CUDA_MetricsContext_Create_Params;
79
+ #define NVPW_CUDA_MetricsContext_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsContext_Create_Params, pMetricsContext)
80
+
81
+ NVPA_Status NVPW_CUDA_MetricsContext_Create(NVPW_CUDA_MetricsContext_Create_Params* pParams);
82
+
83
+ typedef struct NVPW_CUDA_RawMetricsConfig_Create_Params
84
+ {
85
+ /// [in]
86
+ size_t structSize;
87
+ /// [in] assign to NULL
88
+ void* pPriv;
89
+ /// [in]
90
+ NVPA_ActivityKind activityKind;
91
+ /// [in]
92
+ const char* pChipName;
93
+ /// [out] new NVPA_RawMetricsConfig object
94
+ struct NVPA_RawMetricsConfig* pRawMetricsConfig;
95
+ } NVPW_CUDA_RawMetricsConfig_Create_Params;
96
+ #define NVPW_CUDA_RawMetricsConfig_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_Params, pRawMetricsConfig)
97
+
98
+ NVPA_Status NVPW_CUDA_RawMetricsConfig_Create(NVPW_CUDA_RawMetricsConfig_Create_Params* pParams);
99
+
100
+ typedef struct NVPW_CUDA_RawMetricsConfig_Create_V2_Params
101
+ {
102
+ /// [in]
103
+ size_t structSize;
104
+ /// [in] assign to NULL
105
+ void* pPriv;
106
+ /// [in]
107
+ NVPA_ActivityKind activityKind;
108
+ /// [in] accepted for chips supported at the time-of-release.
109
+ const char* pChipName;
110
+ /// [in] buffer with counter availability image - required for future chip support
111
+ const uint8_t* pCounterAvailabilityImage;
112
+ /// [out] new NVPA_RawMetricsConfig object
113
+ struct NVPA_RawMetricsConfig* pRawMetricsConfig;
114
+ } NVPW_CUDA_RawMetricsConfig_Create_V2_Params;
115
+ #define NVPW_CUDA_RawMetricsConfig_Create_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_V2_Params, pRawMetricsConfig)
116
+
117
+ /// Use either 'pChipName' or 'pCounterAvailabilityImage'.
118
+ NVPA_Status NVPW_CUDA_RawMetricsConfig_Create_V2(NVPW_CUDA_RawMetricsConfig_Create_V2_Params* pParams);
119
+
120
+ typedef struct NVPW_CUDA_CounterDataBuilder_Create_Params
121
+ {
122
+ /// [in]
123
+ size_t structSize;
124
+ /// [in] assign to NULL
125
+ void* pPriv;
126
+ /// [in] accepted for chips supported at the time-of-release.
127
+ const char* pChipName;
128
+ /// [in] buffer with counter availability image - required for future chip support
129
+ const uint8_t* pCounterAvailabilityImage;
130
+ /// [out] new NVPA_CounterDataBuilder object
131
+ struct NVPA_CounterDataBuilder* pCounterDataBuilder;
132
+ } NVPW_CUDA_CounterDataBuilder_Create_Params;
133
+ #define NVPW_CUDA_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_CounterDataBuilder_Create_Params, pCounterDataBuilder)
134
+
135
+ /// Use either 'pChipName' or 'pCounterAvailabilityImage'.
136
+ NVPA_Status NVPW_CUDA_CounterDataBuilder_Create(NVPW_CUDA_CounterDataBuilder_Create_Params* pParams);
137
+
138
+ typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator;
139
+
140
+ typedef struct NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params
141
+ {
142
+ /// [in]
143
+ size_t structSize;
144
+ /// [in] assign to NULL
145
+ void* pPriv;
146
+ /// [in] accepted for chips supported at the time-of-release.
147
+ const char* pChipName;
148
+ /// [in] buffer with counter availability image - required for future chip support
149
+ const uint8_t* pCounterAvailabilityImage;
150
+ /// [out]
151
+ size_t scratchBufferSize;
152
+ } NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params;
153
+ #define NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params, scratchBufferSize)
154
+
155
+ /// Use either 'pChipName' or 'pCounterAvailabilityImage'.
156
+ NVPA_Status NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params* pParams);
157
+
158
+ typedef struct NVPW_CUDA_MetricsEvaluator_Initialize_Params
159
+ {
160
+ /// [in]
161
+ size_t structSize;
162
+ /// [in] assign to NULL
163
+ void* pPriv;
164
+ /// [in]
165
+ uint8_t* pScratchBuffer;
166
+ /// [in] the size of the 'pScratchBuffer' array, should be at least the size of the 'scratchBufferSize' returned
167
+ /// by 'NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize'
168
+ size_t scratchBufferSize;
169
+ /// [in] accepted for chips supported at the time-of-release.
170
+ const char* pChipName;
171
+ /// [in] buffer with counter availability image - required for future chip support
172
+ const uint8_t* pCounterAvailabilityImage;
173
+ /// [in]
174
+ const uint8_t* pCounterDataImage;
175
+ /// [in] must be provided if 'pCounterDataImage' is not NULL
176
+ size_t counterDataImageSize;
177
+ /// [out]
178
+ struct NVPW_MetricsEvaluator* pMetricsEvaluator;
179
+ } NVPW_CUDA_MetricsEvaluator_Initialize_Params;
180
+ #define NVPW_CUDA_MetricsEvaluator_Initialize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_Initialize_Params, pMetricsEvaluator)
181
+
182
+ /// Use one of 'pChipName', 'pCounterAvailabilityImage', or 'pCounterDataImage'. 'pChipName' or
183
+ /// 'pCounterAvailabilityImage' will create a metrics evaluator based on a virtual device while 'pCounterDataImage'
184
+ /// will create a metrics evaluator based on the actual device.
185
+ NVPA_Status NVPW_CUDA_MetricsEvaluator_Initialize(NVPW_CUDA_MetricsEvaluator_Initialize_Params* pParams);
186
+
187
+
188
+
189
+ #ifdef __cplusplus
190
+ } // extern "C"
191
+ #endif
192
+
193
+ #if defined(__GNUC__) && defined(NVPA_SHARED_LIB)
194
+ #pragma GCC visibility pop
195
+ #endif
196
+
197
+ #endif // NVPERF_CUDA_HOST_H
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/cudnn/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (168 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn.h ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn : Neural Networks Library
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_H_)
55
+ #define CUDNN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+ #include "cudnn_ops_train.h"
63
+ #include "cudnn_adv_infer.h"
64
+ #include "cudnn_adv_train.h"
65
+ #include "cudnn_cnn_infer.h"
66
+ #include "cudnn_cnn_train.h"
67
+
68
+ #include "cudnn_backend.h"
69
+
70
+ #if defined(__cplusplus)
71
+ extern "C" {
72
+ #endif
73
+
74
+ #if defined(__cplusplus)
75
+ }
76
+ #endif
77
+
78
+ #endif /* CUDNN_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h ADDED
@@ -0,0 +1,658 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_infer : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_INFER_H_)
55
+ #define CUDNN_ADV_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_ADV_INFER_MAJOR 8
65
+ #define CUDNN_ADV_INFER_MINOR 9
66
+ #define CUDNN_ADV_INFER_PATCH 2
67
+
68
+ #if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN ADV INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* BASIC RNN API */
78
+
79
+ typedef enum {
80
+ CUDNN_FWD_MODE_INFERENCE = 0,
81
+ CUDNN_FWD_MODE_TRAINING = 1,
82
+ } cudnnForwardMode_t;
83
+
84
+ typedef enum {
85
+ CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */
86
+ CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */
87
+ CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */
88
+ CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */
89
+ } cudnnRNNMode_t;
90
+
91
+ typedef enum {
92
+ CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */
93
+ CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */
94
+ CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */
95
+ CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */
96
+ } cudnnRNNBiasMode_t;
97
+
98
+ typedef enum {
99
+ CUDNN_UNIDIRECTIONAL = 0, /* single direction network */
100
+ CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */
101
+ } cudnnDirectionMode_t;
102
+
103
+ typedef enum {
104
+ CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */
105
+ CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */
106
+ } cudnnRNNInputMode_t;
107
+
108
+ typedef enum {
109
+ CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */
110
+ CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */
111
+ } cudnnRNNClipMode_t;
112
+
113
+ typedef enum {
114
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */
115
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */
116
+ CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */
117
+ } cudnnRNNDataLayout_t;
118
+
119
+ /* Legacy type for backward compatibility */
120
+ typedef unsigned cudnnRNNPaddingMode_t;
121
+
122
+ /* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */
123
+ #define CUDNN_RNN_PADDED_IO_DISABLED 0
124
+ #define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0)
125
+
126
+ struct cudnnRNNStruct;
127
+ typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t;
128
+
129
+ struct cudnnPersistentRNNPlan;
130
+ typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t;
131
+
132
+ struct cudnnRNNDataStruct;
133
+ typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t;
134
+
135
+ cudnnStatus_t CUDNNWINAPI
136
+ cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc);
137
+
138
+ cudnnStatus_t CUDNNWINAPI
139
+ cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
143
+ cudnnRNNAlgo_t algo,
144
+ cudnnRNNMode_t cellMode,
145
+ cudnnRNNBiasMode_t biasMode,
146
+ cudnnDirectionMode_t dirMode,
147
+ cudnnRNNInputMode_t inputMode,
148
+ cudnnDataType_t dataType,
149
+ cudnnDataType_t mathPrec,
150
+ cudnnMathType_t mathType,
151
+ int32_t inputSize,
152
+ int32_t hiddenSize,
153
+ int32_t projSize,
154
+ int32_t numLayers,
155
+ cudnnDropoutDescriptor_t dropoutDesc,
156
+ uint32_t auxFlags);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
160
+ cudnnRNNAlgo_t *algo,
161
+ cudnnRNNMode_t *cellMode,
162
+ cudnnRNNBiasMode_t *biasMode,
163
+ cudnnDirectionMode_t *dirMode,
164
+ cudnnRNNInputMode_t *inputMode,
165
+ cudnnDataType_t *dataType,
166
+ cudnnDataType_t *mathPrec,
167
+ cudnnMathType_t *mathType,
168
+ int32_t *inputSize,
169
+ int32_t *hiddenSize,
170
+ int32_t *projSize,
171
+ int32_t *numLayers,
172
+ cudnnDropoutDescriptor_t *dropoutDesc,
173
+ uint32_t *auxFlags);
174
+
175
+ /*
176
+ * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision
177
+ * compute precision is further modified by cudnnSetRNNMatrixMathType()
178
+ * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage
179
+ * dropout is between RNN layers, not between recurrent steps
180
+ */
181
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
182
+ cudnnSetRNNDescriptor_v6(cudnnHandle_t handle,
183
+ cudnnRNNDescriptor_t rnnDesc,
184
+ const int hiddenSize,
185
+ const int numLayers,
186
+ cudnnDropoutDescriptor_t dropoutDesc,
187
+ cudnnRNNInputMode_t inputMode,
188
+ cudnnDirectionMode_t direction,
189
+ cudnnRNNMode_t cellMode,
190
+ cudnnRNNAlgo_t algo,
191
+ cudnnDataType_t mathPrec);
192
+
193
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
194
+ cudnnGetRNNDescriptor_v6(cudnnHandle_t handle,
195
+ cudnnRNNDescriptor_t rnnDesc,
196
+ int *hiddenSize,
197
+ int *numLayers,
198
+ cudnnDropoutDescriptor_t *dropoutDesc,
199
+ cudnnRNNInputMode_t *inputMode,
200
+ cudnnDirectionMode_t *direction,
201
+ cudnnRNNMode_t *cellMode,
202
+ cudnnRNNAlgo_t *algo,
203
+ cudnnDataType_t *mathPrec);
204
+
205
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
206
+ cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType);
207
+
208
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
209
+ cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType);
210
+
211
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
212
+ cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode);
213
+
214
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
215
+ cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode);
216
+
217
+ cudnnStatus_t CUDNNWINAPI
218
+ cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc,
219
+ cudnnRNNClipMode_t clipMode,
220
+ cudnnNanPropagation_t clipNanOpt,
221
+ double lclip,
222
+ double rclip);
223
+
224
+ cudnnStatus_t CUDNNWINAPI
225
+ cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc,
226
+ cudnnRNNClipMode_t *clipMode,
227
+ cudnnNanPropagation_t *clipNanOpt,
228
+ double *lclip,
229
+ double *rclip);
230
+
231
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
232
+ cudnnRNNSetClip(cudnnHandle_t handle,
233
+ cudnnRNNDescriptor_t rnnDesc,
234
+ cudnnRNNClipMode_t clipMode,
235
+ cudnnNanPropagation_t clipNanOpt,
236
+ double lclip,
237
+ double rclip);
238
+
239
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
240
+ cudnnRNNGetClip(cudnnHandle_t handle,
241
+ cudnnRNNDescriptor_t rnnDesc,
242
+ cudnnRNNClipMode_t *clipMode,
243
+ cudnnNanPropagation_t *clipNanOpt,
244
+ double *lclip,
245
+ double *rclip);
246
+
247
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetRNNProjectionLayers(cudnnHandle_t handle,
249
+ cudnnRNNDescriptor_t rnnDesc,
250
+ const int recProjSize,
251
+ const int outProjSize);
252
+
253
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
254
+ cudnnGetRNNProjectionLayers(cudnnHandle_t handle,
255
+ const cudnnRNNDescriptor_t rnnDesc,
256
+ int *recProjSize,
257
+ int *outProjSize);
258
+
259
+ /* Expensive. Creates the plan for the specific settings. */
260
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
261
+ cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc,
262
+ const int minibatch,
263
+ const cudnnDataType_t dataType,
264
+ cudnnPersistentRNNPlan_t *plan);
265
+
266
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
267
+ cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan);
268
+
269
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
270
+ cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan);
271
+
272
+ cudnnStatus_t CUDNNWINAPI
273
+ cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch);
274
+
275
+ /* dataType in weight descriptors and input descriptors is used to describe storage */
276
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
277
+ cudnnGetRNNWorkspaceSize(cudnnHandle_t handle,
278
+ const cudnnRNNDescriptor_t rnnDesc,
279
+ const int seqLength,
280
+ const cudnnTensorDescriptor_t *xDesc,
281
+ size_t *sizeInBytes);
282
+
283
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
284
+ cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle,
285
+ const cudnnRNNDescriptor_t rnnDesc,
286
+ const int seqLength,
287
+ const cudnnTensorDescriptor_t *xDesc,
288
+ size_t *sizeInBytes);
289
+
290
+ cudnnStatus_t CUDNNWINAPI
291
+ cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle,
292
+ cudnnRNNDescriptor_t rnnDesc,
293
+ cudnnForwardMode_t fwdMode,
294
+ cudnnRNNDataDescriptor_t xDesc,
295
+ size_t *workSpaceSize,
296
+ size_t *reserveSpaceSize);
297
+
298
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
299
+ cudnnGetRNNParamsSize(cudnnHandle_t handle,
300
+ const cudnnRNNDescriptor_t rnnDesc,
301
+ const cudnnTensorDescriptor_t xDesc,
302
+ size_t *sizeInBytes,
303
+ cudnnDataType_t dataType);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize);
307
+
308
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
309
+ cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle,
310
+ const cudnnRNNDescriptor_t rnnDesc,
311
+ const int pseudoLayer,
312
+ const cudnnTensorDescriptor_t xDesc,
313
+ const cudnnFilterDescriptor_t wDesc,
314
+ const void *w,
315
+ const int linLayerID,
316
+ cudnnFilterDescriptor_t linLayerMatDesc,
317
+ void **linLayerMat);
318
+
319
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
320
+ cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle,
321
+ const cudnnRNNDescriptor_t rnnDesc,
322
+ const int pseudoLayer,
323
+ const cudnnTensorDescriptor_t xDesc,
324
+ const cudnnFilterDescriptor_t wDesc,
325
+ const void *w,
326
+ const int linLayerID,
327
+ cudnnFilterDescriptor_t linLayerBiasDesc,
328
+ void **linLayerBias);
329
+
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnGetRNNWeightParams(cudnnHandle_t handle,
332
+ cudnnRNNDescriptor_t rnnDesc,
333
+ int32_t pseudoLayer,
334
+ size_t weightSpaceSize,
335
+ const void *weightSpace,
336
+ int32_t linLayerID,
337
+ cudnnTensorDescriptor_t mDesc,
338
+ void **mAddr,
339
+ cudnnTensorDescriptor_t bDesc,
340
+ void **bAddr);
341
+
342
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
343
+ cudnnRNNForwardInference(cudnnHandle_t handle,
344
+ const cudnnRNNDescriptor_t rnnDesc,
345
+ const int seqLength,
346
+ const cudnnTensorDescriptor_t *xDesc,
347
+ const void *x,
348
+ const cudnnTensorDescriptor_t hxDesc,
349
+ const void *hx,
350
+ const cudnnTensorDescriptor_t cxDesc,
351
+ const void *cx,
352
+ const cudnnFilterDescriptor_t wDesc,
353
+ const void *w,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ void *y,
356
+ const cudnnTensorDescriptor_t hyDesc,
357
+ void *hy,
358
+ const cudnnTensorDescriptor_t cyDesc,
359
+ void *cy,
360
+ void *workSpace,
361
+ size_t workSpaceSizeInBytes);
362
+
363
+ /* RNN EX API */
364
+
365
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
366
+ cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode);
367
+
368
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
369
+ cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode);
370
+
371
+ cudnnStatus_t CUDNNWINAPI
372
+ cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc);
373
+
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc);
376
+
377
+ cudnnStatus_t CUDNNWINAPI
378
+ cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
379
+ cudnnDataType_t dataType,
380
+ cudnnRNNDataLayout_t layout,
381
+ int maxSeqLength,
382
+ int batchSize,
383
+ int vectorSize,
384
+ const int seqLengthArray[], /* length of each sequence in the batch */
385
+ void *paddingFill); /* symbol for filling padding position in output */
386
+
387
+ cudnnStatus_t CUDNNWINAPI
388
+ cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
389
+ cudnnDataType_t *dataType,
390
+ cudnnRNNDataLayout_t *layout,
391
+ int *maxSeqLength,
392
+ int *batchSize,
393
+ int *vectorSize,
394
+ int arrayLengthRequested,
395
+ int seqLengthArray[],
396
+ void *paddingFill);
397
+
398
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
399
+ cudnnRNNForwardInferenceEx(cudnnHandle_t handle,
400
+ const cudnnRNNDescriptor_t rnnDesc,
401
+ const cudnnRNNDataDescriptor_t xDesc,
402
+ const void *x,
403
+ const cudnnTensorDescriptor_t hxDesc,
404
+ const void *hx,
405
+ const cudnnTensorDescriptor_t cxDesc,
406
+ const void *cx,
407
+ const cudnnFilterDescriptor_t wDesc,
408
+ const void *w,
409
+ const cudnnRNNDataDescriptor_t yDesc,
410
+ void *y,
411
+ const cudnnTensorDescriptor_t hyDesc,
412
+ void *hy,
413
+ const cudnnTensorDescriptor_t cyDesc,
414
+ void *cy,
415
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
416
+ const void *keys, /* reserved, should pass NULL */
417
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
418
+ void *cAttn, /* reserved, should pass NULL */
419
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
420
+ void *iAttn, /* reserved, should pass NULL */
421
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
422
+ void *queries, /* reserved, should pass NULL */
423
+ void *workSpace,
424
+ size_t workSpaceSizeInBytes);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnRNNForward(cudnnHandle_t handle,
428
+ cudnnRNNDescriptor_t rnnDesc,
429
+ cudnnForwardMode_t fwdMode,
430
+ const int32_t devSeqLengths[],
431
+ cudnnRNNDataDescriptor_t xDesc,
432
+ const void *x,
433
+ cudnnRNNDataDescriptor_t yDesc,
434
+ void *y,
435
+ cudnnTensorDescriptor_t hDesc,
436
+ const void *hx,
437
+ void *hy,
438
+ cudnnTensorDescriptor_t cDesc,
439
+ const void *cx,
440
+ void *cy,
441
+ size_t weightSpaceSize,
442
+ const void *weightSpace,
443
+ size_t workSpaceSize,
444
+ void *workSpace,
445
+ size_t reserveSpaceSize,
446
+ void *reserveSpace);
447
+
448
+ /* RNN FIND API */
449
+
450
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
451
+ cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc);
452
+
453
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
454
+ cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
455
+
456
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
457
+ cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle,
458
+ const cudnnRNNDescriptor_t rnnDesc,
459
+ const int seqLength,
460
+ const cudnnTensorDescriptor_t *xDesc,
461
+ const void *x,
462
+ const cudnnTensorDescriptor_t hxDesc,
463
+ const void *hx,
464
+ const cudnnTensorDescriptor_t cxDesc,
465
+ const void *cx,
466
+ const cudnnFilterDescriptor_t wDesc,
467
+ const void *w,
468
+ const cudnnTensorDescriptor_t *yDesc,
469
+ void *y,
470
+ const cudnnTensorDescriptor_t hyDesc,
471
+ void *hy,
472
+ const cudnnTensorDescriptor_t cyDesc,
473
+ void *cy,
474
+ const float findIntensity,
475
+ const int requestedAlgoCount,
476
+ int *returnedAlgoCount,
477
+ cudnnAlgorithmPerformance_t *perfResults,
478
+ void *workspace,
479
+ size_t workSpaceSizeInBytes);
480
+
481
+ /* Sequence data descriptor */
482
+
483
+ typedef enum {
484
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */
485
+ CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */
486
+ CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */
487
+ CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */
488
+ } cudnnSeqDataAxis_t;
489
+
490
+ struct cudnnSeqDataStruct;
491
+ typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t;
492
+
493
+ #define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */
494
+
495
+ cudnnStatus_t CUDNNWINAPI
496
+ cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc);
497
+
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc);
500
+
501
+ cudnnStatus_t CUDNNWINAPI
502
+ cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc,
503
+ cudnnDataType_t dataType,
504
+ int nbDims,
505
+ const int dimA[],
506
+ const cudnnSeqDataAxis_t axes[],
507
+ size_t seqLengthArraySize,
508
+ const int seqLengthArray[],
509
+ void *paddingFill);
510
+
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc,
513
+ cudnnDataType_t *dataType,
514
+ int *nbDims,
515
+ int nbDimsRequested,
516
+ int dimA[],
517
+ cudnnSeqDataAxis_t axes[],
518
+ size_t *seqLengthArraySize,
519
+ size_t seqLengthSizeRequested,
520
+ int seqLengthArray[],
521
+ void *paddingFill);
522
+
523
+ /* Multihead Attention */
524
+
525
+ /* Legacy type for backward compatibility */
526
+ typedef unsigned cudnnAttnQueryMap_t;
527
+
528
+ /*
529
+ * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor().
530
+ * Use the bitwise OR operator to combine several settings listed below. Additional
531
+ * minor options can be added here w/o changing or introducing new API functions.
532
+ */
533
+ #define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */
534
+ #define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */
535
+ #define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */
536
+ #define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */
537
+
538
+ struct cudnnAttnStruct;
539
+ typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t;
540
+
541
+ cudnnStatus_t CUDNNWINAPI
542
+ cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc);
543
+
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
549
+ unsigned attnMode,
550
+ int nHeads,
551
+ double smScaler,
552
+ cudnnDataType_t dataType,
553
+ cudnnDataType_t computePrec,
554
+ cudnnMathType_t mathType,
555
+ cudnnDropoutDescriptor_t attnDropoutDesc,
556
+ cudnnDropoutDescriptor_t postDropoutDesc,
557
+ int qSize,
558
+ int kSize,
559
+ int vSize,
560
+ int qProjSize,
561
+ int kProjSize,
562
+ int vProjSize,
563
+ int oProjSize,
564
+ int qoMaxSeqLength,
565
+ int kvMaxSeqLength,
566
+ int maxBatchSize,
567
+ int maxBeamSize);
568
+
569
+ cudnnStatus_t CUDNNWINAPI
570
+ cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
571
+ unsigned *attnMode,
572
+ int *nHeads,
573
+ double *smScaler,
574
+ cudnnDataType_t *dataType,
575
+ cudnnDataType_t *computePrec,
576
+ cudnnMathType_t *mathType,
577
+ cudnnDropoutDescriptor_t *attnDropoutDesc,
578
+ cudnnDropoutDescriptor_t *postDropoutDesc,
579
+ int *qSize,
580
+ int *kSize,
581
+ int *vSize,
582
+ int *qProjSize,
583
+ int *kProjSize,
584
+ int *vProjSize,
585
+ int *oProjSize,
586
+ int *qoMaxSeqLength,
587
+ int *kvMaxSeqLength,
588
+ int *maxBatchSize,
589
+ int *maxBeamSize);
590
+
591
+ cudnnStatus_t CUDNNWINAPI
592
+ cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle,
593
+ const cudnnAttnDescriptor_t attnDesc,
594
+ size_t *weightSizeInBytes,
595
+ size_t *workSpaceSizeInBytes,
596
+ size_t *reserveSpaceSizeInBytes);
597
+
598
+ typedef enum {
599
+ CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */
600
+ CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */
601
+ CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */
602
+ CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */
603
+ CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */
604
+ CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */
605
+ CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */
606
+ CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */
607
+ } cudnnMultiHeadAttnWeightKind_t;
608
+
609
+ #define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */
610
+
611
+ cudnnStatus_t CUDNNWINAPI
612
+ cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle,
613
+ const cudnnAttnDescriptor_t attnDesc,
614
+ cudnnMultiHeadAttnWeightKind_t wKind,
615
+ size_t weightSizeInBytes,
616
+ const void *weights,
617
+ cudnnTensorDescriptor_t wDesc,
618
+ void **wAddr);
619
+
620
+ cudnnStatus_t CUDNNWINAPI
621
+ cudnnMultiHeadAttnForward(cudnnHandle_t handle,
622
+ const cudnnAttnDescriptor_t attnDesc,
623
+ int currIdx,
624
+ const int loWinIdx[],
625
+ const int hiWinIdx[],
626
+ const int devSeqLengthsQO[],
627
+ const int devSeqLengthsKV[],
628
+ const cudnnSeqDataDescriptor_t qDesc,
629
+ const void *queries,
630
+ const void *residuals,
631
+ const cudnnSeqDataDescriptor_t kDesc,
632
+ const void *keys,
633
+ const cudnnSeqDataDescriptor_t vDesc,
634
+ const void *values,
635
+ const cudnnSeqDataDescriptor_t oDesc,
636
+ void *out,
637
+ size_t weightSizeInBytes,
638
+ const void *weights,
639
+ size_t workSpaceSizeInBytes,
640
+ void *workSpace,
641
+ size_t reserveSpaceSizeInBytes,
642
+ void *reserveSpace);
643
+
644
+ /*
645
+ * \brief Cross-library version checker.
646
+ * This function is implemented differently in each sub-library. Each sublib
647
+ * checks whether its own version matches that of its dependencies.
648
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
649
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
650
+ */
651
+ cudnnStatus_t CUDNNWINAPI
652
+ cudnnAdvInferVersionCheck(void);
653
+
654
+ #if defined(__cplusplus)
655
+ }
656
+ #endif
657
+
658
+ #endif /* CUDNN_ADV_INFER_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h ADDED
@@ -0,0 +1,658 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_infer : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_INFER_H_)
55
+ #define CUDNN_ADV_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_ADV_INFER_MAJOR 8
65
+ #define CUDNN_ADV_INFER_MINOR 9
66
+ #define CUDNN_ADV_INFER_PATCH 2
67
+
68
+ #if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN ADV INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* BASIC RNN API */
78
+
79
+ typedef enum {
80
+ CUDNN_FWD_MODE_INFERENCE = 0,
81
+ CUDNN_FWD_MODE_TRAINING = 1,
82
+ } cudnnForwardMode_t;
83
+
84
+ typedef enum {
85
+ CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */
86
+ CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */
87
+ CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */
88
+ CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */
89
+ } cudnnRNNMode_t;
90
+
91
+ typedef enum {
92
+ CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */
93
+ CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */
94
+ CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */
95
+ CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */
96
+ } cudnnRNNBiasMode_t;
97
+
98
+ typedef enum {
99
+ CUDNN_UNIDIRECTIONAL = 0, /* single direction network */
100
+ CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */
101
+ } cudnnDirectionMode_t;
102
+
103
+ typedef enum {
104
+ CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */
105
+ CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */
106
+ } cudnnRNNInputMode_t;
107
+
108
+ typedef enum {
109
+ CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */
110
+ CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */
111
+ } cudnnRNNClipMode_t;
112
+
113
+ typedef enum {
114
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */
115
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */
116
+ CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */
117
+ } cudnnRNNDataLayout_t;
118
+
119
+ /* Legacy type for backward compatibility */
120
+ typedef unsigned cudnnRNNPaddingMode_t;
121
+
122
+ /* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */
123
+ #define CUDNN_RNN_PADDED_IO_DISABLED 0
124
+ #define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0)
125
+
126
+ struct cudnnRNNStruct;
127
+ typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t;
128
+
129
+ struct cudnnPersistentRNNPlan;
130
+ typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t;
131
+
132
+ struct cudnnRNNDataStruct;
133
+ typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t;
134
+
135
+ cudnnStatus_t CUDNNWINAPI
136
+ cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc);
137
+
138
+ cudnnStatus_t CUDNNWINAPI
139
+ cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
143
+ cudnnRNNAlgo_t algo,
144
+ cudnnRNNMode_t cellMode,
145
+ cudnnRNNBiasMode_t biasMode,
146
+ cudnnDirectionMode_t dirMode,
147
+ cudnnRNNInputMode_t inputMode,
148
+ cudnnDataType_t dataType,
149
+ cudnnDataType_t mathPrec,
150
+ cudnnMathType_t mathType,
151
+ int32_t inputSize,
152
+ int32_t hiddenSize,
153
+ int32_t projSize,
154
+ int32_t numLayers,
155
+ cudnnDropoutDescriptor_t dropoutDesc,
156
+ uint32_t auxFlags);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
160
+ cudnnRNNAlgo_t *algo,
161
+ cudnnRNNMode_t *cellMode,
162
+ cudnnRNNBiasMode_t *biasMode,
163
+ cudnnDirectionMode_t *dirMode,
164
+ cudnnRNNInputMode_t *inputMode,
165
+ cudnnDataType_t *dataType,
166
+ cudnnDataType_t *mathPrec,
167
+ cudnnMathType_t *mathType,
168
+ int32_t *inputSize,
169
+ int32_t *hiddenSize,
170
+ int32_t *projSize,
171
+ int32_t *numLayers,
172
+ cudnnDropoutDescriptor_t *dropoutDesc,
173
+ uint32_t *auxFlags);
174
+
175
+ /*
176
+ * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision
177
+ * compute precision is further modified by cudnnSetRNNMatrixMathType()
178
+ * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage
179
+ * dropout is between RNN layers, not between recurrent steps
180
+ */
181
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
182
+ cudnnSetRNNDescriptor_v6(cudnnHandle_t handle,
183
+ cudnnRNNDescriptor_t rnnDesc,
184
+ const int hiddenSize,
185
+ const int numLayers,
186
+ cudnnDropoutDescriptor_t dropoutDesc,
187
+ cudnnRNNInputMode_t inputMode,
188
+ cudnnDirectionMode_t direction,
189
+ cudnnRNNMode_t cellMode,
190
+ cudnnRNNAlgo_t algo,
191
+ cudnnDataType_t mathPrec);
192
+
193
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
194
+ cudnnGetRNNDescriptor_v6(cudnnHandle_t handle,
195
+ cudnnRNNDescriptor_t rnnDesc,
196
+ int *hiddenSize,
197
+ int *numLayers,
198
+ cudnnDropoutDescriptor_t *dropoutDesc,
199
+ cudnnRNNInputMode_t *inputMode,
200
+ cudnnDirectionMode_t *direction,
201
+ cudnnRNNMode_t *cellMode,
202
+ cudnnRNNAlgo_t *algo,
203
+ cudnnDataType_t *mathPrec);
204
+
205
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
206
+ cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType);
207
+
208
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
209
+ cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType);
210
+
211
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
212
+ cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode);
213
+
214
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
215
+ cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode);
216
+
217
+ cudnnStatus_t CUDNNWINAPI
218
+ cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc,
219
+ cudnnRNNClipMode_t clipMode,
220
+ cudnnNanPropagation_t clipNanOpt,
221
+ double lclip,
222
+ double rclip);
223
+
224
+ cudnnStatus_t CUDNNWINAPI
225
+ cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc,
226
+ cudnnRNNClipMode_t *clipMode,
227
+ cudnnNanPropagation_t *clipNanOpt,
228
+ double *lclip,
229
+ double *rclip);
230
+
231
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
232
+ cudnnRNNSetClip(cudnnHandle_t handle,
233
+ cudnnRNNDescriptor_t rnnDesc,
234
+ cudnnRNNClipMode_t clipMode,
235
+ cudnnNanPropagation_t clipNanOpt,
236
+ double lclip,
237
+ double rclip);
238
+
239
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
240
+ cudnnRNNGetClip(cudnnHandle_t handle,
241
+ cudnnRNNDescriptor_t rnnDesc,
242
+ cudnnRNNClipMode_t *clipMode,
243
+ cudnnNanPropagation_t *clipNanOpt,
244
+ double *lclip,
245
+ double *rclip);
246
+
247
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetRNNProjectionLayers(cudnnHandle_t handle,
249
+ cudnnRNNDescriptor_t rnnDesc,
250
+ const int recProjSize,
251
+ const int outProjSize);
252
+
253
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
254
+ cudnnGetRNNProjectionLayers(cudnnHandle_t handle,
255
+ const cudnnRNNDescriptor_t rnnDesc,
256
+ int *recProjSize,
257
+ int *outProjSize);
258
+
259
+ /* Expensive. Creates the plan for the specific settings. */
260
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
261
+ cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc,
262
+ const int minibatch,
263
+ const cudnnDataType_t dataType,
264
+ cudnnPersistentRNNPlan_t *plan);
265
+
266
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
267
+ cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan);
268
+
269
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
270
+ cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan);
271
+
272
+ cudnnStatus_t CUDNNWINAPI
273
+ cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch);
274
+
275
+ /* dataType in weight descriptors and input descriptors is used to describe storage */
276
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
277
+ cudnnGetRNNWorkspaceSize(cudnnHandle_t handle,
278
+ const cudnnRNNDescriptor_t rnnDesc,
279
+ const int seqLength,
280
+ const cudnnTensorDescriptor_t *xDesc,
281
+ size_t *sizeInBytes);
282
+
283
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
284
+ cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle,
285
+ const cudnnRNNDescriptor_t rnnDesc,
286
+ const int seqLength,
287
+ const cudnnTensorDescriptor_t *xDesc,
288
+ size_t *sizeInBytes);
289
+
290
+ cudnnStatus_t CUDNNWINAPI
291
+ cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle,
292
+ cudnnRNNDescriptor_t rnnDesc,
293
+ cudnnForwardMode_t fwdMode,
294
+ cudnnRNNDataDescriptor_t xDesc,
295
+ size_t *workSpaceSize,
296
+ size_t *reserveSpaceSize);
297
+
298
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
299
+ cudnnGetRNNParamsSize(cudnnHandle_t handle,
300
+ const cudnnRNNDescriptor_t rnnDesc,
301
+ const cudnnTensorDescriptor_t xDesc,
302
+ size_t *sizeInBytes,
303
+ cudnnDataType_t dataType);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize);
307
+
308
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
309
+ cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle,
310
+ const cudnnRNNDescriptor_t rnnDesc,
311
+ const int pseudoLayer,
312
+ const cudnnTensorDescriptor_t xDesc,
313
+ const cudnnFilterDescriptor_t wDesc,
314
+ const void *w,
315
+ const int linLayerID,
316
+ cudnnFilterDescriptor_t linLayerMatDesc,
317
+ void **linLayerMat);
318
+
319
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
320
+ cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle,
321
+ const cudnnRNNDescriptor_t rnnDesc,
322
+ const int pseudoLayer,
323
+ const cudnnTensorDescriptor_t xDesc,
324
+ const cudnnFilterDescriptor_t wDesc,
325
+ const void *w,
326
+ const int linLayerID,
327
+ cudnnFilterDescriptor_t linLayerBiasDesc,
328
+ void **linLayerBias);
329
+
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnGetRNNWeightParams(cudnnHandle_t handle,
332
+ cudnnRNNDescriptor_t rnnDesc,
333
+ int32_t pseudoLayer,
334
+ size_t weightSpaceSize,
335
+ const void *weightSpace,
336
+ int32_t linLayerID,
337
+ cudnnTensorDescriptor_t mDesc,
338
+ void **mAddr,
339
+ cudnnTensorDescriptor_t bDesc,
340
+ void **bAddr);
341
+
342
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
343
+ cudnnRNNForwardInference(cudnnHandle_t handle,
344
+ const cudnnRNNDescriptor_t rnnDesc,
345
+ const int seqLength,
346
+ const cudnnTensorDescriptor_t *xDesc,
347
+ const void *x,
348
+ const cudnnTensorDescriptor_t hxDesc,
349
+ const void *hx,
350
+ const cudnnTensorDescriptor_t cxDesc,
351
+ const void *cx,
352
+ const cudnnFilterDescriptor_t wDesc,
353
+ const void *w,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ void *y,
356
+ const cudnnTensorDescriptor_t hyDesc,
357
+ void *hy,
358
+ const cudnnTensorDescriptor_t cyDesc,
359
+ void *cy,
360
+ void *workSpace,
361
+ size_t workSpaceSizeInBytes);
362
+
363
+ /* RNN EX API */
364
+
365
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
366
+ cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode);
367
+
368
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
369
+ cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode);
370
+
371
+ cudnnStatus_t CUDNNWINAPI
372
+ cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc);
373
+
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc);
376
+
377
+ cudnnStatus_t CUDNNWINAPI
378
+ cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
379
+ cudnnDataType_t dataType,
380
+ cudnnRNNDataLayout_t layout,
381
+ int maxSeqLength,
382
+ int batchSize,
383
+ int vectorSize,
384
+ const int seqLengthArray[], /* length of each sequence in the batch */
385
+ void *paddingFill); /* symbol for filling padding position in output */
386
+
387
+ cudnnStatus_t CUDNNWINAPI
388
+ cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
389
+ cudnnDataType_t *dataType,
390
+ cudnnRNNDataLayout_t *layout,
391
+ int *maxSeqLength,
392
+ int *batchSize,
393
+ int *vectorSize,
394
+ int arrayLengthRequested,
395
+ int seqLengthArray[],
396
+ void *paddingFill);
397
+
398
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
399
+ cudnnRNNForwardInferenceEx(cudnnHandle_t handle,
400
+ const cudnnRNNDescriptor_t rnnDesc,
401
+ const cudnnRNNDataDescriptor_t xDesc,
402
+ const void *x,
403
+ const cudnnTensorDescriptor_t hxDesc,
404
+ const void *hx,
405
+ const cudnnTensorDescriptor_t cxDesc,
406
+ const void *cx,
407
+ const cudnnFilterDescriptor_t wDesc,
408
+ const void *w,
409
+ const cudnnRNNDataDescriptor_t yDesc,
410
+ void *y,
411
+ const cudnnTensorDescriptor_t hyDesc,
412
+ void *hy,
413
+ const cudnnTensorDescriptor_t cyDesc,
414
+ void *cy,
415
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
416
+ const void *keys, /* reserved, should pass NULL */
417
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
418
+ void *cAttn, /* reserved, should pass NULL */
419
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
420
+ void *iAttn, /* reserved, should pass NULL */
421
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
422
+ void *queries, /* reserved, should pass NULL */
423
+ void *workSpace,
424
+ size_t workSpaceSizeInBytes);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnRNNForward(cudnnHandle_t handle,
428
+ cudnnRNNDescriptor_t rnnDesc,
429
+ cudnnForwardMode_t fwdMode,
430
+ const int32_t devSeqLengths[],
431
+ cudnnRNNDataDescriptor_t xDesc,
432
+ const void *x,
433
+ cudnnRNNDataDescriptor_t yDesc,
434
+ void *y,
435
+ cudnnTensorDescriptor_t hDesc,
436
+ const void *hx,
437
+ void *hy,
438
+ cudnnTensorDescriptor_t cDesc,
439
+ const void *cx,
440
+ void *cy,
441
+ size_t weightSpaceSize,
442
+ const void *weightSpace,
443
+ size_t workSpaceSize,
444
+ void *workSpace,
445
+ size_t reserveSpaceSize,
446
+ void *reserveSpace);
447
+
448
+ /* RNN FIND API */
449
+
450
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
451
+ cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc);
452
+
453
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
454
+ cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
455
+
456
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
457
+ cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle,
458
+ const cudnnRNNDescriptor_t rnnDesc,
459
+ const int seqLength,
460
+ const cudnnTensorDescriptor_t *xDesc,
461
+ const void *x,
462
+ const cudnnTensorDescriptor_t hxDesc,
463
+ const void *hx,
464
+ const cudnnTensorDescriptor_t cxDesc,
465
+ const void *cx,
466
+ const cudnnFilterDescriptor_t wDesc,
467
+ const void *w,
468
+ const cudnnTensorDescriptor_t *yDesc,
469
+ void *y,
470
+ const cudnnTensorDescriptor_t hyDesc,
471
+ void *hy,
472
+ const cudnnTensorDescriptor_t cyDesc,
473
+ void *cy,
474
+ const float findIntensity,
475
+ const int requestedAlgoCount,
476
+ int *returnedAlgoCount,
477
+ cudnnAlgorithmPerformance_t *perfResults,
478
+ void *workspace,
479
+ size_t workSpaceSizeInBytes);
480
+
481
+ /* Sequence data descriptor */
482
+
483
+ typedef enum {
484
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */
485
+ CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */
486
+ CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */
487
+ CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */
488
+ } cudnnSeqDataAxis_t;
489
+
490
+ struct cudnnSeqDataStruct;
491
+ typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t;
492
+
493
+ #define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */
494
+
495
+ cudnnStatus_t CUDNNWINAPI
496
+ cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc);
497
+
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc);
500
+
501
+ cudnnStatus_t CUDNNWINAPI
502
+ cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc,
503
+ cudnnDataType_t dataType,
504
+ int nbDims,
505
+ const int dimA[],
506
+ const cudnnSeqDataAxis_t axes[],
507
+ size_t seqLengthArraySize,
508
+ const int seqLengthArray[],
509
+ void *paddingFill);
510
+
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc,
513
+ cudnnDataType_t *dataType,
514
+ int *nbDims,
515
+ int nbDimsRequested,
516
+ int dimA[],
517
+ cudnnSeqDataAxis_t axes[],
518
+ size_t *seqLengthArraySize,
519
+ size_t seqLengthSizeRequested,
520
+ int seqLengthArray[],
521
+ void *paddingFill);
522
+
523
+ /* Multihead Attention */
524
+
525
+ /* Legacy type for backward compatibility */
526
+ typedef unsigned cudnnAttnQueryMap_t;
527
+
528
+ /*
529
+ * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor().
530
+ * Use the bitwise OR operator to combine several settings listed below. Additional
531
+ * minor options can be added here w/o changing or introducing new API functions.
532
+ */
533
+ #define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */
534
+ #define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */
535
+ #define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */
536
+ #define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */
537
+
538
+ struct cudnnAttnStruct;
539
+ typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t;
540
+
541
+ cudnnStatus_t CUDNNWINAPI
542
+ cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc);
543
+
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
549
+ unsigned attnMode,
550
+ int nHeads,
551
+ double smScaler,
552
+ cudnnDataType_t dataType,
553
+ cudnnDataType_t computePrec,
554
+ cudnnMathType_t mathType,
555
+ cudnnDropoutDescriptor_t attnDropoutDesc,
556
+ cudnnDropoutDescriptor_t postDropoutDesc,
557
+ int qSize,
558
+ int kSize,
559
+ int vSize,
560
+ int qProjSize,
561
+ int kProjSize,
562
+ int vProjSize,
563
+ int oProjSize,
564
+ int qoMaxSeqLength,
565
+ int kvMaxSeqLength,
566
+ int maxBatchSize,
567
+ int maxBeamSize);
568
+
569
+ cudnnStatus_t CUDNNWINAPI
570
+ cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
571
+ unsigned *attnMode,
572
+ int *nHeads,
573
+ double *smScaler,
574
+ cudnnDataType_t *dataType,
575
+ cudnnDataType_t *computePrec,
576
+ cudnnMathType_t *mathType,
577
+ cudnnDropoutDescriptor_t *attnDropoutDesc,
578
+ cudnnDropoutDescriptor_t *postDropoutDesc,
579
+ int *qSize,
580
+ int *kSize,
581
+ int *vSize,
582
+ int *qProjSize,
583
+ int *kProjSize,
584
+ int *vProjSize,
585
+ int *oProjSize,
586
+ int *qoMaxSeqLength,
587
+ int *kvMaxSeqLength,
588
+ int *maxBatchSize,
589
+ int *maxBeamSize);
590
+
591
+ cudnnStatus_t CUDNNWINAPI
592
+ cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle,
593
+ const cudnnAttnDescriptor_t attnDesc,
594
+ size_t *weightSizeInBytes,
595
+ size_t *workSpaceSizeInBytes,
596
+ size_t *reserveSpaceSizeInBytes);
597
+
598
+ typedef enum {
599
+ CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */
600
+ CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */
601
+ CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */
602
+ CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */
603
+ CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */
604
+ CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */
605
+ CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */
606
+ CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */
607
+ } cudnnMultiHeadAttnWeightKind_t;
608
+
609
+ #define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */
610
+
611
+ cudnnStatus_t CUDNNWINAPI
612
+ cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle,
613
+ const cudnnAttnDescriptor_t attnDesc,
614
+ cudnnMultiHeadAttnWeightKind_t wKind,
615
+ size_t weightSizeInBytes,
616
+ const void *weights,
617
+ cudnnTensorDescriptor_t wDesc,
618
+ void **wAddr);
619
+
620
+ cudnnStatus_t CUDNNWINAPI
621
+ cudnnMultiHeadAttnForward(cudnnHandle_t handle,
622
+ const cudnnAttnDescriptor_t attnDesc,
623
+ int currIdx,
624
+ const int loWinIdx[],
625
+ const int hiWinIdx[],
626
+ const int devSeqLengthsQO[],
627
+ const int devSeqLengthsKV[],
628
+ const cudnnSeqDataDescriptor_t qDesc,
629
+ const void *queries,
630
+ const void *residuals,
631
+ const cudnnSeqDataDescriptor_t kDesc,
632
+ const void *keys,
633
+ const cudnnSeqDataDescriptor_t vDesc,
634
+ const void *values,
635
+ const cudnnSeqDataDescriptor_t oDesc,
636
+ void *out,
637
+ size_t weightSizeInBytes,
638
+ const void *weights,
639
+ size_t workSpaceSizeInBytes,
640
+ void *workSpace,
641
+ size_t reserveSpaceSizeInBytes,
642
+ void *reserveSpace);
643
+
644
+ /*
645
+ * \brief Cross-library version checker.
646
+ * This function is implemented differently in each sub-library. Each sublib
647
+ * checks whether its own version matches that of its dependencies.
648
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
649
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
650
+ */
651
+ cudnnStatus_t CUDNNWINAPI
652
+ cudnnAdvInferVersionCheck(void);
653
+
654
+ #if defined(__cplusplus)
655
+ }
656
+ #endif
657
+
658
+ #endif /* CUDNN_ADV_INFER_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h ADDED
@@ -0,0 +1,540 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_train : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_TRAIN_H_)
55
+ #define CUDNN_ADV_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+ #include "cudnn_ops_train.h"
63
+ #include "cudnn_adv_infer.h"
64
+
65
+ /* These version numbers are autogenerated, do not edit manually. */
66
+ #define CUDNN_ADV_TRAIN_MAJOR 8
67
+ #define CUDNN_ADV_TRAIN_MINOR 9
68
+ #define CUDNN_ADV_TRAIN_PATCH 2
69
+
70
+ #if (CUDNN_ADV_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_TRAIN_MINOR != CUDNN_MINOR) || \
71
+ (CUDNN_ADV_TRAIN_PATCH != CUDNN_PATCHLEVEL)
72
+ #error Version mismatch in cuDNN ADV TRAIN!!!
73
+ #endif
74
+
75
+ #if defined(__cplusplus)
76
+ extern "C" {
77
+ #endif
78
+
79
+ typedef enum {
80
+ CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */
81
+ CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */
82
+ } cudnnWgradMode_t;
83
+
84
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
85
+ cudnnRNNForwardTraining(cudnnHandle_t handle,
86
+ const cudnnRNNDescriptor_t rnnDesc,
87
+ const int seqLength,
88
+ const cudnnTensorDescriptor_t *xDesc,
89
+ const void *x,
90
+ const cudnnTensorDescriptor_t hxDesc,
91
+ const void *hx,
92
+ const cudnnTensorDescriptor_t cxDesc,
93
+ const void *cx,
94
+ const cudnnFilterDescriptor_t wDesc,
95
+ const void *w,
96
+ const cudnnTensorDescriptor_t *yDesc,
97
+ void *y,
98
+ const cudnnTensorDescriptor_t hyDesc,
99
+ void *hy,
100
+ const cudnnTensorDescriptor_t cyDesc,
101
+ void *cy,
102
+ void *workSpace,
103
+ size_t workSpaceSizeInBytes,
104
+ void *reserveSpace,
105
+ size_t reserveSpaceSizeInBytes);
106
+
107
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
108
+ cudnnRNNBackwardData(cudnnHandle_t handle,
109
+ const cudnnRNNDescriptor_t rnnDesc,
110
+ const int seqLength,
111
+ const cudnnTensorDescriptor_t *yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t *dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t dhyDesc,
116
+ const void *dhy,
117
+ const cudnnTensorDescriptor_t dcyDesc,
118
+ const void *dcy,
119
+ const cudnnFilterDescriptor_t wDesc,
120
+ const void *w,
121
+ const cudnnTensorDescriptor_t hxDesc,
122
+ const void *hx,
123
+ const cudnnTensorDescriptor_t cxDesc,
124
+ const void *cx,
125
+ const cudnnTensorDescriptor_t *dxDesc,
126
+ void *dx,
127
+ const cudnnTensorDescriptor_t dhxDesc,
128
+ void *dhx,
129
+ const cudnnTensorDescriptor_t dcxDesc,
130
+ void *dcx,
131
+ void *workSpace,
132
+ size_t workSpaceSizeInBytes,
133
+ void *reserveSpace,
134
+ size_t reserveSpaceSizeInBytes);
135
+
136
+ cudnnStatus_t CUDNNWINAPI
137
+ cudnnRNNBackwardData_v8(cudnnHandle_t handle,
138
+ cudnnRNNDescriptor_t rnnDesc,
139
+ const int32_t devSeqLengths[],
140
+ cudnnRNNDataDescriptor_t yDesc,
141
+ const void *y,
142
+ const void *dy,
143
+ cudnnRNNDataDescriptor_t xDesc,
144
+ void *dx,
145
+ cudnnTensorDescriptor_t hDesc,
146
+ const void *hx,
147
+ const void *dhy,
148
+ void *dhx,
149
+ cudnnTensorDescriptor_t cDesc,
150
+ const void *cx,
151
+ const void *dcy,
152
+ void *dcx,
153
+ size_t weightSpaceSize,
154
+ const void *weightSpace,
155
+ size_t workSpaceSize,
156
+ void *workSpace,
157
+ size_t reserveSpaceSize,
158
+ void *reserveSpace);
159
+
160
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
161
+ cudnnRNNBackwardWeights(cudnnHandle_t handle,
162
+ const cudnnRNNDescriptor_t rnnDesc,
163
+ const int seqLength,
164
+ const cudnnTensorDescriptor_t *xDesc,
165
+ const void *x,
166
+ const cudnnTensorDescriptor_t hxDesc,
167
+ const void *hx,
168
+ const cudnnTensorDescriptor_t *yDesc,
169
+ const void *y,
170
+ const void *workSpace,
171
+ size_t workSpaceSizeInBytes,
172
+ const cudnnFilterDescriptor_t dwDesc,
173
+ void *dw,
174
+ const void *reserveSpace,
175
+ size_t reserveSpaceSizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnRNNBackwardWeights_v8(cudnnHandle_t handle,
179
+ cudnnRNNDescriptor_t rnnDesc,
180
+ cudnnWgradMode_t addGrad,
181
+ const int32_t devSeqLengths[],
182
+ cudnnRNNDataDescriptor_t xDesc,
183
+ const void *x,
184
+ cudnnTensorDescriptor_t hDesc,
185
+ const void *hx,
186
+ cudnnRNNDataDescriptor_t yDesc,
187
+ const void *y,
188
+ size_t weightSpaceSize,
189
+ void *dweightSpace,
190
+ size_t workSpaceSize,
191
+ void *workSpace,
192
+ size_t reserveSpaceSize,
193
+ void *reserveSpace);
194
+
195
+ /* RNN EX API */
196
+
197
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
198
+ cudnnRNNForwardTrainingEx(cudnnHandle_t handle,
199
+ const cudnnRNNDescriptor_t rnnDesc,
200
+ const cudnnRNNDataDescriptor_t xDesc,
201
+ const void *x,
202
+ const cudnnTensorDescriptor_t hxDesc,
203
+ const void *hx,
204
+ const cudnnTensorDescriptor_t cxDesc,
205
+ const void *cx,
206
+ const cudnnFilterDescriptor_t wDesc,
207
+ const void *w,
208
+ const cudnnRNNDataDescriptor_t yDesc,
209
+ void *y,
210
+ const cudnnTensorDescriptor_t hyDesc,
211
+ void *hy,
212
+ const cudnnTensorDescriptor_t cyDesc,
213
+ void *cy,
214
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
215
+ const void *keys, /* reserved, should pass NULL */
216
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
217
+ void *cAttn, /* reserved, should pass NULL */
218
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
219
+ void *iAttn, /* reserved, should pass NULL */
220
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
221
+ void *queries, /* reserved, should pass NULL */
222
+ void *workSpace,
223
+ size_t workSpaceSizeInBytes,
224
+ void *reserveSpace,
225
+ size_t reserveSpaceSizeInBytes);
226
+
227
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
228
+ cudnnRNNBackwardDataEx(cudnnHandle_t handle,
229
+ const cudnnRNNDescriptor_t rnnDesc,
230
+ const cudnnRNNDataDescriptor_t yDesc,
231
+ const void *y,
232
+ const cudnnRNNDataDescriptor_t dyDesc,
233
+ const void *dy,
234
+ const cudnnRNNDataDescriptor_t dcDesc, /* reserved, should pass NULL */
235
+ const void *dcAttn, /* reserved, should pass NULL */
236
+ const cudnnTensorDescriptor_t dhyDesc,
237
+ const void *dhy,
238
+ const cudnnTensorDescriptor_t dcyDesc,
239
+ const void *dcy,
240
+ const cudnnFilterDescriptor_t wDesc,
241
+ const void *w,
242
+ const cudnnTensorDescriptor_t hxDesc,
243
+ const void *hx,
244
+ const cudnnTensorDescriptor_t cxDesc,
245
+ const void *cx,
246
+ const cudnnRNNDataDescriptor_t dxDesc,
247
+ void *dx,
248
+ const cudnnTensorDescriptor_t dhxDesc,
249
+ void *dhx,
250
+ const cudnnTensorDescriptor_t dcxDesc,
251
+ void *dcx,
252
+ const cudnnRNNDataDescriptor_t dkDesc, /* reserved, should pass NULL */
253
+ void *dkeys, /* reserved, should pass NULL */
254
+ void *workSpace,
255
+ size_t workSpaceSizeInBytes,
256
+ void *reserveSpace,
257
+ size_t reserveSpaceSizeInBytes);
258
+
259
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
260
+ cudnnRNNBackwardWeightsEx(cudnnHandle_t handle,
261
+ const cudnnRNNDescriptor_t rnnDesc,
262
+ const cudnnRNNDataDescriptor_t xDesc,
263
+ const void *x,
264
+ const cudnnTensorDescriptor_t hxDesc,
265
+ const void *hx,
266
+ const cudnnRNNDataDescriptor_t yDesc,
267
+ const void *y,
268
+ void *workSpace,
269
+ size_t workSpaceSizeInBytes,
270
+ const cudnnFilterDescriptor_t dwDesc,
271
+ void *dw,
272
+ void *reserveSpace,
273
+ size_t reserveSpaceSizeInBytes);
274
+
275
+ /* RNN FIND API */
276
+
277
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
278
+ cudnnGetRNNForwardTrainingAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
279
+
280
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
281
+ cudnnFindRNNForwardTrainingAlgorithmEx(cudnnHandle_t handle,
282
+ const cudnnRNNDescriptor_t rnnDesc,
283
+ const int seqLength,
284
+ const cudnnTensorDescriptor_t *xDesc,
285
+ const void *x,
286
+ const cudnnTensorDescriptor_t hxDesc,
287
+ const void *hx,
288
+ const cudnnTensorDescriptor_t cxDesc,
289
+ const void *cx,
290
+ const cudnnFilterDescriptor_t wDesc,
291
+ const void *w,
292
+ const cudnnTensorDescriptor_t *yDesc,
293
+ void *y,
294
+ const cudnnTensorDescriptor_t hyDesc,
295
+ void *hy,
296
+ const cudnnTensorDescriptor_t cyDesc,
297
+ void *cy,
298
+ const float findIntensity,
299
+ const int requestedAlgoCount,
300
+ int *returnedAlgoCount,
301
+ cudnnAlgorithmPerformance_t *perfResults,
302
+ void *workspace,
303
+ size_t workSpaceSizeInBytes,
304
+ void *reserveSpace,
305
+ size_t reserveSpaceSizeInBytes);
306
+
307
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
308
+ cudnnGetRNNBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
309
+
310
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
311
+ cudnnFindRNNBackwardDataAlgorithmEx(cudnnHandle_t handle,
312
+ const cudnnRNNDescriptor_t rnnDesc,
313
+ const int seqLength,
314
+ const cudnnTensorDescriptor_t *yDesc,
315
+ const void *y,
316
+ const cudnnTensorDescriptor_t *dyDesc,
317
+ const void *dy,
318
+ const cudnnTensorDescriptor_t dhyDesc,
319
+ const void *dhy,
320
+ const cudnnTensorDescriptor_t dcyDesc,
321
+ const void *dcy,
322
+ const cudnnFilterDescriptor_t wDesc,
323
+ const void *w,
324
+ const cudnnTensorDescriptor_t hxDesc,
325
+ const void *hx,
326
+ const cudnnTensorDescriptor_t cxDesc,
327
+ const void *cx,
328
+ const cudnnTensorDescriptor_t *dxDesc,
329
+ void *dx,
330
+ const cudnnTensorDescriptor_t dhxDesc,
331
+ void *dhx,
332
+ const cudnnTensorDescriptor_t dcxDesc,
333
+ void *dcx,
334
+ const float findIntensity,
335
+ const int requestedAlgoCount,
336
+ int *returnedAlgoCount,
337
+ cudnnAlgorithmPerformance_t *perfResults,
338
+ void *workspace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetRNNBackwardWeightsAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
345
+
346
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
347
+ cudnnFindRNNBackwardWeightsAlgorithmEx(cudnnHandle_t handle,
348
+ const cudnnRNNDescriptor_t rnnDesc,
349
+ const int seqLength,
350
+ const cudnnTensorDescriptor_t *xDesc,
351
+ const void *x,
352
+ const cudnnTensorDescriptor_t hxDesc,
353
+ const void *hx,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ const void *y,
356
+ const float findIntensity,
357
+ const int requestedAlgoCount,
358
+ int *returnedAlgoCount,
359
+ cudnnAlgorithmPerformance_t *perfResults,
360
+ const void *workspace,
361
+ size_t workSpaceSizeInBytes,
362
+ const cudnnFilterDescriptor_t dwDesc,
363
+ void *dw,
364
+ const void *reserveSpace,
365
+ size_t reserveSpaceSizeInBytes);
366
+
367
+ cudnnStatus_t CUDNNWINAPI
368
+ cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle,
369
+ const cudnnAttnDescriptor_t attnDesc,
370
+ const int loWinIdx[],
371
+ const int hiWinIdx[],
372
+ const int devSeqLengthsDQDO[],
373
+ const int devSeqLengthsDKDV[],
374
+ const cudnnSeqDataDescriptor_t doDesc,
375
+ const void *dout,
376
+ const cudnnSeqDataDescriptor_t dqDesc,
377
+ void *dqueries,
378
+ const void *queries,
379
+ const cudnnSeqDataDescriptor_t dkDesc,
380
+ void *dkeys,
381
+ const void *keys,
382
+ const cudnnSeqDataDescriptor_t dvDesc,
383
+ void *dvalues,
384
+ const void *values,
385
+ size_t weightSizeInBytes,
386
+ const void *weights,
387
+ size_t workSpaceSizeInBytes,
388
+ void *workSpace,
389
+ size_t reserveSpaceSizeInBytes,
390
+ void *reserveSpace);
391
+
392
+ cudnnStatus_t CUDNNWINAPI
393
+ cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle,
394
+ const cudnnAttnDescriptor_t attnDesc,
395
+ cudnnWgradMode_t addGrad,
396
+ const cudnnSeqDataDescriptor_t qDesc,
397
+ const void *queries,
398
+ const cudnnSeqDataDescriptor_t kDesc,
399
+ const void *keys,
400
+ const cudnnSeqDataDescriptor_t vDesc,
401
+ const void *values,
402
+ const cudnnSeqDataDescriptor_t doDesc,
403
+ const void *dout,
404
+ size_t weightSizeInBytes,
405
+ const void *weights,
406
+ void *dweights,
407
+ size_t workSpaceSizeInBytes,
408
+ void *workSpace,
409
+ size_t reserveSpaceSizeInBytes,
410
+ void *reserveSpace);
411
+
412
+ /*
413
+ * CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions
414
+ */
415
+ /* Input normalization mode for loss function */
416
+ typedef enum {
417
+ CUDNN_LOSS_NORMALIZATION_NONE = 0,
418
+ CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1,
419
+ } cudnnLossNormalizationMode_t;
420
+
421
+ cudnnStatus_t CUDNNWINAPI
422
+ cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc);
423
+
424
+ cudnnStatus_t CUDNNWINAPI
425
+ cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType);
426
+
427
+ cudnnStatus_t CUDNNWINAPI
428
+ cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
429
+ cudnnDataType_t compType,
430
+ cudnnLossNormalizationMode_t normMode,
431
+ cudnnNanPropagation_t gradMode);
432
+
433
+ cudnnStatus_t CUDNNWINAPI
434
+ cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
435
+ cudnnDataType_t compType,
436
+ cudnnLossNormalizationMode_t normMode,
437
+ cudnnNanPropagation_t gradMode,
438
+ int maxLabelLength);
439
+
440
+ cudnnStatus_t CUDNNWINAPI
441
+ cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType);
442
+
443
+ cudnnStatus_t CUDNNWINAPI
444
+ cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
445
+ cudnnDataType_t *compType,
446
+ cudnnLossNormalizationMode_t *normMode,
447
+ cudnnNanPropagation_t *gradMode);
448
+
449
+ cudnnStatus_t CUDNNWINAPI
450
+ cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
451
+ cudnnDataType_t *compType,
452
+ cudnnLossNormalizationMode_t *normMode,
453
+ cudnnNanPropagation_t *gradMode,
454
+ int *maxLabelLength);
455
+
456
+ cudnnStatus_t CUDNNWINAPI
457
+ cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc);
458
+
459
+ /* return the ctc costs and gradients, given the probabilities and labels */
460
+ cudnnStatus_t CUDNNWINAPI
461
+ cudnnCTCLoss(
462
+ cudnnHandle_t handle,
463
+ const cudnnTensorDescriptor_t
464
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
465
+ mini batch size, A is the alphabet size) */
466
+ const void *probs, /* probabilities after softmax, in GPU memory */
467
+ const int hostLabels[], /* labels, in CPU memory */
468
+ const int hostLabelLengths[], /* the length of each label, in CPU memory */
469
+ const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */
470
+ void *costs, /* the returned costs of CTC, in GPU memory */
471
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
472
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
473
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
474
+ cudnnCTCLossDescriptor_t ctcLossDesc,
475
+ void *workspace, /* pointer to the workspace, in GPU memory */
476
+ size_t workSpaceSizeInBytes); /* size of the workspace */
477
+
478
+ /* return the ctc costs and gradients, given the probabilities and labels */
479
+ cudnnStatus_t CUDNNWINAPI
480
+ cudnnCTCLoss_v8(
481
+ cudnnHandle_t handle,
482
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
483
+ cudnnCTCLossDescriptor_t ctcLossDesc,
484
+ const cudnnTensorDescriptor_t
485
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
486
+ mini batch size, A is the alphabet size) */
487
+ const void *probs, /* probabilities after softmax, in GPU memory */
488
+ const int labels[], /* labels, in GPU memory */
489
+ const int labelLengths[], /* the length of each label, in GPU memory */
490
+ const int inputLengths[], /* the lengths of timing steps in each batch, in GPU memory */
491
+ void *costs, /* the returned costs of CTC, in GPU memory */
492
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
493
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
494
+ size_t workSpaceSizeInBytes, /* size of the workspace */
495
+ void *workspace); /* pointer to the workspace, in GPU memory */
496
+
497
+ /* return the workspace size needed for ctc */
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnGetCTCLossWorkspaceSize(
500
+ cudnnHandle_t handle,
501
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
502
+ timing steps, N is the mini batch size, A is the alphabet size) */
503
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
504
+ dimensions are T,N,A. To compute costs
505
+ only, set it to NULL */
506
+ const int *labels, /* labels, in CPU memory */
507
+ const int *labelLengths, /* the length of each label, in CPU memory */
508
+ const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */
509
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
510
+ cudnnCTCLossDescriptor_t ctcLossDesc,
511
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
512
+
513
+ /* return the workspace size needed for ctc */
514
+ cudnnStatus_t CUDNNWINAPI
515
+ cudnnGetCTCLossWorkspaceSize_v8(
516
+ cudnnHandle_t handle,
517
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
518
+ cudnnCTCLossDescriptor_t ctcLossDesc,
519
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
520
+ timing steps, N is the mini batch size, A is the alphabet size) */
521
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
522
+ dimensions are T,N,A. To compute costs
523
+ only, set it to NULL */
524
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
525
+
526
+ /*
527
+ * \brief Cross-library version checker.
528
+ * This function is implemented differently in each sub-library. Each sublib
529
+ * checks whether its own version matches that of its dependencies.
530
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
531
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
532
+ */
533
+ cudnnStatus_t CUDNNWINAPI
534
+ cudnnAdvTrainVersionCheck(void);
535
+
536
+ #if defined(__cplusplus)
537
+ }
538
+ #endif
539
+
540
+ #endif /* CUDNN_ADV_TRAIN_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train_v8.h ADDED
@@ -0,0 +1,540 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_train : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_TRAIN_H_)
55
+ #define CUDNN_ADV_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+ #include "cudnn_ops_train.h"
63
+ #include "cudnn_adv_infer.h"
64
+
65
+ /* These version numbers are autogenerated, do not edit manually. */
66
+ #define CUDNN_ADV_TRAIN_MAJOR 8
67
+ #define CUDNN_ADV_TRAIN_MINOR 9
68
+ #define CUDNN_ADV_TRAIN_PATCH 2
69
+
70
+ #if (CUDNN_ADV_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_TRAIN_MINOR != CUDNN_MINOR) || \
71
+ (CUDNN_ADV_TRAIN_PATCH != CUDNN_PATCHLEVEL)
72
+ #error Version mismatch in cuDNN ADV TRAIN!!!
73
+ #endif
74
+
75
+ #if defined(__cplusplus)
76
+ extern "C" {
77
+ #endif
78
+
79
+ typedef enum {
80
+ CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */
81
+ CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */
82
+ } cudnnWgradMode_t;
83
+
84
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
85
+ cudnnRNNForwardTraining(cudnnHandle_t handle,
86
+ const cudnnRNNDescriptor_t rnnDesc,
87
+ const int seqLength,
88
+ const cudnnTensorDescriptor_t *xDesc,
89
+ const void *x,
90
+ const cudnnTensorDescriptor_t hxDesc,
91
+ const void *hx,
92
+ const cudnnTensorDescriptor_t cxDesc,
93
+ const void *cx,
94
+ const cudnnFilterDescriptor_t wDesc,
95
+ const void *w,
96
+ const cudnnTensorDescriptor_t *yDesc,
97
+ void *y,
98
+ const cudnnTensorDescriptor_t hyDesc,
99
+ void *hy,
100
+ const cudnnTensorDescriptor_t cyDesc,
101
+ void *cy,
102
+ void *workSpace,
103
+ size_t workSpaceSizeInBytes,
104
+ void *reserveSpace,
105
+ size_t reserveSpaceSizeInBytes);
106
+
107
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
108
+ cudnnRNNBackwardData(cudnnHandle_t handle,
109
+ const cudnnRNNDescriptor_t rnnDesc,
110
+ const int seqLength,
111
+ const cudnnTensorDescriptor_t *yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t *dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t dhyDesc,
116
+ const void *dhy,
117
+ const cudnnTensorDescriptor_t dcyDesc,
118
+ const void *dcy,
119
+ const cudnnFilterDescriptor_t wDesc,
120
+ const void *w,
121
+ const cudnnTensorDescriptor_t hxDesc,
122
+ const void *hx,
123
+ const cudnnTensorDescriptor_t cxDesc,
124
+ const void *cx,
125
+ const cudnnTensorDescriptor_t *dxDesc,
126
+ void *dx,
127
+ const cudnnTensorDescriptor_t dhxDesc,
128
+ void *dhx,
129
+ const cudnnTensorDescriptor_t dcxDesc,
130
+ void *dcx,
131
+ void *workSpace,
132
+ size_t workSpaceSizeInBytes,
133
+ void *reserveSpace,
134
+ size_t reserveSpaceSizeInBytes);
135
+
136
+ cudnnStatus_t CUDNNWINAPI
137
+ cudnnRNNBackwardData_v8(cudnnHandle_t handle,
138
+ cudnnRNNDescriptor_t rnnDesc,
139
+ const int32_t devSeqLengths[],
140
+ cudnnRNNDataDescriptor_t yDesc,
141
+ const void *y,
142
+ const void *dy,
143
+ cudnnRNNDataDescriptor_t xDesc,
144
+ void *dx,
145
+ cudnnTensorDescriptor_t hDesc,
146
+ const void *hx,
147
+ const void *dhy,
148
+ void *dhx,
149
+ cudnnTensorDescriptor_t cDesc,
150
+ const void *cx,
151
+ const void *dcy,
152
+ void *dcx,
153
+ size_t weightSpaceSize,
154
+ const void *weightSpace,
155
+ size_t workSpaceSize,
156
+ void *workSpace,
157
+ size_t reserveSpaceSize,
158
+ void *reserveSpace);
159
+
160
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
161
+ cudnnRNNBackwardWeights(cudnnHandle_t handle,
162
+ const cudnnRNNDescriptor_t rnnDesc,
163
+ const int seqLength,
164
+ const cudnnTensorDescriptor_t *xDesc,
165
+ const void *x,
166
+ const cudnnTensorDescriptor_t hxDesc,
167
+ const void *hx,
168
+ const cudnnTensorDescriptor_t *yDesc,
169
+ const void *y,
170
+ const void *workSpace,
171
+ size_t workSpaceSizeInBytes,
172
+ const cudnnFilterDescriptor_t dwDesc,
173
+ void *dw,
174
+ const void *reserveSpace,
175
+ size_t reserveSpaceSizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnRNNBackwardWeights_v8(cudnnHandle_t handle,
179
+ cudnnRNNDescriptor_t rnnDesc,
180
+ cudnnWgradMode_t addGrad,
181
+ const int32_t devSeqLengths[],
182
+ cudnnRNNDataDescriptor_t xDesc,
183
+ const void *x,
184
+ cudnnTensorDescriptor_t hDesc,
185
+ const void *hx,
186
+ cudnnRNNDataDescriptor_t yDesc,
187
+ const void *y,
188
+ size_t weightSpaceSize,
189
+ void *dweightSpace,
190
+ size_t workSpaceSize,
191
+ void *workSpace,
192
+ size_t reserveSpaceSize,
193
+ void *reserveSpace);
194
+
195
+ /* RNN EX API */
196
+
197
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
198
+ cudnnRNNForwardTrainingEx(cudnnHandle_t handle,
199
+ const cudnnRNNDescriptor_t rnnDesc,
200
+ const cudnnRNNDataDescriptor_t xDesc,
201
+ const void *x,
202
+ const cudnnTensorDescriptor_t hxDesc,
203
+ const void *hx,
204
+ const cudnnTensorDescriptor_t cxDesc,
205
+ const void *cx,
206
+ const cudnnFilterDescriptor_t wDesc,
207
+ const void *w,
208
+ const cudnnRNNDataDescriptor_t yDesc,
209
+ void *y,
210
+ const cudnnTensorDescriptor_t hyDesc,
211
+ void *hy,
212
+ const cudnnTensorDescriptor_t cyDesc,
213
+ void *cy,
214
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
215
+ const void *keys, /* reserved, should pass NULL */
216
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
217
+ void *cAttn, /* reserved, should pass NULL */
218
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
219
+ void *iAttn, /* reserved, should pass NULL */
220
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
221
+ void *queries, /* reserved, should pass NULL */
222
+ void *workSpace,
223
+ size_t workSpaceSizeInBytes,
224
+ void *reserveSpace,
225
+ size_t reserveSpaceSizeInBytes);
226
+
227
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
228
+ cudnnRNNBackwardDataEx(cudnnHandle_t handle,
229
+ const cudnnRNNDescriptor_t rnnDesc,
230
+ const cudnnRNNDataDescriptor_t yDesc,
231
+ const void *y,
232
+ const cudnnRNNDataDescriptor_t dyDesc,
233
+ const void *dy,
234
+ const cudnnRNNDataDescriptor_t dcDesc, /* reserved, should pass NULL */
235
+ const void *dcAttn, /* reserved, should pass NULL */
236
+ const cudnnTensorDescriptor_t dhyDesc,
237
+ const void *dhy,
238
+ const cudnnTensorDescriptor_t dcyDesc,
239
+ const void *dcy,
240
+ const cudnnFilterDescriptor_t wDesc,
241
+ const void *w,
242
+ const cudnnTensorDescriptor_t hxDesc,
243
+ const void *hx,
244
+ const cudnnTensorDescriptor_t cxDesc,
245
+ const void *cx,
246
+ const cudnnRNNDataDescriptor_t dxDesc,
247
+ void *dx,
248
+ const cudnnTensorDescriptor_t dhxDesc,
249
+ void *dhx,
250
+ const cudnnTensorDescriptor_t dcxDesc,
251
+ void *dcx,
252
+ const cudnnRNNDataDescriptor_t dkDesc, /* reserved, should pass NULL */
253
+ void *dkeys, /* reserved, should pass NULL */
254
+ void *workSpace,
255
+ size_t workSpaceSizeInBytes,
256
+ void *reserveSpace,
257
+ size_t reserveSpaceSizeInBytes);
258
+
259
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
260
+ cudnnRNNBackwardWeightsEx(cudnnHandle_t handle,
261
+ const cudnnRNNDescriptor_t rnnDesc,
262
+ const cudnnRNNDataDescriptor_t xDesc,
263
+ const void *x,
264
+ const cudnnTensorDescriptor_t hxDesc,
265
+ const void *hx,
266
+ const cudnnRNNDataDescriptor_t yDesc,
267
+ const void *y,
268
+ void *workSpace,
269
+ size_t workSpaceSizeInBytes,
270
+ const cudnnFilterDescriptor_t dwDesc,
271
+ void *dw,
272
+ void *reserveSpace,
273
+ size_t reserveSpaceSizeInBytes);
274
+
275
+ /* RNN FIND API */
276
+
277
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
278
+ cudnnGetRNNForwardTrainingAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
279
+
280
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
281
+ cudnnFindRNNForwardTrainingAlgorithmEx(cudnnHandle_t handle,
282
+ const cudnnRNNDescriptor_t rnnDesc,
283
+ const int seqLength,
284
+ const cudnnTensorDescriptor_t *xDesc,
285
+ const void *x,
286
+ const cudnnTensorDescriptor_t hxDesc,
287
+ const void *hx,
288
+ const cudnnTensorDescriptor_t cxDesc,
289
+ const void *cx,
290
+ const cudnnFilterDescriptor_t wDesc,
291
+ const void *w,
292
+ const cudnnTensorDescriptor_t *yDesc,
293
+ void *y,
294
+ const cudnnTensorDescriptor_t hyDesc,
295
+ void *hy,
296
+ const cudnnTensorDescriptor_t cyDesc,
297
+ void *cy,
298
+ const float findIntensity,
299
+ const int requestedAlgoCount,
300
+ int *returnedAlgoCount,
301
+ cudnnAlgorithmPerformance_t *perfResults,
302
+ void *workspace,
303
+ size_t workSpaceSizeInBytes,
304
+ void *reserveSpace,
305
+ size_t reserveSpaceSizeInBytes);
306
+
307
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
308
+ cudnnGetRNNBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
309
+
310
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
311
+ cudnnFindRNNBackwardDataAlgorithmEx(cudnnHandle_t handle,
312
+ const cudnnRNNDescriptor_t rnnDesc,
313
+ const int seqLength,
314
+ const cudnnTensorDescriptor_t *yDesc,
315
+ const void *y,
316
+ const cudnnTensorDescriptor_t *dyDesc,
317
+ const void *dy,
318
+ const cudnnTensorDescriptor_t dhyDesc,
319
+ const void *dhy,
320
+ const cudnnTensorDescriptor_t dcyDesc,
321
+ const void *dcy,
322
+ const cudnnFilterDescriptor_t wDesc,
323
+ const void *w,
324
+ const cudnnTensorDescriptor_t hxDesc,
325
+ const void *hx,
326
+ const cudnnTensorDescriptor_t cxDesc,
327
+ const void *cx,
328
+ const cudnnTensorDescriptor_t *dxDesc,
329
+ void *dx,
330
+ const cudnnTensorDescriptor_t dhxDesc,
331
+ void *dhx,
332
+ const cudnnTensorDescriptor_t dcxDesc,
333
+ void *dcx,
334
+ const float findIntensity,
335
+ const int requestedAlgoCount,
336
+ int *returnedAlgoCount,
337
+ cudnnAlgorithmPerformance_t *perfResults,
338
+ void *workspace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetRNNBackwardWeightsAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
345
+
346
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
347
+ cudnnFindRNNBackwardWeightsAlgorithmEx(cudnnHandle_t handle,
348
+ const cudnnRNNDescriptor_t rnnDesc,
349
+ const int seqLength,
350
+ const cudnnTensorDescriptor_t *xDesc,
351
+ const void *x,
352
+ const cudnnTensorDescriptor_t hxDesc,
353
+ const void *hx,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ const void *y,
356
+ const float findIntensity,
357
+ const int requestedAlgoCount,
358
+ int *returnedAlgoCount,
359
+ cudnnAlgorithmPerformance_t *perfResults,
360
+ const void *workspace,
361
+ size_t workSpaceSizeInBytes,
362
+ const cudnnFilterDescriptor_t dwDesc,
363
+ void *dw,
364
+ const void *reserveSpace,
365
+ size_t reserveSpaceSizeInBytes);
366
+
367
+ cudnnStatus_t CUDNNWINAPI
368
+ cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle,
369
+ const cudnnAttnDescriptor_t attnDesc,
370
+ const int loWinIdx[],
371
+ const int hiWinIdx[],
372
+ const int devSeqLengthsDQDO[],
373
+ const int devSeqLengthsDKDV[],
374
+ const cudnnSeqDataDescriptor_t doDesc,
375
+ const void *dout,
376
+ const cudnnSeqDataDescriptor_t dqDesc,
377
+ void *dqueries,
378
+ const void *queries,
379
+ const cudnnSeqDataDescriptor_t dkDesc,
380
+ void *dkeys,
381
+ const void *keys,
382
+ const cudnnSeqDataDescriptor_t dvDesc,
383
+ void *dvalues,
384
+ const void *values,
385
+ size_t weightSizeInBytes,
386
+ const void *weights,
387
+ size_t workSpaceSizeInBytes,
388
+ void *workSpace,
389
+ size_t reserveSpaceSizeInBytes,
390
+ void *reserveSpace);
391
+
392
+ cudnnStatus_t CUDNNWINAPI
393
+ cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle,
394
+ const cudnnAttnDescriptor_t attnDesc,
395
+ cudnnWgradMode_t addGrad,
396
+ const cudnnSeqDataDescriptor_t qDesc,
397
+ const void *queries,
398
+ const cudnnSeqDataDescriptor_t kDesc,
399
+ const void *keys,
400
+ const cudnnSeqDataDescriptor_t vDesc,
401
+ const void *values,
402
+ const cudnnSeqDataDescriptor_t doDesc,
403
+ const void *dout,
404
+ size_t weightSizeInBytes,
405
+ const void *weights,
406
+ void *dweights,
407
+ size_t workSpaceSizeInBytes,
408
+ void *workSpace,
409
+ size_t reserveSpaceSizeInBytes,
410
+ void *reserveSpace);
411
+
412
+ /*
413
+ * CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions
414
+ */
415
+ /* Input normalization mode for loss function */
416
+ typedef enum {
417
+ CUDNN_LOSS_NORMALIZATION_NONE = 0,
418
+ CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1,
419
+ } cudnnLossNormalizationMode_t;
420
+
421
+ cudnnStatus_t CUDNNWINAPI
422
+ cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc);
423
+
424
+ cudnnStatus_t CUDNNWINAPI
425
+ cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType);
426
+
427
+ cudnnStatus_t CUDNNWINAPI
428
+ cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
429
+ cudnnDataType_t compType,
430
+ cudnnLossNormalizationMode_t normMode,
431
+ cudnnNanPropagation_t gradMode);
432
+
433
+ cudnnStatus_t CUDNNWINAPI
434
+ cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
435
+ cudnnDataType_t compType,
436
+ cudnnLossNormalizationMode_t normMode,
437
+ cudnnNanPropagation_t gradMode,
438
+ int maxLabelLength);
439
+
440
+ cudnnStatus_t CUDNNWINAPI
441
+ cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType);
442
+
443
+ cudnnStatus_t CUDNNWINAPI
444
+ cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
445
+ cudnnDataType_t *compType,
446
+ cudnnLossNormalizationMode_t *normMode,
447
+ cudnnNanPropagation_t *gradMode);
448
+
449
+ cudnnStatus_t CUDNNWINAPI
450
+ cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
451
+ cudnnDataType_t *compType,
452
+ cudnnLossNormalizationMode_t *normMode,
453
+ cudnnNanPropagation_t *gradMode,
454
+ int *maxLabelLength);
455
+
456
+ cudnnStatus_t CUDNNWINAPI
457
+ cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc);
458
+
459
+ /* return the ctc costs and gradients, given the probabilities and labels */
460
+ cudnnStatus_t CUDNNWINAPI
461
+ cudnnCTCLoss(
462
+ cudnnHandle_t handle,
463
+ const cudnnTensorDescriptor_t
464
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
465
+ mini batch size, A is the alphabet size) */
466
+ const void *probs, /* probabilities after softmax, in GPU memory */
467
+ const int hostLabels[], /* labels, in CPU memory */
468
+ const int hostLabelLengths[], /* the length of each label, in CPU memory */
469
+ const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */
470
+ void *costs, /* the returned costs of CTC, in GPU memory */
471
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
472
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
473
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
474
+ cudnnCTCLossDescriptor_t ctcLossDesc,
475
+ void *workspace, /* pointer to the workspace, in GPU memory */
476
+ size_t workSpaceSizeInBytes); /* size of the workspace */
477
+
478
+ /* return the ctc costs and gradients, given the probabilities and labels */
479
+ cudnnStatus_t CUDNNWINAPI
480
+ cudnnCTCLoss_v8(
481
+ cudnnHandle_t handle,
482
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
483
+ cudnnCTCLossDescriptor_t ctcLossDesc,
484
+ const cudnnTensorDescriptor_t
485
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
486
+ mini batch size, A is the alphabet size) */
487
+ const void *probs, /* probabilities after softmax, in GPU memory */
488
+ const int labels[], /* labels, in GPU memory */
489
+ const int labelLengths[], /* the length of each label, in GPU memory */
490
+ const int inputLengths[], /* the lengths of timing steps in each batch, in GPU memory */
491
+ void *costs, /* the returned costs of CTC, in GPU memory */
492
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
493
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
494
+ size_t workSpaceSizeInBytes, /* size of the workspace */
495
+ void *workspace); /* pointer to the workspace, in GPU memory */
496
+
497
+ /* return the workspace size needed for ctc */
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnGetCTCLossWorkspaceSize(
500
+ cudnnHandle_t handle,
501
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
502
+ timing steps, N is the mini batch size, A is the alphabet size) */
503
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
504
+ dimensions are T,N,A. To compute costs
505
+ only, set it to NULL */
506
+ const int *labels, /* labels, in CPU memory */
507
+ const int *labelLengths, /* the length of each label, in CPU memory */
508
+ const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */
509
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
510
+ cudnnCTCLossDescriptor_t ctcLossDesc,
511
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
512
+
513
+ /* return the workspace size needed for ctc */
514
+ cudnnStatus_t CUDNNWINAPI
515
+ cudnnGetCTCLossWorkspaceSize_v8(
516
+ cudnnHandle_t handle,
517
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
518
+ cudnnCTCLossDescriptor_t ctcLossDesc,
519
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
520
+ timing steps, N is the mini batch size, A is the alphabet size) */
521
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
522
+ dimensions are T,N,A. To compute costs
523
+ only, set it to NULL */
524
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
525
+
526
+ /*
527
+ * \brief Cross-library version checker.
528
+ * This function is implemented differently in each sub-library. Each sublib
529
+ * checks whether its own version matches that of its dependencies.
530
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
531
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
532
+ */
533
+ cudnnStatus_t CUDNNWINAPI
534
+ cudnnAdvTrainVersionCheck(void);
535
+
536
+ #if defined(__cplusplus)
537
+ }
538
+ #endif
539
+
540
+ #endif /* CUDNN_ADV_TRAIN_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDNN_BACKEND_H_
51
+ #define _CUDNN_BACKEND_H_
52
+
53
+ /*
54
+ * The content in this header file is under development to be included in cudnn.h in the future
55
+ * Production code should have all include of this header file remove.
56
+ */
57
+
58
+ #include "cudnn_ops_infer.h"
59
+ #include "cudnn_cnn_infer.h"
60
+
61
+ /* NOTE: definition in extern "C" to be copied later to public header */
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif
65
+
66
+ typedef void *cudnnBackendDescriptor_t;
67
+
68
+ typedef struct cudnnFractionStruct {
69
+ int64_t numerator;
70
+ int64_t denominator;
71
+ } cudnnFraction_t;
72
+
73
+ typedef enum {
74
+ CUDNN_POINTWISE_ADD = 0,
75
+ CUDNN_POINTWISE_ADD_SQUARE = 5,
76
+ CUDNN_POINTWISE_DIV = 6,
77
+ CUDNN_POINTWISE_MAX = 3,
78
+ CUDNN_POINTWISE_MIN = 2,
79
+ CUDNN_POINTWISE_MOD = 7,
80
+ CUDNN_POINTWISE_MUL = 1,
81
+ CUDNN_POINTWISE_POW = 8,
82
+ CUDNN_POINTWISE_SUB = 9,
83
+
84
+ CUDNN_POINTWISE_ABS = 10,
85
+ CUDNN_POINTWISE_CEIL = 11,
86
+ CUDNN_POINTWISE_COS = 12,
87
+ CUDNN_POINTWISE_EXP = 13,
88
+ CUDNN_POINTWISE_FLOOR = 14,
89
+ CUDNN_POINTWISE_LOG = 15,
90
+ CUDNN_POINTWISE_NEG = 16,
91
+ CUDNN_POINTWISE_RSQRT = 17,
92
+ CUDNN_POINTWISE_SIN = 18,
93
+ CUDNN_POINTWISE_SQRT = 4,
94
+ CUDNN_POINTWISE_TAN = 19,
95
+ CUDNN_POINTWISE_ERF = 20,
96
+ CUDNN_POINTWISE_IDENTITY = 21,
97
+ CUDNN_POINTWISE_RECIPROCAL = 22,
98
+
99
+ CUDNN_POINTWISE_RELU_FWD = 100,
100
+ CUDNN_POINTWISE_TANH_FWD = 101,
101
+ CUDNN_POINTWISE_SIGMOID_FWD = 102,
102
+ CUDNN_POINTWISE_ELU_FWD = 103,
103
+ CUDNN_POINTWISE_GELU_FWD = 104,
104
+ CUDNN_POINTWISE_SOFTPLUS_FWD = 105,
105
+ CUDNN_POINTWISE_SWISH_FWD = 106,
106
+ CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107,
107
+
108
+ CUDNN_POINTWISE_RELU_BWD = 200,
109
+ CUDNN_POINTWISE_TANH_BWD = 201,
110
+ CUDNN_POINTWISE_SIGMOID_BWD = 202,
111
+ CUDNN_POINTWISE_ELU_BWD = 203,
112
+ CUDNN_POINTWISE_GELU_BWD = 204,
113
+ CUDNN_POINTWISE_SOFTPLUS_BWD = 205,
114
+ CUDNN_POINTWISE_SWISH_BWD = 206,
115
+ CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207,
116
+
117
+ CUDNN_POINTWISE_CMP_EQ = 300,
118
+ CUDNN_POINTWISE_CMP_NEQ = 301,
119
+ CUDNN_POINTWISE_CMP_GT = 302,
120
+ CUDNN_POINTWISE_CMP_GE = 303,
121
+ CUDNN_POINTWISE_CMP_LT = 304,
122
+ CUDNN_POINTWISE_CMP_LE = 305,
123
+
124
+ CUDNN_POINTWISE_LOGICAL_AND = 400,
125
+ CUDNN_POINTWISE_LOGICAL_OR = 401,
126
+ CUDNN_POINTWISE_LOGICAL_NOT = 402,
127
+
128
+ CUDNN_POINTWISE_GEN_INDEX = 501,
129
+
130
+ CUDNN_POINTWISE_BINARY_SELECT = 601,
131
+ } cudnnPointwiseMode_t;
132
+
133
+ typedef enum {
134
+ CUDNN_RESAMPLE_NEAREST = 0,
135
+ CUDNN_RESAMPLE_BILINEAR = 1,
136
+ CUDNN_RESAMPLE_AVGPOOL = 2,
137
+ CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2,
138
+ CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4,
139
+ CUDNN_RESAMPLE_MAXPOOL = 3,
140
+ } cudnnResampleMode_t;
141
+
142
+ typedef enum {
143
+ CUDNN_SIGNAL_SET = 0,
144
+ CUDNN_SIGNAL_WAIT = 1,
145
+ } cudnnSignalMode_t;
146
+
147
+ typedef enum {
148
+ CUDNN_GENSTATS_SUM_SQSUM = 0,
149
+ } cudnnGenStatsMode_t;
150
+
151
+ typedef enum {
152
+ CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0,
153
+ CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1,
154
+ } cudnnBnFinalizeStatsMode_t;
155
+
156
+ typedef enum {
157
+ CUDNN_RNG_DISTRIBUTION_BERNOULLI,
158
+ CUDNN_RNG_DISTRIBUTION_UNIFORM,
159
+ CUDNN_RNG_DISTRIBUTION_NORMAL,
160
+ } cudnnRngDistribution_t;
161
+
162
+ typedef enum {
163
+ CUDNN_ATTR_POINTWISE_MODE = 0,
164
+ CUDNN_ATTR_POINTWISE_MATH_PREC = 1,
165
+ CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2,
166
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3,
167
+ CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4,
168
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5,
169
+ CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6,
170
+ CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7,
171
+ CUDNN_ATTR_POINTWISE_SWISH_BETA = 8,
172
+ CUDNN_ATTR_POINTWISE_AXIS = 9,
173
+
174
+ CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100,
175
+ CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101,
176
+ CUDNN_ATTR_CONVOLUTION_DILATIONS = 102,
177
+ CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103,
178
+ CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104,
179
+ CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105,
180
+ CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106,
181
+
182
+ CUDNN_ATTR_ENGINEHEUR_MODE = 200,
183
+ CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201,
184
+ CUDNN_ATTR_ENGINEHEUR_RESULTS = 202,
185
+
186
+ CUDNN_ATTR_ENGINECFG_ENGINE = 300,
187
+ CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301,
188
+ CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302,
189
+
190
+ CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400,
191
+ CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401,
192
+ CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402,
193
+ CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403,
194
+ CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404,
195
+ CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405,
196
+
197
+ CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500,
198
+ CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501,
199
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502,
200
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503,
201
+
202
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600,
203
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601,
204
+
205
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700,
206
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701,
207
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702,
208
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703,
209
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704,
210
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705,
211
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706,
212
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707,
213
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708,
214
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709,
215
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710,
216
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711,
217
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712,
218
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713,
219
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714,
220
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715,
221
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716,
222
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717,
223
+
224
+ CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750,
225
+ CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751,
226
+ CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752,
227
+ CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753,
228
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754,
229
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755,
230
+ CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756,
231
+ CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757,
232
+ CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758,
233
+
234
+ CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770,
235
+ CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771,
236
+ CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772,
237
+ CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773,
238
+ CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774,
239
+
240
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780,
241
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781,
242
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782,
243
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783,
244
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784,
245
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785,
246
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786,
247
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787,
248
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788,
249
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789,
250
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790,
251
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791,
252
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792,
253
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793,
254
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794,
255
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795,
256
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796,
257
+
258
+ CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800,
259
+ CUDNN_ATTR_OPERATIONGRAPH_OPS = 801,
260
+ CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802,
261
+
262
+ CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900,
263
+ CUDNN_ATTR_TENSOR_DATA_TYPE = 901,
264
+ CUDNN_ATTR_TENSOR_DIMENSIONS = 902,
265
+ CUDNN_ATTR_TENSOR_STRIDES = 903,
266
+ CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904,
267
+ CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905,
268
+ CUDNN_ATTR_TENSOR_UNIQUE_ID = 906,
269
+ CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907,
270
+ CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908,
271
+ CUDNN_ATTR_TENSOR_REORDERING_MODE = 909,
272
+ CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913,
273
+
274
+ CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000,
275
+ CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001,
276
+ CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002,
277
+ CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003,
278
+
279
+ CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100,
280
+ CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101,
281
+
282
+ CUDNN_ATTR_KNOB_INFO_TYPE = 1200,
283
+ CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201,
284
+ CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202,
285
+ CUDNN_ATTR_KNOB_INFO_STRIDE = 1203,
286
+
287
+ CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300,
288
+ CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301,
289
+ CUDNN_ATTR_ENGINE_KNOB_INFO = 1302,
290
+ CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303,
291
+ CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304,
292
+ CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305,
293
+
294
+ CUDNN_ATTR_MATMUL_COMP_TYPE = 1500,
295
+ CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503,
296
+
297
+ CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520,
298
+ CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521,
299
+ CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522,
300
+ CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523,
301
+ CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524,
302
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525,
303
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526,
304
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527,
305
+
306
+ CUDNN_ATTR_REDUCTION_OPERATOR = 1600,
307
+ CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601,
308
+
309
+ CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610,
310
+ CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611,
311
+ CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612,
312
+
313
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620,
314
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621,
315
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622,
316
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623,
317
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624,
318
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625,
319
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626,
320
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627,
321
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628,
322
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629,
323
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630,
324
+
325
+ CUDNN_ATTR_RESAMPLE_MODE = 1700,
326
+ CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701,
327
+ CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702,
328
+ CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703,
329
+ CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704,
330
+ CUDNN_ATTR_RESAMPLE_STRIDES = 1705,
331
+ CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706,
332
+ CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707,
333
+ CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708,
334
+
335
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710,
336
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711,
337
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712,
338
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713,
339
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714,
340
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716,
341
+
342
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720,
343
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721,
344
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722,
345
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723,
346
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724,
347
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725,
348
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726,
349
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727,
350
+
351
+ CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800,
352
+ CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801,
353
+ CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802,
354
+ CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803,
355
+
356
+ CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900,
357
+ CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901,
358
+ CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902,
359
+ CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903,
360
+ CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904,
361
+
362
+ CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000,
363
+ CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001,
364
+ CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002,
365
+ CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003,
366
+ CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004,
367
+ CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005,
368
+ CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006,
369
+ CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007,
370
+ CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008,
371
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009,
372
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010,
373
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011,
374
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012,
375
+ CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013,
376
+ CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014,
377
+
378
+ CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100,
379
+ CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101,
380
+ CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102,
381
+ CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103,
382
+ CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104,
383
+ CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105,
384
+ CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106,
385
+ CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107,
386
+ CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108,
387
+ CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109,
388
+ CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110,
389
+
390
+ CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200,
391
+ CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201,
392
+
393
+ CUDNN_ATTR_RNG_DISTRIBUTION = 2300,
394
+ CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301,
395
+ CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302,
396
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303,
397
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304,
398
+ CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305,
399
+
400
+ CUDNN_ATTR_OPERATION_RNG_YDESC = 2310,
401
+ CUDNN_ATTR_OPERATION_RNG_SEED = 2311,
402
+ CUDNN_ATTR_OPERATION_RNG_DESC = 2312,
403
+ CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313,
404
+
405
+ } cudnnBackendAttributeName_t;
406
+
407
+ typedef enum {
408
+ CUDNN_TYPE_HANDLE = 0,
409
+ CUDNN_TYPE_DATA_TYPE,
410
+ CUDNN_TYPE_BOOLEAN,
411
+ CUDNN_TYPE_INT64,
412
+ CUDNN_TYPE_FLOAT,
413
+ CUDNN_TYPE_DOUBLE,
414
+ CUDNN_TYPE_VOID_PTR,
415
+ CUDNN_TYPE_CONVOLUTION_MODE,
416
+ CUDNN_TYPE_HEUR_MODE,
417
+ CUDNN_TYPE_KNOB_TYPE,
418
+ CUDNN_TYPE_NAN_PROPOGATION,
419
+ CUDNN_TYPE_NUMERICAL_NOTE,
420
+ CUDNN_TYPE_LAYOUT_TYPE,
421
+ CUDNN_TYPE_ATTRIB_NAME,
422
+ CUDNN_TYPE_POINTWISE_MODE,
423
+ CUDNN_TYPE_BACKEND_DESCRIPTOR,
424
+ CUDNN_TYPE_GENSTATS_MODE,
425
+ CUDNN_TYPE_BN_FINALIZE_STATS_MODE,
426
+ CUDNN_TYPE_REDUCTION_OPERATOR_TYPE,
427
+ CUDNN_TYPE_BEHAVIOR_NOTE,
428
+ CUDNN_TYPE_TENSOR_REORDERING_MODE,
429
+ CUDNN_TYPE_RESAMPLE_MODE,
430
+ CUDNN_TYPE_PADDING_MODE,
431
+ CUDNN_TYPE_INT32,
432
+ CUDNN_TYPE_CHAR,
433
+ CUDNN_TYPE_SIGNAL_MODE,
434
+ CUDNN_TYPE_FRACTION,
435
+ CUDNN_TYPE_NORM_MODE,
436
+ CUDNN_TYPE_NORM_FWD_PHASE,
437
+ CUDNN_TYPE_RNG_DISTRIBUTION
438
+ } cudnnBackendAttributeType_t;
439
+
440
+ typedef enum {
441
+ CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0,
442
+ CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR,
443
+ CUDNN_BACKEND_ENGINE_DESCRIPTOR,
444
+ CUDNN_BACKEND_ENGINECFG_DESCRIPTOR,
445
+ CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR,
446
+ CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR,
447
+ CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR,
448
+ CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR,
449
+ CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR,
450
+ CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR,
451
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR,
452
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR,
453
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR,
454
+ CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR,
455
+ CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR,
456
+ CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR,
457
+ CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR,
458
+ CUDNN_BACKEND_TENSOR_DESCRIPTOR,
459
+ CUDNN_BACKEND_MATMUL_DESCRIPTOR,
460
+ CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR,
461
+ CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR,
462
+ CUDNN_BACKEND_REDUCTION_DESCRIPTOR,
463
+ CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR,
464
+ CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR,
465
+ CUDNN_BACKEND_RESAMPLE_DESCRIPTOR,
466
+ CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR,
467
+ CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR,
468
+ CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR,
469
+ CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR,
470
+ CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR,
471
+ CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR,
472
+ CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR,
473
+ CUDNN_BACKEND_RNG_DESCRIPTOR,
474
+ CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR
475
+ } cudnnBackendDescriptorType_t;
476
+
477
+ typedef enum {
478
+ CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0,
479
+ CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS,
480
+ CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION,
481
+ CUDNN_NUMERICAL_NOTE_FFT,
482
+ CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC,
483
+ CUDNN_NUMERICAL_NOTE_WINOGRAD,
484
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4,
485
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6,
486
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13,
487
+ CUDNN_NUMERICAL_NOTE_TYPE_COUNT,
488
+ } cudnnBackendNumericalNote_t;
489
+
490
+ typedef enum {
491
+ CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0,
492
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1,
493
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2,
494
+ CUDNN_BEHAVIOR_NOTE_TYPE_COUNT,
495
+ } cudnnBackendBehaviorNote_t;
496
+
497
+ typedef enum {
498
+ CUDNN_KNOB_TYPE_SPLIT_K = 0,
499
+ CUDNN_KNOB_TYPE_SWIZZLE = 1,
500
+ CUDNN_KNOB_TYPE_TILE_SIZE = 2,
501
+ CUDNN_KNOB_TYPE_USE_TEX = 3,
502
+ CUDNN_KNOB_TYPE_EDGE = 4,
503
+ CUDNN_KNOB_TYPE_KBLOCK = 5,
504
+ CUDNN_KNOB_TYPE_LDGA = 6,
505
+ CUDNN_KNOB_TYPE_LDGB = 7,
506
+ CUDNN_KNOB_TYPE_CHUNK_K = 8,
507
+ CUDNN_KNOB_TYPE_SPLIT_H = 9,
508
+ CUDNN_KNOB_TYPE_WINO_TILE = 10,
509
+ CUDNN_KNOB_TYPE_MULTIPLY = 11,
510
+ CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12,
511
+ CUDNN_KNOB_TYPE_TILEK = 13,
512
+ CUDNN_KNOB_TYPE_STAGES = 14,
513
+ CUDNN_KNOB_TYPE_REDUCTION_MODE = 15,
514
+ CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16,
515
+ CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17,
516
+ CUDNN_KNOB_TYPE_IDX_MODE = 18,
517
+ CUDNN_KNOB_TYPE_SLICED = 19,
518
+ CUDNN_KNOB_TYPE_SPLIT_RS = 20,
519
+ CUDNN_KNOB_TYPE_SINGLEBUFFER = 21,
520
+ CUDNN_KNOB_TYPE_LDGC = 22,
521
+ CUDNN_KNOB_TYPE_SPECFILT = 23,
522
+ CUDNN_KNOB_TYPE_KERNEL_CFG = 24,
523
+ CUDNN_KNOB_TYPE_WORKSPACE = 25,
524
+ CUDNN_KNOB_TYPE_TILE_CGA = 26,
525
+ CUDNN_KNOB_TYPE_TILE_CGA_M = 27,
526
+ CUDNN_KNOB_TYPE_TILE_CGA_N = 28,
527
+ CUDNN_KNOB_TYPE_BLOCK_SIZE = 29,
528
+ CUDNN_KNOB_TYPE_OCCUPANCY = 30,
529
+ CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31,
530
+ CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32,
531
+ CUDNN_KNOB_TYPE_COUNTS,
532
+ } cudnnBackendKnobType_t;
533
+
534
+ typedef enum {
535
+ CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0,
536
+ CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1,
537
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2,
538
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3,
539
+ CUDNN_LAYOUT_TYPE_COUNT = 4,
540
+ } cudnnBackendLayoutType_t;
541
+
542
+ typedef enum {
543
+ CUDNN_HEUR_MODE_INSTANT = 0,
544
+ CUDNN_HEUR_MODE_B = 1,
545
+ CUDNN_HEUR_MODE_FALLBACK = 2,
546
+ CUDNN_HEUR_MODE_A = 3,
547
+ CUDNN_HEUR_MODES_COUNT = 4,
548
+ } cudnnBackendHeurMode_t;
549
+
550
+ typedef enum {
551
+ CUDNN_TENSOR_REORDERING_NONE = 0,
552
+ CUDNN_TENSOR_REORDERING_INT8x32 = 1,
553
+ CUDNN_TENSOR_REORDERING_F16x16 = 2,
554
+ } cudnnBackendTensorReordering_t;
555
+
556
+ typedef enum {
557
+ CUDNN_ZERO_PAD = 0,
558
+ CUDNN_NEG_INF_PAD = 1,
559
+ CUDNN_EDGE_VAL_PAD = 2,
560
+ } cudnnPaddingMode_t;
561
+
562
+ typedef enum {
563
+ CUDNN_LAYER_NORM = 0,
564
+ CUDNN_INSTANCE_NORM = 1,
565
+ CUDNN_BATCH_NORM = 2,
566
+ CUDNN_GROUP_NORM = 3,
567
+ } cudnnBackendNormMode_t;
568
+
569
+ typedef enum {
570
+ CUDNN_NORM_FWD_INFERENCE = 0,
571
+ CUDNN_NORM_FWD_TRAINING = 1,
572
+ } cudnnBackendNormFwdPhase_t;
573
+
574
+ cudnnStatus_t CUDNNWINAPI
575
+ cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor);
576
+
577
+ cudnnStatus_t CUDNNWINAPI
578
+ cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor);
579
+
580
+ cudnnStatus_t CUDNNWINAPI
581
+ cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor);
582
+
583
+ cudnnStatus_t CUDNNWINAPI
584
+ cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor);
585
+
586
+ cudnnStatus_t CUDNNWINAPI
587
+ cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor,
588
+ cudnnBackendAttributeName_t attributeName,
589
+ cudnnBackendAttributeType_t attributeType,
590
+ int64_t elementCount,
591
+ const void *arrayOfElements);
592
+
593
+ cudnnStatus_t CUDNNWINAPI
594
+ cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor,
595
+ cudnnBackendAttributeName_t attributeName,
596
+ cudnnBackendAttributeType_t attributeType,
597
+ int64_t requestedElementCount,
598
+ int64_t *elementCount,
599
+ void *arrayOfElements);
600
+
601
+ cudnnStatus_t CUDNNWINAPI
602
+ cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack);
603
+
604
+ #if defined(__cplusplus)
605
+ }
606
+ #endif
607
+
608
+ #endif /* _CUDNN_BACKEND_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDNN_BACKEND_H_
51
+ #define _CUDNN_BACKEND_H_
52
+
53
+ /*
54
+ * The content in this header file is under development to be included in cudnn.h in the future
55
+ * Production code should have all include of this header file remove.
56
+ */
57
+
58
+ #include "cudnn_ops_infer.h"
59
+ #include "cudnn_cnn_infer.h"
60
+
61
+ /* NOTE: definition in extern "C" to be copied later to public header */
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif
65
+
66
+ typedef void *cudnnBackendDescriptor_t;
67
+
68
+ typedef struct cudnnFractionStruct {
69
+ int64_t numerator;
70
+ int64_t denominator;
71
+ } cudnnFraction_t;
72
+
73
+ typedef enum {
74
+ CUDNN_POINTWISE_ADD = 0,
75
+ CUDNN_POINTWISE_ADD_SQUARE = 5,
76
+ CUDNN_POINTWISE_DIV = 6,
77
+ CUDNN_POINTWISE_MAX = 3,
78
+ CUDNN_POINTWISE_MIN = 2,
79
+ CUDNN_POINTWISE_MOD = 7,
80
+ CUDNN_POINTWISE_MUL = 1,
81
+ CUDNN_POINTWISE_POW = 8,
82
+ CUDNN_POINTWISE_SUB = 9,
83
+
84
+ CUDNN_POINTWISE_ABS = 10,
85
+ CUDNN_POINTWISE_CEIL = 11,
86
+ CUDNN_POINTWISE_COS = 12,
87
+ CUDNN_POINTWISE_EXP = 13,
88
+ CUDNN_POINTWISE_FLOOR = 14,
89
+ CUDNN_POINTWISE_LOG = 15,
90
+ CUDNN_POINTWISE_NEG = 16,
91
+ CUDNN_POINTWISE_RSQRT = 17,
92
+ CUDNN_POINTWISE_SIN = 18,
93
+ CUDNN_POINTWISE_SQRT = 4,
94
+ CUDNN_POINTWISE_TAN = 19,
95
+ CUDNN_POINTWISE_ERF = 20,
96
+ CUDNN_POINTWISE_IDENTITY = 21,
97
+ CUDNN_POINTWISE_RECIPROCAL = 22,
98
+
99
+ CUDNN_POINTWISE_RELU_FWD = 100,
100
+ CUDNN_POINTWISE_TANH_FWD = 101,
101
+ CUDNN_POINTWISE_SIGMOID_FWD = 102,
102
+ CUDNN_POINTWISE_ELU_FWD = 103,
103
+ CUDNN_POINTWISE_GELU_FWD = 104,
104
+ CUDNN_POINTWISE_SOFTPLUS_FWD = 105,
105
+ CUDNN_POINTWISE_SWISH_FWD = 106,
106
+ CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107,
107
+
108
+ CUDNN_POINTWISE_RELU_BWD = 200,
109
+ CUDNN_POINTWISE_TANH_BWD = 201,
110
+ CUDNN_POINTWISE_SIGMOID_BWD = 202,
111
+ CUDNN_POINTWISE_ELU_BWD = 203,
112
+ CUDNN_POINTWISE_GELU_BWD = 204,
113
+ CUDNN_POINTWISE_SOFTPLUS_BWD = 205,
114
+ CUDNN_POINTWISE_SWISH_BWD = 206,
115
+ CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207,
116
+
117
+ CUDNN_POINTWISE_CMP_EQ = 300,
118
+ CUDNN_POINTWISE_CMP_NEQ = 301,
119
+ CUDNN_POINTWISE_CMP_GT = 302,
120
+ CUDNN_POINTWISE_CMP_GE = 303,
121
+ CUDNN_POINTWISE_CMP_LT = 304,
122
+ CUDNN_POINTWISE_CMP_LE = 305,
123
+
124
+ CUDNN_POINTWISE_LOGICAL_AND = 400,
125
+ CUDNN_POINTWISE_LOGICAL_OR = 401,
126
+ CUDNN_POINTWISE_LOGICAL_NOT = 402,
127
+
128
+ CUDNN_POINTWISE_GEN_INDEX = 501,
129
+
130
+ CUDNN_POINTWISE_BINARY_SELECT = 601,
131
+ } cudnnPointwiseMode_t;
132
+
133
+ typedef enum {
134
+ CUDNN_RESAMPLE_NEAREST = 0,
135
+ CUDNN_RESAMPLE_BILINEAR = 1,
136
+ CUDNN_RESAMPLE_AVGPOOL = 2,
137
+ CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2,
138
+ CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4,
139
+ CUDNN_RESAMPLE_MAXPOOL = 3,
140
+ } cudnnResampleMode_t;
141
+
142
+ typedef enum {
143
+ CUDNN_SIGNAL_SET = 0,
144
+ CUDNN_SIGNAL_WAIT = 1,
145
+ } cudnnSignalMode_t;
146
+
147
+ typedef enum {
148
+ CUDNN_GENSTATS_SUM_SQSUM = 0,
149
+ } cudnnGenStatsMode_t;
150
+
151
+ typedef enum {
152
+ CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0,
153
+ CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1,
154
+ } cudnnBnFinalizeStatsMode_t;
155
+
156
+ typedef enum {
157
+ CUDNN_RNG_DISTRIBUTION_BERNOULLI,
158
+ CUDNN_RNG_DISTRIBUTION_UNIFORM,
159
+ CUDNN_RNG_DISTRIBUTION_NORMAL,
160
+ } cudnnRngDistribution_t;
161
+
162
+ typedef enum {
163
+ CUDNN_ATTR_POINTWISE_MODE = 0,
164
+ CUDNN_ATTR_POINTWISE_MATH_PREC = 1,
165
+ CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2,
166
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3,
167
+ CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4,
168
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5,
169
+ CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6,
170
+ CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7,
171
+ CUDNN_ATTR_POINTWISE_SWISH_BETA = 8,
172
+ CUDNN_ATTR_POINTWISE_AXIS = 9,
173
+
174
+ CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100,
175
+ CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101,
176
+ CUDNN_ATTR_CONVOLUTION_DILATIONS = 102,
177
+ CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103,
178
+ CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104,
179
+ CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105,
180
+ CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106,
181
+
182
+ CUDNN_ATTR_ENGINEHEUR_MODE = 200,
183
+ CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201,
184
+ CUDNN_ATTR_ENGINEHEUR_RESULTS = 202,
185
+
186
+ CUDNN_ATTR_ENGINECFG_ENGINE = 300,
187
+ CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301,
188
+ CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302,
189
+
190
+ CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400,
191
+ CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401,
192
+ CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402,
193
+ CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403,
194
+ CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404,
195
+ CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405,
196
+
197
+ CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500,
198
+ CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501,
199
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502,
200
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503,
201
+
202
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600,
203
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601,
204
+
205
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700,
206
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701,
207
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702,
208
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703,
209
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704,
210
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705,
211
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706,
212
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707,
213
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708,
214
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709,
215
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710,
216
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711,
217
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712,
218
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713,
219
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714,
220
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715,
221
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716,
222
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717,
223
+
224
+ CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750,
225
+ CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751,
226
+ CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752,
227
+ CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753,
228
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754,
229
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755,
230
+ CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756,
231
+ CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757,
232
+ CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758,
233
+
234
+ CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770,
235
+ CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771,
236
+ CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772,
237
+ CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773,
238
+ CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774,
239
+
240
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780,
241
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781,
242
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782,
243
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783,
244
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784,
245
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785,
246
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786,
247
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787,
248
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788,
249
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789,
250
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790,
251
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791,
252
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792,
253
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793,
254
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794,
255
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795,
256
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796,
257
+
258
+ CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800,
259
+ CUDNN_ATTR_OPERATIONGRAPH_OPS = 801,
260
+ CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802,
261
+
262
+ CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900,
263
+ CUDNN_ATTR_TENSOR_DATA_TYPE = 901,
264
+ CUDNN_ATTR_TENSOR_DIMENSIONS = 902,
265
+ CUDNN_ATTR_TENSOR_STRIDES = 903,
266
+ CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904,
267
+ CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905,
268
+ CUDNN_ATTR_TENSOR_UNIQUE_ID = 906,
269
+ CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907,
270
+ CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908,
271
+ CUDNN_ATTR_TENSOR_REORDERING_MODE = 909,
272
+ CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913,
273
+
274
+ CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000,
275
+ CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001,
276
+ CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002,
277
+ CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003,
278
+
279
+ CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100,
280
+ CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101,
281
+
282
+ CUDNN_ATTR_KNOB_INFO_TYPE = 1200,
283
+ CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201,
284
+ CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202,
285
+ CUDNN_ATTR_KNOB_INFO_STRIDE = 1203,
286
+
287
+ CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300,
288
+ CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301,
289
+ CUDNN_ATTR_ENGINE_KNOB_INFO = 1302,
290
+ CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303,
291
+ CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304,
292
+ CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305,
293
+
294
+ CUDNN_ATTR_MATMUL_COMP_TYPE = 1500,
295
+ CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503,
296
+
297
+ CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520,
298
+ CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521,
299
+ CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522,
300
+ CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523,
301
+ CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524,
302
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525,
303
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526,
304
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527,
305
+
306
+ CUDNN_ATTR_REDUCTION_OPERATOR = 1600,
307
+ CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601,
308
+
309
+ CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610,
310
+ CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611,
311
+ CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612,
312
+
313
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620,
314
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621,
315
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622,
316
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623,
317
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624,
318
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625,
319
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626,
320
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627,
321
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628,
322
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629,
323
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630,
324
+
325
+ CUDNN_ATTR_RESAMPLE_MODE = 1700,
326
+ CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701,
327
+ CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702,
328
+ CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703,
329
+ CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704,
330
+ CUDNN_ATTR_RESAMPLE_STRIDES = 1705,
331
+ CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706,
332
+ CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707,
333
+ CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708,
334
+
335
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710,
336
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711,
337
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712,
338
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713,
339
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714,
340
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716,
341
+
342
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720,
343
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721,
344
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722,
345
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723,
346
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724,
347
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725,
348
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726,
349
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727,
350
+
351
+ CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800,
352
+ CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801,
353
+ CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802,
354
+ CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803,
355
+
356
+ CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900,
357
+ CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901,
358
+ CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902,
359
+ CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903,
360
+ CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904,
361
+
362
+ CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000,
363
+ CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001,
364
+ CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002,
365
+ CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003,
366
+ CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004,
367
+ CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005,
368
+ CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006,
369
+ CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007,
370
+ CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008,
371
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009,
372
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010,
373
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011,
374
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012,
375
+ CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013,
376
+ CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014,
377
+
378
+ CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100,
379
+ CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101,
380
+ CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102,
381
+ CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103,
382
+ CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104,
383
+ CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105,
384
+ CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106,
385
+ CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107,
386
+ CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108,
387
+ CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109,
388
+ CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110,
389
+
390
+ CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200,
391
+ CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201,
392
+
393
+ CUDNN_ATTR_RNG_DISTRIBUTION = 2300,
394
+ CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301,
395
+ CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302,
396
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303,
397
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304,
398
+ CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305,
399
+
400
+ CUDNN_ATTR_OPERATION_RNG_YDESC = 2310,
401
+ CUDNN_ATTR_OPERATION_RNG_SEED = 2311,
402
+ CUDNN_ATTR_OPERATION_RNG_DESC = 2312,
403
+ CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313,
404
+
405
+ } cudnnBackendAttributeName_t;
406
+
407
+ typedef enum {
408
+ CUDNN_TYPE_HANDLE = 0,
409
+ CUDNN_TYPE_DATA_TYPE,
410
+ CUDNN_TYPE_BOOLEAN,
411
+ CUDNN_TYPE_INT64,
412
+ CUDNN_TYPE_FLOAT,
413
+ CUDNN_TYPE_DOUBLE,
414
+ CUDNN_TYPE_VOID_PTR,
415
+ CUDNN_TYPE_CONVOLUTION_MODE,
416
+ CUDNN_TYPE_HEUR_MODE,
417
+ CUDNN_TYPE_KNOB_TYPE,
418
+ CUDNN_TYPE_NAN_PROPOGATION,
419
+ CUDNN_TYPE_NUMERICAL_NOTE,
420
+ CUDNN_TYPE_LAYOUT_TYPE,
421
+ CUDNN_TYPE_ATTRIB_NAME,
422
+ CUDNN_TYPE_POINTWISE_MODE,
423
+ CUDNN_TYPE_BACKEND_DESCRIPTOR,
424
+ CUDNN_TYPE_GENSTATS_MODE,
425
+ CUDNN_TYPE_BN_FINALIZE_STATS_MODE,
426
+ CUDNN_TYPE_REDUCTION_OPERATOR_TYPE,
427
+ CUDNN_TYPE_BEHAVIOR_NOTE,
428
+ CUDNN_TYPE_TENSOR_REORDERING_MODE,
429
+ CUDNN_TYPE_RESAMPLE_MODE,
430
+ CUDNN_TYPE_PADDING_MODE,
431
+ CUDNN_TYPE_INT32,
432
+ CUDNN_TYPE_CHAR,
433
+ CUDNN_TYPE_SIGNAL_MODE,
434
+ CUDNN_TYPE_FRACTION,
435
+ CUDNN_TYPE_NORM_MODE,
436
+ CUDNN_TYPE_NORM_FWD_PHASE,
437
+ CUDNN_TYPE_RNG_DISTRIBUTION
438
+ } cudnnBackendAttributeType_t;
439
+
440
+ typedef enum {
441
+ CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0,
442
+ CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR,
443
+ CUDNN_BACKEND_ENGINE_DESCRIPTOR,
444
+ CUDNN_BACKEND_ENGINECFG_DESCRIPTOR,
445
+ CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR,
446
+ CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR,
447
+ CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR,
448
+ CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR,
449
+ CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR,
450
+ CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR,
451
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR,
452
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR,
453
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR,
454
+ CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR,
455
+ CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR,
456
+ CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR,
457
+ CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR,
458
+ CUDNN_BACKEND_TENSOR_DESCRIPTOR,
459
+ CUDNN_BACKEND_MATMUL_DESCRIPTOR,
460
+ CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR,
461
+ CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR,
462
+ CUDNN_BACKEND_REDUCTION_DESCRIPTOR,
463
+ CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR,
464
+ CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR,
465
+ CUDNN_BACKEND_RESAMPLE_DESCRIPTOR,
466
+ CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR,
467
+ CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR,
468
+ CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR,
469
+ CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR,
470
+ CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR,
471
+ CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR,
472
+ CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR,
473
+ CUDNN_BACKEND_RNG_DESCRIPTOR,
474
+ CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR
475
+ } cudnnBackendDescriptorType_t;
476
+
477
+ typedef enum {
478
+ CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0,
479
+ CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS,
480
+ CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION,
481
+ CUDNN_NUMERICAL_NOTE_FFT,
482
+ CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC,
483
+ CUDNN_NUMERICAL_NOTE_WINOGRAD,
484
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4,
485
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6,
486
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13,
487
+ CUDNN_NUMERICAL_NOTE_TYPE_COUNT,
488
+ } cudnnBackendNumericalNote_t;
489
+
490
+ typedef enum {
491
+ CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0,
492
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1,
493
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2,
494
+ CUDNN_BEHAVIOR_NOTE_TYPE_COUNT,
495
+ } cudnnBackendBehaviorNote_t;
496
+
497
+ typedef enum {
498
+ CUDNN_KNOB_TYPE_SPLIT_K = 0,
499
+ CUDNN_KNOB_TYPE_SWIZZLE = 1,
500
+ CUDNN_KNOB_TYPE_TILE_SIZE = 2,
501
+ CUDNN_KNOB_TYPE_USE_TEX = 3,
502
+ CUDNN_KNOB_TYPE_EDGE = 4,
503
+ CUDNN_KNOB_TYPE_KBLOCK = 5,
504
+ CUDNN_KNOB_TYPE_LDGA = 6,
505
+ CUDNN_KNOB_TYPE_LDGB = 7,
506
+ CUDNN_KNOB_TYPE_CHUNK_K = 8,
507
+ CUDNN_KNOB_TYPE_SPLIT_H = 9,
508
+ CUDNN_KNOB_TYPE_WINO_TILE = 10,
509
+ CUDNN_KNOB_TYPE_MULTIPLY = 11,
510
+ CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12,
511
+ CUDNN_KNOB_TYPE_TILEK = 13,
512
+ CUDNN_KNOB_TYPE_STAGES = 14,
513
+ CUDNN_KNOB_TYPE_REDUCTION_MODE = 15,
514
+ CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16,
515
+ CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17,
516
+ CUDNN_KNOB_TYPE_IDX_MODE = 18,
517
+ CUDNN_KNOB_TYPE_SLICED = 19,
518
+ CUDNN_KNOB_TYPE_SPLIT_RS = 20,
519
+ CUDNN_KNOB_TYPE_SINGLEBUFFER = 21,
520
+ CUDNN_KNOB_TYPE_LDGC = 22,
521
+ CUDNN_KNOB_TYPE_SPECFILT = 23,
522
+ CUDNN_KNOB_TYPE_KERNEL_CFG = 24,
523
+ CUDNN_KNOB_TYPE_WORKSPACE = 25,
524
+ CUDNN_KNOB_TYPE_TILE_CGA = 26,
525
+ CUDNN_KNOB_TYPE_TILE_CGA_M = 27,
526
+ CUDNN_KNOB_TYPE_TILE_CGA_N = 28,
527
+ CUDNN_KNOB_TYPE_BLOCK_SIZE = 29,
528
+ CUDNN_KNOB_TYPE_OCCUPANCY = 30,
529
+ CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31,
530
+ CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32,
531
+ CUDNN_KNOB_TYPE_COUNTS,
532
+ } cudnnBackendKnobType_t;
533
+
534
+ typedef enum {
535
+ CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0,
536
+ CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1,
537
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2,
538
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3,
539
+ CUDNN_LAYOUT_TYPE_COUNT = 4,
540
+ } cudnnBackendLayoutType_t;
541
+
542
+ typedef enum {
543
+ CUDNN_HEUR_MODE_INSTANT = 0,
544
+ CUDNN_HEUR_MODE_B = 1,
545
+ CUDNN_HEUR_MODE_FALLBACK = 2,
546
+ CUDNN_HEUR_MODE_A = 3,
547
+ CUDNN_HEUR_MODES_COUNT = 4,
548
+ } cudnnBackendHeurMode_t;
549
+
550
+ typedef enum {
551
+ CUDNN_TENSOR_REORDERING_NONE = 0,
552
+ CUDNN_TENSOR_REORDERING_INT8x32 = 1,
553
+ CUDNN_TENSOR_REORDERING_F16x16 = 2,
554
+ } cudnnBackendTensorReordering_t;
555
+
556
+ typedef enum {
557
+ CUDNN_ZERO_PAD = 0,
558
+ CUDNN_NEG_INF_PAD = 1,
559
+ CUDNN_EDGE_VAL_PAD = 2,
560
+ } cudnnPaddingMode_t;
561
+
562
+ typedef enum {
563
+ CUDNN_LAYER_NORM = 0,
564
+ CUDNN_INSTANCE_NORM = 1,
565
+ CUDNN_BATCH_NORM = 2,
566
+ CUDNN_GROUP_NORM = 3,
567
+ } cudnnBackendNormMode_t;
568
+
569
+ typedef enum {
570
+ CUDNN_NORM_FWD_INFERENCE = 0,
571
+ CUDNN_NORM_FWD_TRAINING = 1,
572
+ } cudnnBackendNormFwdPhase_t;
573
+
574
+ cudnnStatus_t CUDNNWINAPI
575
+ cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor);
576
+
577
+ cudnnStatus_t CUDNNWINAPI
578
+ cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor);
579
+
580
+ cudnnStatus_t CUDNNWINAPI
581
+ cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor);
582
+
583
+ cudnnStatus_t CUDNNWINAPI
584
+ cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor);
585
+
586
+ cudnnStatus_t CUDNNWINAPI
587
+ cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor,
588
+ cudnnBackendAttributeName_t attributeName,
589
+ cudnnBackendAttributeType_t attributeType,
590
+ int64_t elementCount,
591
+ const void *arrayOfElements);
592
+
593
+ cudnnStatus_t CUDNNWINAPI
594
+ cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor,
595
+ cudnnBackendAttributeName_t attributeName,
596
+ cudnnBackendAttributeType_t attributeType,
597
+ int64_t requestedElementCount,
598
+ int64_t *elementCount,
599
+ void *arrayOfElements);
600
+
601
+ cudnnStatus_t CUDNNWINAPI
602
+ cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack);
603
+
604
+ #if defined(__cplusplus)
605
+ }
606
+ #endif
607
+
608
+ #endif /* _CUDNN_BACKEND_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h ADDED
@@ -0,0 +1,571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #if !defined(CUDNN_CNN_INFER_H_)
55
+ #define CUDNN_CNN_INFER_H_
56
+
57
+ #pragma once
58
+ #include <cuda_runtime.h>
59
+ #include <stdint.h>
60
+
61
+ #include "cudnn_version.h"
62
+ #include "cudnn_ops_infer.h"
63
+
64
+ /* These version numbers are autogenerated, do not edit manually. */
65
+ #define CUDNN_CNN_INFER_MAJOR 8
66
+ #define CUDNN_CNN_INFER_MINOR 9
67
+ #define CUDNN_CNN_INFER_PATCH 2
68
+
69
+ #if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \
70
+ (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL)
71
+ #error Version mismatch in cuDNN CNN INFER!!!
72
+ #endif
73
+
74
+ #if defined(__cplusplus)
75
+ extern "C" {
76
+ #endif
77
+
78
+ typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t;
79
+
80
+ /*
81
+ * convolution mode
82
+ */
83
+ typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t;
84
+
85
+ /*
86
+ * CUDNN Reorder
87
+ */
88
+ typedef enum {
89
+ CUDNN_DEFAULT_REORDER = 0,
90
+ CUDNN_NO_REORDER = 1,
91
+ } cudnnReorderType_t;
92
+
93
+ typedef struct cudnnConvolutionFwdAlgoPerfStruct {
94
+ cudnnConvolutionFwdAlgo_t algo;
95
+ cudnnStatus_t status;
96
+ float time;
97
+ size_t memory;
98
+ cudnnDeterminism_t determinism;
99
+ cudnnMathType_t mathType;
100
+ int reserved[3];
101
+ } cudnnConvolutionFwdAlgoPerf_t;
102
+
103
+ /* Create an instance of convolution descriptor */
104
+ cudnnStatus_t CUDNNWINAPI
105
+ cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc);
106
+
107
+ /* Destroy an instance of convolution descriptor */
108
+ cudnnStatus_t CUDNNWINAPI
109
+ cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc);
110
+
111
+ cudnnStatus_t CUDNNWINAPI
112
+ cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType);
113
+
114
+ cudnnStatus_t CUDNNWINAPI
115
+ cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount);
119
+
120
+ cudnnStatus_t CUDNNWINAPI
121
+ cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount);
122
+
123
+ cudnnStatus_t CUDNNWINAPI
124
+ cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType);
125
+
126
+ cudnnStatus_t CUDNNWINAPI
127
+ cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType);
128
+
129
+ cudnnStatus_t CUDNNWINAPI
130
+ cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc,
131
+ int pad_h, /* zero-padding height */
132
+ int pad_w, /* zero-padding width */
133
+ int u, /* vertical filter stride */
134
+ int v, /* horizontal filter stride */
135
+ int dilation_h, /* filter dilation in the vertical dimension */
136
+ int dilation_w, /* filter dilation in the horizontal dimension */
137
+ cudnnConvolutionMode_t mode,
138
+ cudnnDataType_t computeType);
139
+
140
+ cudnnStatus_t CUDNNWINAPI
141
+ cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc,
142
+ int *pad_h, /* zero-padding height */
143
+ int *pad_w, /* zero-padding width */
144
+ int *u, /* vertical filter stride */
145
+ int *v, /* horizontal filter stride */
146
+ int *dilation_h, /* filter dilation in the vertical dimension */
147
+ int *dilation_w, /* filter dilation in the horizontal dimension */
148
+ cudnnConvolutionMode_t *mode,
149
+ cudnnDataType_t *computeType);
150
+
151
+ cudnnStatus_t CUDNNWINAPI
152
+ cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc,
153
+ int arrayLength, /* nbDims-2 size */
154
+ const int padA[],
155
+ const int filterStrideA[],
156
+ const int dilationA[],
157
+ cudnnConvolutionMode_t mode,
158
+ cudnnDataType_t computeType); /* convolution data type */
159
+
160
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
161
+ cudnnStatus_t CUDNNWINAPI
162
+ cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc,
163
+ int arrayLengthRequested,
164
+ int *arrayLength,
165
+ int padA[],
166
+ int strideA[],
167
+ int dilationA[],
168
+ cudnnConvolutionMode_t *mode,
169
+ cudnnDataType_t *computeType); /* convolution data type */
170
+
171
+ cudnnStatus_t CUDNNWINAPI
172
+ cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
173
+ const cudnnTensorDescriptor_t inputTensorDesc,
174
+ const cudnnFilterDescriptor_t filterDesc,
175
+ int *n,
176
+ int *c,
177
+ int *h,
178
+ int *w);
179
+
180
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
181
+ cudnnStatus_t CUDNNWINAPI
182
+ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
183
+ const cudnnTensorDescriptor_t inputTensorDesc,
184
+ const cudnnFilterDescriptor_t filterDesc,
185
+ int nbDims,
186
+ int tensorOuputDimA[]);
187
+
188
+ /* helper function to provide the convolution forward algo that fit best the requirement */
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count);
191
+
192
+ cudnnStatus_t CUDNNWINAPI
193
+ cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle,
194
+ const cudnnTensorDescriptor_t srcDesc,
195
+ const cudnnFilterDescriptor_t filterDesc,
196
+ const cudnnConvolutionDescriptor_t convDesc,
197
+ const cudnnTensorDescriptor_t destDesc,
198
+ const int requestedAlgoCount,
199
+ int *returnedAlgoCount,
200
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle,
204
+ const cudnnTensorDescriptor_t xDesc,
205
+ const cudnnFilterDescriptor_t wDesc,
206
+ const cudnnConvolutionDescriptor_t convDesc,
207
+ const cudnnTensorDescriptor_t yDesc,
208
+ const int requestedAlgoCount,
209
+ int *returnedAlgoCount,
210
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
211
+
212
+ cudnnStatus_t CUDNNWINAPI
213
+ cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle,
214
+ const cudnnTensorDescriptor_t xDesc,
215
+ const void *x,
216
+ const cudnnFilterDescriptor_t wDesc,
217
+ const void *w,
218
+ const cudnnConvolutionDescriptor_t convDesc,
219
+ const cudnnTensorDescriptor_t yDesc,
220
+ void *y,
221
+ const int requestedAlgoCount,
222
+ int *returnedAlgoCount,
223
+ cudnnConvolutionFwdAlgoPerf_t *perfResults,
224
+ void *workSpace,
225
+ size_t workSpaceSizeInBytes);
226
+
227
+ cudnnStatus_t CUDNNWINAPI
228
+ cudnnIm2Col(cudnnHandle_t handle,
229
+ const cudnnTensorDescriptor_t xDesc,
230
+ const void *x,
231
+ const cudnnFilterDescriptor_t wDesc,
232
+ const cudnnConvolutionDescriptor_t convDesc,
233
+ void *colBuffer);
234
+
235
+ cudnnStatus_t CUDNNWINAPI
236
+ cudnnReorderFilterAndBias(cudnnHandle_t handle,
237
+ const cudnnFilterDescriptor_t filterDesc,
238
+ cudnnReorderType_t reorderType,
239
+ const void *filterData,
240
+ void *reorderedFilterData,
241
+ int reorderBias,
242
+ const void *biasData,
243
+ void *reorderedBiasData);
244
+
245
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
246
+ cudnnStatus_t CUDNNWINAPI
247
+ cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle,
248
+ const cudnnTensorDescriptor_t xDesc,
249
+ const cudnnFilterDescriptor_t wDesc,
250
+ const cudnnConvolutionDescriptor_t convDesc,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ cudnnConvolutionFwdAlgo_t algo,
253
+ size_t *sizeInBytes);
254
+
255
+ /* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */
256
+
257
+ /* Function to perform the forward pass for batch convolution */
258
+ cudnnStatus_t CUDNNWINAPI
259
+ cudnnConvolutionForward(cudnnHandle_t handle,
260
+ const void *alpha,
261
+ const cudnnTensorDescriptor_t xDesc,
262
+ const void *x,
263
+ const cudnnFilterDescriptor_t wDesc,
264
+ const void *w,
265
+ const cudnnConvolutionDescriptor_t convDesc,
266
+ cudnnConvolutionFwdAlgo_t algo,
267
+ void *workSpace,
268
+ size_t workSpaceSizeInBytes,
269
+ const void *beta,
270
+ const cudnnTensorDescriptor_t yDesc,
271
+ void *y);
272
+
273
+ /* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */
274
+ cudnnStatus_t CUDNNWINAPI
275
+ cudnnConvolutionBiasActivationForward(cudnnHandle_t handle,
276
+ const void *alpha1,
277
+ const cudnnTensorDescriptor_t xDesc,
278
+ const void *x,
279
+ const cudnnFilterDescriptor_t wDesc,
280
+ const void *w,
281
+ const cudnnConvolutionDescriptor_t convDesc,
282
+ cudnnConvolutionFwdAlgo_t algo,
283
+ void *workSpace,
284
+ size_t workSpaceSizeInBytes,
285
+ const void *alpha2,
286
+ const cudnnTensorDescriptor_t zDesc,
287
+ const void *z,
288
+ const cudnnTensorDescriptor_t biasDesc,
289
+ const void *bias,
290
+ const cudnnActivationDescriptor_t activationDesc,
291
+ const cudnnTensorDescriptor_t yDesc,
292
+ void *y);
293
+
294
+ /* helper function to provide the convolution backward data algo that fit best the requirement */
295
+
296
+ typedef struct cudnnConvolutionBwdDataAlgoPerfStruct {
297
+ cudnnConvolutionBwdDataAlgo_t algo;
298
+ cudnnStatus_t status;
299
+ float time;
300
+ size_t memory;
301
+ cudnnDeterminism_t determinism;
302
+ cudnnMathType_t mathType;
303
+ int reserved[3];
304
+ } cudnnConvolutionBwdDataAlgoPerf_t;
305
+
306
+ cudnnStatus_t CUDNNWINAPI
307
+ cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count);
308
+
309
+ cudnnStatus_t CUDNNWINAPI
310
+ cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
311
+ const cudnnFilterDescriptor_t wDesc,
312
+ const cudnnTensorDescriptor_t dyDesc,
313
+ const cudnnConvolutionDescriptor_t convDesc,
314
+ const cudnnTensorDescriptor_t dxDesc,
315
+ const int requestedAlgoCount,
316
+ int *returnedAlgoCount,
317
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
318
+
319
+ cudnnStatus_t CUDNNWINAPI
320
+ cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle,
321
+ const cudnnFilterDescriptor_t wDesc,
322
+ const void *w,
323
+ const cudnnTensorDescriptor_t dyDesc,
324
+ const void *dy,
325
+ const cudnnConvolutionDescriptor_t convDesc,
326
+ const cudnnTensorDescriptor_t dxDesc,
327
+ void *dx,
328
+ const int requestedAlgoCount,
329
+ int *returnedAlgoCount,
330
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults,
331
+ void *workSpace,
332
+ size_t workSpaceSizeInBytes);
333
+
334
+ cudnnStatus_t CUDNNWINAPI
335
+ cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle,
336
+ const cudnnFilterDescriptor_t filterDesc,
337
+ const cudnnTensorDescriptor_t diffDesc,
338
+ const cudnnConvolutionDescriptor_t convDesc,
339
+ const cudnnTensorDescriptor_t gradDesc,
340
+ const int requestedAlgoCount,
341
+ int *returnedAlgoCount,
342
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
343
+
344
+ /*
345
+ * convolution algorithm (which requires potentially some workspace)
346
+ */
347
+
348
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
349
+ cudnnStatus_t CUDNNWINAPI
350
+ cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle,
351
+ const cudnnFilterDescriptor_t wDesc,
352
+ const cudnnTensorDescriptor_t dyDesc,
353
+ const cudnnConvolutionDescriptor_t convDesc,
354
+ const cudnnTensorDescriptor_t dxDesc,
355
+ cudnnConvolutionBwdDataAlgo_t algo,
356
+ size_t *sizeInBytes);
357
+
358
+ cudnnStatus_t CUDNNWINAPI
359
+ cudnnConvolutionBackwardData(cudnnHandle_t handle,
360
+ const void *alpha,
361
+ const cudnnFilterDescriptor_t wDesc,
362
+ const void *w,
363
+ const cudnnTensorDescriptor_t dyDesc,
364
+ const void *dy,
365
+ const cudnnConvolutionDescriptor_t convDesc,
366
+ cudnnConvolutionBwdDataAlgo_t algo,
367
+ void *workSpace,
368
+ size_t workSpaceSizeInBytes,
369
+ const void *beta,
370
+ const cudnnTensorDescriptor_t dxDesc,
371
+ void *dx);
372
+
373
+ /* Helper function to calculate folding descriptors for dgrad */
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle,
376
+ const cudnnFilterDescriptor_t filterDesc,
377
+ const cudnnTensorDescriptor_t diffDesc,
378
+ const cudnnConvolutionDescriptor_t convDesc,
379
+ const cudnnTensorDescriptor_t gradDesc,
380
+ const cudnnTensorFormat_t transformFormat,
381
+ cudnnFilterDescriptor_t foldedFilterDesc,
382
+ cudnnTensorDescriptor_t paddedDiffDesc,
383
+ cudnnConvolutionDescriptor_t foldedConvDesc,
384
+ cudnnTensorDescriptor_t foldedGradDesc,
385
+ cudnnTensorTransformDescriptor_t filterFoldTransDesc,
386
+ cudnnTensorTransformDescriptor_t diffPadTransDesc,
387
+ cudnnTensorTransformDescriptor_t gradFoldTransDesc,
388
+ cudnnTensorTransformDescriptor_t gradUnfoldTransDesc);
389
+
390
+ /* cudnnFusedOps... */
391
+ struct cudnnFusedOpsConstParamStruct;
392
+ typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t;
393
+
394
+ struct cudnnFusedOpsVariantParamStruct;
395
+ typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t;
396
+
397
+ struct cudnnFusedOpsPlanStruct;
398
+ typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t;
399
+
400
+ typedef enum {
401
+ /* each op in [ ] can be disabled by passing NULL ptr */
402
+ /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */
403
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0,
404
+ /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */
405
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1,
406
+ /* utility for BN training in BN-conv fusion */
407
+ /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */
408
+ /* optionally update running stats and generate saved stats */
409
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2,
410
+ /* utility for BN inference in BN-conv fusion */
411
+ /* computes the equivalent scale and bias from learned running stats and learned scale, bias */
412
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3,
413
+ /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */
414
+ CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4,
415
+ /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */
416
+ CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5,
417
+ /* reserved for future use */
418
+ CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6,
419
+ } cudnnFusedOps_t;
420
+
421
+ typedef enum {
422
+ /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */
423
+ /* get XDESC: pass previously created cudnnTensorDescriptor_t */
424
+ CUDNN_PARAM_XDESC = 0,
425
+ /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
426
+ CUDNN_PARAM_XDATA_PLACEHOLDER = 1,
427
+ /* set/get BN_MODE: pass cudnnBatchNormMode_t* */
428
+ CUDNN_PARAM_BN_MODE = 2,
429
+ /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
430
+ /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
431
+ CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3,
432
+ /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
433
+ CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4,
434
+ /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
435
+ CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5,
436
+ /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */
437
+ /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */
438
+ CUDNN_PARAM_ACTIVATION_DESC = 6,
439
+ /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */
440
+ /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */
441
+ CUDNN_PARAM_CONV_DESC = 7,
442
+ /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */
443
+ /* get WDESC: pass previously created cudnnFilterDescriptor_t */
444
+ CUDNN_PARAM_WDESC = 8,
445
+ /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
446
+ CUDNN_PARAM_WDATA_PLACEHOLDER = 9,
447
+ /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */
448
+ /* get DWDESC: pass previously created cudnnFilterDescriptor_t */
449
+ CUDNN_PARAM_DWDESC = 10,
450
+ /* set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
451
+ CUDNN_PARAM_DWDATA_PLACEHOLDER = 11,
452
+ /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */
453
+ /* get YDESC: pass previously created cudnnTensorDescriptor_t */
454
+ CUDNN_PARAM_YDESC = 12,
455
+ /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
456
+ CUDNN_PARAM_YDATA_PLACEHOLDER = 13,
457
+ /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */
458
+ /* get DYDESC: pass previously created cudnnTensorDescriptor_t */
459
+ CUDNN_PARAM_DYDESC = 14,
460
+ /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
461
+ CUDNN_PARAM_DYDATA_PLACEHOLDER = 15,
462
+ /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */
463
+ /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */
464
+ CUDNN_PARAM_YSTATS_DESC = 16,
465
+ /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
466
+ CUDNN_PARAM_YSUM_PLACEHOLDER = 17,
467
+ /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
468
+ CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18,
469
+ /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */
470
+ /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */
471
+ CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19,
472
+ /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
473
+ CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20,
474
+ /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
475
+ CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21,
476
+ /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
477
+ CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22,
478
+ /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
479
+ CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23,
480
+ /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
481
+ CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24,
482
+ /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
483
+ CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25,
484
+
485
+ /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */
486
+ /* get ZDESC: pass previously created cudnnTensorDescriptor_t */
487
+ CUDNN_PARAM_ZDESC = 26,
488
+ /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
489
+ CUDNN_PARAM_ZDATA_PLACEHOLDER = 27,
490
+ /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
491
+ /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
492
+ CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28,
493
+ /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
494
+ CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29,
495
+ /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
496
+ CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30,
497
+
498
+ /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */
499
+ /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */
500
+ CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31,
501
+ /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
502
+ CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32,
503
+
504
+ /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */
505
+ /* get DXDESC: pass previously created cudnnTensorDescriptor_t */
506
+ CUDNN_PARAM_DXDESC = 33,
507
+ /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
508
+ CUDNN_PARAM_DXDATA_PLACEHOLDER = 34,
509
+ /* set DZDESC: pass previously initialized cudnnTensorDescriptor_t */
510
+ /* get DZDESC: pass previously created cudnnTensorDescriptor_t */
511
+ CUDNN_PARAM_DZDESC = 35,
512
+ /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
513
+ CUDNN_PARAM_DZDATA_PLACEHOLDER = 36,
514
+ /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
515
+ CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37,
516
+ /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
517
+ CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38,
518
+ } cudnnFusedOpsConstParamLabel_t;
519
+
520
+ typedef enum {
521
+ CUDNN_PTR_NULL = 0,
522
+ CUDNN_PTR_ELEM_ALIGNED = 1,
523
+ CUDNN_PTR_16B_ALIGNED = 2,
524
+ } cudnnFusedOpsPointerPlaceHolder_t;
525
+
526
+ typedef enum {
527
+ /* set: pass void* pointing to dev memory */
528
+ /* get: pass void** pointing to host memory */
529
+ CUDNN_PTR_XDATA = 0,
530
+ CUDNN_PTR_BN_EQSCALE = 1,
531
+ CUDNN_PTR_BN_EQBIAS = 2,
532
+ CUDNN_PTR_WDATA = 3,
533
+ CUDNN_PTR_DWDATA = 4,
534
+ CUDNN_PTR_YDATA = 5,
535
+ CUDNN_PTR_DYDATA = 6,
536
+ CUDNN_PTR_YSUM = 7,
537
+ CUDNN_PTR_YSQSUM = 8,
538
+ CUDNN_PTR_WORKSPACE = 9,
539
+ CUDNN_PTR_BN_SCALE = 10,
540
+ CUDNN_PTR_BN_BIAS = 11,
541
+ CUDNN_PTR_BN_SAVED_MEAN = 12,
542
+ CUDNN_PTR_BN_SAVED_INVSTD = 13,
543
+ CUDNN_PTR_BN_RUNNING_MEAN = 14,
544
+ CUDNN_PTR_BN_RUNNING_VAR = 15,
545
+ CUDNN_PTR_ZDATA = 16,
546
+ CUDNN_PTR_BN_Z_EQSCALE = 17,
547
+ CUDNN_PTR_BN_Z_EQBIAS = 18,
548
+ CUDNN_PTR_ACTIVATION_BITMASK = 19,
549
+ CUDNN_PTR_DXDATA = 20,
550
+ CUDNN_PTR_DZDATA = 21,
551
+ CUDNN_PTR_BN_DSCALE = 22,
552
+ CUDNN_PTR_BN_DBIAS = 23,
553
+
554
+ /* set/get: pass size_t* pointing to host memory */
555
+ CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100,
556
+ /* set/get: pass int64_t* pointing to host memory */
557
+ CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101,
558
+ /* set/get: pass double* pointing to host memory */
559
+ CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102,
560
+ /* set/get: pass double* pointing to host memory */
561
+ CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103,
562
+ } cudnnFusedOpsVariantParamLabel_t;
563
+
564
+ cudnnStatus_t CUDNNWINAPI
565
+ cudnnCnnInferVersionCheck(void);
566
+
567
+ #if defined(__cplusplus)
568
+ }
569
+ #endif
570
+
571
+ #endif /* CUDNN_CNN_INFER_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h ADDED
@@ -0,0 +1,571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #if !defined(CUDNN_CNN_INFER_H_)
55
+ #define CUDNN_CNN_INFER_H_
56
+
57
+ #pragma once
58
+ #include <cuda_runtime.h>
59
+ #include <stdint.h>
60
+
61
+ #include "cudnn_version.h"
62
+ #include "cudnn_ops_infer.h"
63
+
64
+ /* These version numbers are autogenerated, do not edit manually. */
65
+ #define CUDNN_CNN_INFER_MAJOR 8
66
+ #define CUDNN_CNN_INFER_MINOR 9
67
+ #define CUDNN_CNN_INFER_PATCH 2
68
+
69
+ #if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \
70
+ (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL)
71
+ #error Version mismatch in cuDNN CNN INFER!!!
72
+ #endif
73
+
74
+ #if defined(__cplusplus)
75
+ extern "C" {
76
+ #endif
77
+
78
+ typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t;
79
+
80
+ /*
81
+ * convolution mode
82
+ */
83
+ typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t;
84
+
85
+ /*
86
+ * CUDNN Reorder
87
+ */
88
+ typedef enum {
89
+ CUDNN_DEFAULT_REORDER = 0,
90
+ CUDNN_NO_REORDER = 1,
91
+ } cudnnReorderType_t;
92
+
93
+ typedef struct cudnnConvolutionFwdAlgoPerfStruct {
94
+ cudnnConvolutionFwdAlgo_t algo;
95
+ cudnnStatus_t status;
96
+ float time;
97
+ size_t memory;
98
+ cudnnDeterminism_t determinism;
99
+ cudnnMathType_t mathType;
100
+ int reserved[3];
101
+ } cudnnConvolutionFwdAlgoPerf_t;
102
+
103
+ /* Create an instance of convolution descriptor */
104
+ cudnnStatus_t CUDNNWINAPI
105
+ cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc);
106
+
107
+ /* Destroy an instance of convolution descriptor */
108
+ cudnnStatus_t CUDNNWINAPI
109
+ cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc);
110
+
111
+ cudnnStatus_t CUDNNWINAPI
112
+ cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType);
113
+
114
+ cudnnStatus_t CUDNNWINAPI
115
+ cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount);
119
+
120
+ cudnnStatus_t CUDNNWINAPI
121
+ cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount);
122
+
123
+ cudnnStatus_t CUDNNWINAPI
124
+ cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType);
125
+
126
+ cudnnStatus_t CUDNNWINAPI
127
+ cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType);
128
+
129
+ cudnnStatus_t CUDNNWINAPI
130
+ cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc,
131
+ int pad_h, /* zero-padding height */
132
+ int pad_w, /* zero-padding width */
133
+ int u, /* vertical filter stride */
134
+ int v, /* horizontal filter stride */
135
+ int dilation_h, /* filter dilation in the vertical dimension */
136
+ int dilation_w, /* filter dilation in the horizontal dimension */
137
+ cudnnConvolutionMode_t mode,
138
+ cudnnDataType_t computeType);
139
+
140
+ cudnnStatus_t CUDNNWINAPI
141
+ cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc,
142
+ int *pad_h, /* zero-padding height */
143
+ int *pad_w, /* zero-padding width */
144
+ int *u, /* vertical filter stride */
145
+ int *v, /* horizontal filter stride */
146
+ int *dilation_h, /* filter dilation in the vertical dimension */
147
+ int *dilation_w, /* filter dilation in the horizontal dimension */
148
+ cudnnConvolutionMode_t *mode,
149
+ cudnnDataType_t *computeType);
150
+
151
+ cudnnStatus_t CUDNNWINAPI
152
+ cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc,
153
+ int arrayLength, /* nbDims-2 size */
154
+ const int padA[],
155
+ const int filterStrideA[],
156
+ const int dilationA[],
157
+ cudnnConvolutionMode_t mode,
158
+ cudnnDataType_t computeType); /* convolution data type */
159
+
160
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
161
+ cudnnStatus_t CUDNNWINAPI
162
+ cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc,
163
+ int arrayLengthRequested,
164
+ int *arrayLength,
165
+ int padA[],
166
+ int strideA[],
167
+ int dilationA[],
168
+ cudnnConvolutionMode_t *mode,
169
+ cudnnDataType_t *computeType); /* convolution data type */
170
+
171
+ cudnnStatus_t CUDNNWINAPI
172
+ cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
173
+ const cudnnTensorDescriptor_t inputTensorDesc,
174
+ const cudnnFilterDescriptor_t filterDesc,
175
+ int *n,
176
+ int *c,
177
+ int *h,
178
+ int *w);
179
+
180
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
181
+ cudnnStatus_t CUDNNWINAPI
182
+ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
183
+ const cudnnTensorDescriptor_t inputTensorDesc,
184
+ const cudnnFilterDescriptor_t filterDesc,
185
+ int nbDims,
186
+ int tensorOuputDimA[]);
187
+
188
+ /* helper function to provide the convolution forward algo that fit best the requirement */
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count);
191
+
192
+ cudnnStatus_t CUDNNWINAPI
193
+ cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle,
194
+ const cudnnTensorDescriptor_t srcDesc,
195
+ const cudnnFilterDescriptor_t filterDesc,
196
+ const cudnnConvolutionDescriptor_t convDesc,
197
+ const cudnnTensorDescriptor_t destDesc,
198
+ const int requestedAlgoCount,
199
+ int *returnedAlgoCount,
200
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle,
204
+ const cudnnTensorDescriptor_t xDesc,
205
+ const cudnnFilterDescriptor_t wDesc,
206
+ const cudnnConvolutionDescriptor_t convDesc,
207
+ const cudnnTensorDescriptor_t yDesc,
208
+ const int requestedAlgoCount,
209
+ int *returnedAlgoCount,
210
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
211
+
212
+ cudnnStatus_t CUDNNWINAPI
213
+ cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle,
214
+ const cudnnTensorDescriptor_t xDesc,
215
+ const void *x,
216
+ const cudnnFilterDescriptor_t wDesc,
217
+ const void *w,
218
+ const cudnnConvolutionDescriptor_t convDesc,
219
+ const cudnnTensorDescriptor_t yDesc,
220
+ void *y,
221
+ const int requestedAlgoCount,
222
+ int *returnedAlgoCount,
223
+ cudnnConvolutionFwdAlgoPerf_t *perfResults,
224
+ void *workSpace,
225
+ size_t workSpaceSizeInBytes);
226
+
227
+ cudnnStatus_t CUDNNWINAPI
228
+ cudnnIm2Col(cudnnHandle_t handle,
229
+ const cudnnTensorDescriptor_t xDesc,
230
+ const void *x,
231
+ const cudnnFilterDescriptor_t wDesc,
232
+ const cudnnConvolutionDescriptor_t convDesc,
233
+ void *colBuffer);
234
+
235
+ cudnnStatus_t CUDNNWINAPI
236
+ cudnnReorderFilterAndBias(cudnnHandle_t handle,
237
+ const cudnnFilterDescriptor_t filterDesc,
238
+ cudnnReorderType_t reorderType,
239
+ const void *filterData,
240
+ void *reorderedFilterData,
241
+ int reorderBias,
242
+ const void *biasData,
243
+ void *reorderedBiasData);
244
+
245
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
246
+ cudnnStatus_t CUDNNWINAPI
247
+ cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle,
248
+ const cudnnTensorDescriptor_t xDesc,
249
+ const cudnnFilterDescriptor_t wDesc,
250
+ const cudnnConvolutionDescriptor_t convDesc,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ cudnnConvolutionFwdAlgo_t algo,
253
+ size_t *sizeInBytes);
254
+
255
+ /* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */
256
+
257
+ /* Function to perform the forward pass for batch convolution */
258
+ cudnnStatus_t CUDNNWINAPI
259
+ cudnnConvolutionForward(cudnnHandle_t handle,
260
+ const void *alpha,
261
+ const cudnnTensorDescriptor_t xDesc,
262
+ const void *x,
263
+ const cudnnFilterDescriptor_t wDesc,
264
+ const void *w,
265
+ const cudnnConvolutionDescriptor_t convDesc,
266
+ cudnnConvolutionFwdAlgo_t algo,
267
+ void *workSpace,
268
+ size_t workSpaceSizeInBytes,
269
+ const void *beta,
270
+ const cudnnTensorDescriptor_t yDesc,
271
+ void *y);
272
+
273
+ /* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */
274
+ cudnnStatus_t CUDNNWINAPI
275
+ cudnnConvolutionBiasActivationForward(cudnnHandle_t handle,
276
+ const void *alpha1,
277
+ const cudnnTensorDescriptor_t xDesc,
278
+ const void *x,
279
+ const cudnnFilterDescriptor_t wDesc,
280
+ const void *w,
281
+ const cudnnConvolutionDescriptor_t convDesc,
282
+ cudnnConvolutionFwdAlgo_t algo,
283
+ void *workSpace,
284
+ size_t workSpaceSizeInBytes,
285
+ const void *alpha2,
286
+ const cudnnTensorDescriptor_t zDesc,
287
+ const void *z,
288
+ const cudnnTensorDescriptor_t biasDesc,
289
+ const void *bias,
290
+ const cudnnActivationDescriptor_t activationDesc,
291
+ const cudnnTensorDescriptor_t yDesc,
292
+ void *y);
293
+
294
+ /* helper function to provide the convolution backward data algo that fit best the requirement */
295
+
296
+ typedef struct cudnnConvolutionBwdDataAlgoPerfStruct {
297
+ cudnnConvolutionBwdDataAlgo_t algo;
298
+ cudnnStatus_t status;
299
+ float time;
300
+ size_t memory;
301
+ cudnnDeterminism_t determinism;
302
+ cudnnMathType_t mathType;
303
+ int reserved[3];
304
+ } cudnnConvolutionBwdDataAlgoPerf_t;
305
+
306
+ cudnnStatus_t CUDNNWINAPI
307
+ cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count);
308
+
309
+ cudnnStatus_t CUDNNWINAPI
310
+ cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
311
+ const cudnnFilterDescriptor_t wDesc,
312
+ const cudnnTensorDescriptor_t dyDesc,
313
+ const cudnnConvolutionDescriptor_t convDesc,
314
+ const cudnnTensorDescriptor_t dxDesc,
315
+ const int requestedAlgoCount,
316
+ int *returnedAlgoCount,
317
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
318
+
319
+ cudnnStatus_t CUDNNWINAPI
320
+ cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle,
321
+ const cudnnFilterDescriptor_t wDesc,
322
+ const void *w,
323
+ const cudnnTensorDescriptor_t dyDesc,
324
+ const void *dy,
325
+ const cudnnConvolutionDescriptor_t convDesc,
326
+ const cudnnTensorDescriptor_t dxDesc,
327
+ void *dx,
328
+ const int requestedAlgoCount,
329
+ int *returnedAlgoCount,
330
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults,
331
+ void *workSpace,
332
+ size_t workSpaceSizeInBytes);
333
+
334
+ cudnnStatus_t CUDNNWINAPI
335
+ cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle,
336
+ const cudnnFilterDescriptor_t filterDesc,
337
+ const cudnnTensorDescriptor_t diffDesc,
338
+ const cudnnConvolutionDescriptor_t convDesc,
339
+ const cudnnTensorDescriptor_t gradDesc,
340
+ const int requestedAlgoCount,
341
+ int *returnedAlgoCount,
342
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
343
+
344
+ /*
345
+ * convolution algorithm (which requires potentially some workspace)
346
+ */
347
+
348
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
349
+ cudnnStatus_t CUDNNWINAPI
350
+ cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle,
351
+ const cudnnFilterDescriptor_t wDesc,
352
+ const cudnnTensorDescriptor_t dyDesc,
353
+ const cudnnConvolutionDescriptor_t convDesc,
354
+ const cudnnTensorDescriptor_t dxDesc,
355
+ cudnnConvolutionBwdDataAlgo_t algo,
356
+ size_t *sizeInBytes);
357
+
358
+ cudnnStatus_t CUDNNWINAPI
359
+ cudnnConvolutionBackwardData(cudnnHandle_t handle,
360
+ const void *alpha,
361
+ const cudnnFilterDescriptor_t wDesc,
362
+ const void *w,
363
+ const cudnnTensorDescriptor_t dyDesc,
364
+ const void *dy,
365
+ const cudnnConvolutionDescriptor_t convDesc,
366
+ cudnnConvolutionBwdDataAlgo_t algo,
367
+ void *workSpace,
368
+ size_t workSpaceSizeInBytes,
369
+ const void *beta,
370
+ const cudnnTensorDescriptor_t dxDesc,
371
+ void *dx);
372
+
373
+ /* Helper function to calculate folding descriptors for dgrad */
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle,
376
+ const cudnnFilterDescriptor_t filterDesc,
377
+ const cudnnTensorDescriptor_t diffDesc,
378
+ const cudnnConvolutionDescriptor_t convDesc,
379
+ const cudnnTensorDescriptor_t gradDesc,
380
+ const cudnnTensorFormat_t transformFormat,
381
+ cudnnFilterDescriptor_t foldedFilterDesc,
382
+ cudnnTensorDescriptor_t paddedDiffDesc,
383
+ cudnnConvolutionDescriptor_t foldedConvDesc,
384
+ cudnnTensorDescriptor_t foldedGradDesc,
385
+ cudnnTensorTransformDescriptor_t filterFoldTransDesc,
386
+ cudnnTensorTransformDescriptor_t diffPadTransDesc,
387
+ cudnnTensorTransformDescriptor_t gradFoldTransDesc,
388
+ cudnnTensorTransformDescriptor_t gradUnfoldTransDesc);
389
+
390
+ /* cudnnFusedOps... */
391
+ struct cudnnFusedOpsConstParamStruct;
392
+ typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t;
393
+
394
+ struct cudnnFusedOpsVariantParamStruct;
395
+ typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t;
396
+
397
+ struct cudnnFusedOpsPlanStruct;
398
+ typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t;
399
+
400
+ typedef enum {
401
+ /* each op in [ ] can be disabled by passing NULL ptr */
402
+ /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */
403
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0,
404
+ /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */
405
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1,
406
+ /* utility for BN training in BN-conv fusion */
407
+ /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */
408
+ /* optionally update running stats and generate saved stats */
409
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2,
410
+ /* utility for BN inference in BN-conv fusion */
411
+ /* computes the equivalent scale and bias from learned running stats and learned scale, bias */
412
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3,
413
+ /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */
414
+ CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4,
415
+ /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */
416
+ CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5,
417
+ /* reserved for future use */
418
+ CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6,
419
+ } cudnnFusedOps_t;
420
+
421
+ typedef enum {
422
+ /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */
423
+ /* get XDESC: pass previously created cudnnTensorDescriptor_t */
424
+ CUDNN_PARAM_XDESC = 0,
425
+ /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
426
+ CUDNN_PARAM_XDATA_PLACEHOLDER = 1,
427
+ /* set/get BN_MODE: pass cudnnBatchNormMode_t* */
428
+ CUDNN_PARAM_BN_MODE = 2,
429
+ /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
430
+ /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
431
+ CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3,
432
+ /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
433
+ CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4,
434
+ /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
435
+ CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5,
436
+ /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */
437
+ /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */
438
+ CUDNN_PARAM_ACTIVATION_DESC = 6,
439
+ /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */
440
+ /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */
441
+ CUDNN_PARAM_CONV_DESC = 7,
442
+ /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */
443
+ /* get WDESC: pass previously created cudnnFilterDescriptor_t */
444
+ CUDNN_PARAM_WDESC = 8,
445
+ /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
446
+ CUDNN_PARAM_WDATA_PLACEHOLDER = 9,
447
+ /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */
448
+ /* get DWDESC: pass previously created cudnnFilterDescriptor_t */
449
+ CUDNN_PARAM_DWDESC = 10,
450
+ /* set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
451
+ CUDNN_PARAM_DWDATA_PLACEHOLDER = 11,
452
+ /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */
453
+ /* get YDESC: pass previously created cudnnTensorDescriptor_t */
454
+ CUDNN_PARAM_YDESC = 12,
455
+ /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
456
+ CUDNN_PARAM_YDATA_PLACEHOLDER = 13,
457
+ /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */
458
+ /* get DYDESC: pass previously created cudnnTensorDescriptor_t */
459
+ CUDNN_PARAM_DYDESC = 14,
460
+ /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
461
+ CUDNN_PARAM_DYDATA_PLACEHOLDER = 15,
462
+ /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */
463
+ /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */
464
+ CUDNN_PARAM_YSTATS_DESC = 16,
465
+ /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
466
+ CUDNN_PARAM_YSUM_PLACEHOLDER = 17,
467
+ /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
468
+ CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18,
469
+ /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */
470
+ /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */
471
+ CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19,
472
+ /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
473
+ CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20,
474
+ /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
475
+ CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21,
476
+ /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
477
+ CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22,
478
+ /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
479
+ CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23,
480
+ /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
481
+ CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24,
482
+ /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
483
+ CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25,
484
+
485
+ /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */
486
+ /* get ZDESC: pass previously created cudnnTensorDescriptor_t */
487
+ CUDNN_PARAM_ZDESC = 26,
488
+ /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
489
+ CUDNN_PARAM_ZDATA_PLACEHOLDER = 27,
490
+ /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
491
+ /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
492
+ CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28,
493
+ /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
494
+ CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29,
495
+ /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
496
+ CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30,
497
+
498
+ /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */
499
+ /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */
500
+ CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31,
501
+ /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
502
+ CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32,
503
+
504
+ /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */
505
+ /* get DXDESC: pass previously created cudnnTensorDescriptor_t */
506
+ CUDNN_PARAM_DXDESC = 33,
507
+ /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
508
+ CUDNN_PARAM_DXDATA_PLACEHOLDER = 34,
509
+ /* set DZDESC: pass previously initialized cudnnTensorDescriptor_t */
510
+ /* get DZDESC: pass previously created cudnnTensorDescriptor_t */
511
+ CUDNN_PARAM_DZDESC = 35,
512
+ /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
513
+ CUDNN_PARAM_DZDATA_PLACEHOLDER = 36,
514
+ /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
515
+ CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37,
516
+ /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
517
+ CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38,
518
+ } cudnnFusedOpsConstParamLabel_t;
519
+
520
+ typedef enum {
521
+ CUDNN_PTR_NULL = 0,
522
+ CUDNN_PTR_ELEM_ALIGNED = 1,
523
+ CUDNN_PTR_16B_ALIGNED = 2,
524
+ } cudnnFusedOpsPointerPlaceHolder_t;
525
+
526
+ typedef enum {
527
+ /* set: pass void* pointing to dev memory */
528
+ /* get: pass void** pointing to host memory */
529
+ CUDNN_PTR_XDATA = 0,
530
+ CUDNN_PTR_BN_EQSCALE = 1,
531
+ CUDNN_PTR_BN_EQBIAS = 2,
532
+ CUDNN_PTR_WDATA = 3,
533
+ CUDNN_PTR_DWDATA = 4,
534
+ CUDNN_PTR_YDATA = 5,
535
+ CUDNN_PTR_DYDATA = 6,
536
+ CUDNN_PTR_YSUM = 7,
537
+ CUDNN_PTR_YSQSUM = 8,
538
+ CUDNN_PTR_WORKSPACE = 9,
539
+ CUDNN_PTR_BN_SCALE = 10,
540
+ CUDNN_PTR_BN_BIAS = 11,
541
+ CUDNN_PTR_BN_SAVED_MEAN = 12,
542
+ CUDNN_PTR_BN_SAVED_INVSTD = 13,
543
+ CUDNN_PTR_BN_RUNNING_MEAN = 14,
544
+ CUDNN_PTR_BN_RUNNING_VAR = 15,
545
+ CUDNN_PTR_ZDATA = 16,
546
+ CUDNN_PTR_BN_Z_EQSCALE = 17,
547
+ CUDNN_PTR_BN_Z_EQBIAS = 18,
548
+ CUDNN_PTR_ACTIVATION_BITMASK = 19,
549
+ CUDNN_PTR_DXDATA = 20,
550
+ CUDNN_PTR_DZDATA = 21,
551
+ CUDNN_PTR_BN_DSCALE = 22,
552
+ CUDNN_PTR_BN_DBIAS = 23,
553
+
554
+ /* set/get: pass size_t* pointing to host memory */
555
+ CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100,
556
+ /* set/get: pass int64_t* pointing to host memory */
557
+ CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101,
558
+ /* set/get: pass double* pointing to host memory */
559
+ CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102,
560
+ /* set/get: pass double* pointing to host memory */
561
+ CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103,
562
+ } cudnnFusedOpsVariantParamLabel_t;
563
+
564
+ cudnnStatus_t CUDNNWINAPI
565
+ cudnnCnnInferVersionCheck(void);
566
+
567
+ #if defined(__cplusplus)
568
+ }
569
+ #endif
570
+
571
+ #endif /* CUDNN_CNN_INFER_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_train : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #pragma once
55
+ #include <cuda_runtime.h>
56
+ #include <stdint.h>
57
+
58
+ #include "cudnn_version.h"
59
+ #include "cudnn_ops_infer.h"
60
+ #include "cudnn_ops_train.h"
61
+ #include "cudnn_cnn_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_CNN_TRAIN_MAJOR 8
65
+ #define CUDNN_CNN_TRAIN_MINOR 9
66
+ #define CUDNN_CNN_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN CNN INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* helper function to provide the convolution backward filter algo that fit best the requirement */
78
+
79
+ typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct {
80
+ cudnnConvolutionBwdFilterAlgo_t algo;
81
+ cudnnStatus_t status;
82
+ float time;
83
+ size_t memory;
84
+ cudnnDeterminism_t determinism;
85
+ cudnnMathType_t mathType;
86
+ int reserved[3];
87
+ } cudnnConvolutionBwdFilterAlgoPerf_t;
88
+
89
+ cudnnStatus_t CUDNNWINAPI
90
+ cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count);
91
+
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
94
+ const cudnnTensorDescriptor_t xDesc,
95
+ const cudnnTensorDescriptor_t dyDesc,
96
+ const cudnnConvolutionDescriptor_t convDesc,
97
+ const cudnnFilterDescriptor_t dwDesc,
98
+ const int requestedAlgoCount,
99
+ int *returnedAlgoCount,
100
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
101
+
102
+ cudnnStatus_t CUDNNWINAPI
103
+ cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle,
104
+ const cudnnTensorDescriptor_t xDesc,
105
+ const void *x,
106
+ const cudnnTensorDescriptor_t dyDesc,
107
+ const void *y,
108
+ const cudnnConvolutionDescriptor_t convDesc,
109
+ const cudnnFilterDescriptor_t dwDesc,
110
+ void *dw,
111
+ const int requestedAlgoCount,
112
+ int *returnedAlgoCount,
113
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults,
114
+ void *workSpace,
115
+ size_t workSpaceSizeInBytes);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle,
119
+ const cudnnTensorDescriptor_t srcDesc,
120
+ const cudnnTensorDescriptor_t diffDesc,
121
+ const cudnnConvolutionDescriptor_t convDesc,
122
+ const cudnnFilterDescriptor_t gradDesc,
123
+ const int requestedAlgoCount,
124
+ int *returnedAlgoCount,
125
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
126
+
127
+ /*
128
+ * convolution algorithm (which requires potentially some workspace)
129
+ */
130
+
131
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
132
+ cudnnStatus_t CUDNNWINAPI
133
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle,
134
+ const cudnnTensorDescriptor_t xDesc,
135
+ const cudnnTensorDescriptor_t dyDesc,
136
+ const cudnnConvolutionDescriptor_t convDesc,
137
+ const cudnnFilterDescriptor_t gradDesc,
138
+ cudnnConvolutionBwdFilterAlgo_t algo,
139
+ size_t *sizeInBytes);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnConvolutionBackwardFilter(cudnnHandle_t handle,
143
+ const void *alpha,
144
+ const cudnnTensorDescriptor_t xDesc,
145
+ const void *x,
146
+ const cudnnTensorDescriptor_t dyDesc,
147
+ const void *dy,
148
+ const cudnnConvolutionDescriptor_t convDesc,
149
+ cudnnConvolutionBwdFilterAlgo_t algo,
150
+ void *workSpace,
151
+ size_t workSpaceSizeInBytes,
152
+ const void *beta,
153
+ const cudnnFilterDescriptor_t dwDesc,
154
+ void *dw);
155
+
156
+ /* Function to compute the bias gradient for batch convolution */
157
+ cudnnStatus_t CUDNNWINAPI
158
+ cudnnConvolutionBackwardBias(cudnnHandle_t handle,
159
+ const void *alpha,
160
+ const cudnnTensorDescriptor_t dyDesc,
161
+ const void *dy,
162
+ const void *beta,
163
+ const cudnnTensorDescriptor_t dbDesc,
164
+ void *db);
165
+
166
+ cudnnStatus_t CUDNNWINAPI
167
+ cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops);
168
+
169
+ cudnnStatus_t CUDNNWINAPI
170
+ cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack);
171
+
172
+ cudnnStatus_t CUDNNWINAPI
173
+ cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack,
174
+ cudnnFusedOpsConstParamLabel_t paramLabel,
175
+ const void *param);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack,
179
+ cudnnFusedOpsConstParamLabel_t paramLabel,
180
+ void *param,
181
+ int *isNULL);
182
+
183
+ cudnnStatus_t CUDNNWINAPI
184
+ cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops);
185
+
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack);
188
+
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack,
191
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
192
+ void *ptr);
193
+
194
+ cudnnStatus_t CUDNNWINAPI
195
+ cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack,
196
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
197
+ void *ptr);
198
+
199
+ cudnnStatus_t CUDNNWINAPI
200
+ cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan);
204
+
205
+ cudnnStatus_t CUDNNWINAPI
206
+ cudnnMakeFusedOpsPlan(cudnnHandle_t handle,
207
+ cudnnFusedOpsPlan_t plan,
208
+ const cudnnFusedOpsConstParamPack_t constPack,
209
+ size_t *workspaceSizeInBytes);
210
+
211
+ cudnnStatus_t CUDNNWINAPI
212
+ cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack);
213
+
214
+ cudnnStatus_t CUDNNWINAPI
215
+ cudnnCnnTrainVersionCheck(void);
216
+
217
+ #if defined(__cplusplus)
218
+ }
219
+ #endif
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_train : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #pragma once
55
+ #include <cuda_runtime.h>
56
+ #include <stdint.h>
57
+
58
+ #include "cudnn_version.h"
59
+ #include "cudnn_ops_infer.h"
60
+ #include "cudnn_ops_train.h"
61
+ #include "cudnn_cnn_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_CNN_TRAIN_MAJOR 8
65
+ #define CUDNN_CNN_TRAIN_MINOR 9
66
+ #define CUDNN_CNN_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN CNN INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* helper function to provide the convolution backward filter algo that fit best the requirement */
78
+
79
+ typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct {
80
+ cudnnConvolutionBwdFilterAlgo_t algo;
81
+ cudnnStatus_t status;
82
+ float time;
83
+ size_t memory;
84
+ cudnnDeterminism_t determinism;
85
+ cudnnMathType_t mathType;
86
+ int reserved[3];
87
+ } cudnnConvolutionBwdFilterAlgoPerf_t;
88
+
89
+ cudnnStatus_t CUDNNWINAPI
90
+ cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count);
91
+
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
94
+ const cudnnTensorDescriptor_t xDesc,
95
+ const cudnnTensorDescriptor_t dyDesc,
96
+ const cudnnConvolutionDescriptor_t convDesc,
97
+ const cudnnFilterDescriptor_t dwDesc,
98
+ const int requestedAlgoCount,
99
+ int *returnedAlgoCount,
100
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
101
+
102
+ cudnnStatus_t CUDNNWINAPI
103
+ cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle,
104
+ const cudnnTensorDescriptor_t xDesc,
105
+ const void *x,
106
+ const cudnnTensorDescriptor_t dyDesc,
107
+ const void *y,
108
+ const cudnnConvolutionDescriptor_t convDesc,
109
+ const cudnnFilterDescriptor_t dwDesc,
110
+ void *dw,
111
+ const int requestedAlgoCount,
112
+ int *returnedAlgoCount,
113
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults,
114
+ void *workSpace,
115
+ size_t workSpaceSizeInBytes);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle,
119
+ const cudnnTensorDescriptor_t srcDesc,
120
+ const cudnnTensorDescriptor_t diffDesc,
121
+ const cudnnConvolutionDescriptor_t convDesc,
122
+ const cudnnFilterDescriptor_t gradDesc,
123
+ const int requestedAlgoCount,
124
+ int *returnedAlgoCount,
125
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
126
+
127
+ /*
128
+ * convolution algorithm (which requires potentially some workspace)
129
+ */
130
+
131
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
132
+ cudnnStatus_t CUDNNWINAPI
133
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle,
134
+ const cudnnTensorDescriptor_t xDesc,
135
+ const cudnnTensorDescriptor_t dyDesc,
136
+ const cudnnConvolutionDescriptor_t convDesc,
137
+ const cudnnFilterDescriptor_t gradDesc,
138
+ cudnnConvolutionBwdFilterAlgo_t algo,
139
+ size_t *sizeInBytes);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnConvolutionBackwardFilter(cudnnHandle_t handle,
143
+ const void *alpha,
144
+ const cudnnTensorDescriptor_t xDesc,
145
+ const void *x,
146
+ const cudnnTensorDescriptor_t dyDesc,
147
+ const void *dy,
148
+ const cudnnConvolutionDescriptor_t convDesc,
149
+ cudnnConvolutionBwdFilterAlgo_t algo,
150
+ void *workSpace,
151
+ size_t workSpaceSizeInBytes,
152
+ const void *beta,
153
+ const cudnnFilterDescriptor_t dwDesc,
154
+ void *dw);
155
+
156
+ /* Function to compute the bias gradient for batch convolution */
157
+ cudnnStatus_t CUDNNWINAPI
158
+ cudnnConvolutionBackwardBias(cudnnHandle_t handle,
159
+ const void *alpha,
160
+ const cudnnTensorDescriptor_t dyDesc,
161
+ const void *dy,
162
+ const void *beta,
163
+ const cudnnTensorDescriptor_t dbDesc,
164
+ void *db);
165
+
166
+ cudnnStatus_t CUDNNWINAPI
167
+ cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops);
168
+
169
+ cudnnStatus_t CUDNNWINAPI
170
+ cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack);
171
+
172
+ cudnnStatus_t CUDNNWINAPI
173
+ cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack,
174
+ cudnnFusedOpsConstParamLabel_t paramLabel,
175
+ const void *param);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack,
179
+ cudnnFusedOpsConstParamLabel_t paramLabel,
180
+ void *param,
181
+ int *isNULL);
182
+
183
+ cudnnStatus_t CUDNNWINAPI
184
+ cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops);
185
+
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack);
188
+
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack,
191
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
192
+ void *ptr);
193
+
194
+ cudnnStatus_t CUDNNWINAPI
195
+ cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack,
196
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
197
+ void *ptr);
198
+
199
+ cudnnStatus_t CUDNNWINAPI
200
+ cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan);
204
+
205
+ cudnnStatus_t CUDNNWINAPI
206
+ cudnnMakeFusedOpsPlan(cudnnHandle_t handle,
207
+ cudnnFusedOpsPlan_t plan,
208
+ const cudnnFusedOpsConstParamPack_t constPack,
209
+ size_t *workspaceSizeInBytes);
210
+
211
+ cudnnStatus_t CUDNNWINAPI
212
+ cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack);
213
+
214
+ cudnnStatus_t CUDNNWINAPI
215
+ cudnnCnnTrainVersionCheck(void);
216
+
217
+ #if defined(__cplusplus)
218
+ }
219
+ #endif
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer.h ADDED
@@ -0,0 +1,1183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_infer : cuDNN's basic definitions and inference operations.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_INFER_H_)
55
+ #define CUDNN_OPS_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+
62
+ /* These version numbers are autogenerated, do not edit manually. */
63
+ #define CUDNN_OPS_INFER_MAJOR 8
64
+ #define CUDNN_OPS_INFER_MINOR 9
65
+ #define CUDNN_OPS_INFER_PATCH 2
66
+
67
+ #if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \
68
+ (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL)
69
+ #error Version mismatch in cuDNN OPS INFER!!!
70
+ #endif
71
+
72
+ #ifndef CUDNNWINAPI
73
+ #ifdef _WIN32
74
+ #define CUDNNWINAPI __stdcall
75
+ #else
76
+ #define CUDNNWINAPI
77
+ #endif
78
+ #endif
79
+
80
+ /* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */
81
+ #if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__))
82
+ /* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */
83
+ #define CUDNN_DEPRECATED __attribute__((deprecated))
84
+ #elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER)
85
+ /* Microsoft Visual C++ */
86
+ #define CUDNN_DEPRECATED __declspec(deprecated)
87
+ #elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L)
88
+ /* C++14 compilers */
89
+ #define CUDNN_DEPRECATED [[deprecated]]
90
+ #else
91
+ /* No support for the deprecated attribute */
92
+ #define CUDNN_DEPRECATED
93
+ #endif
94
+
95
+ #if defined(__cplusplus)
96
+ extern "C" {
97
+ #endif
98
+
99
+ struct cudnnContext;
100
+ typedef struct cudnnContext *cudnnHandle_t;
101
+
102
+ size_t CUDNNWINAPI
103
+ cudnnGetVersion(void);
104
+
105
+ size_t CUDNNWINAPI
106
+ cudnnGetMaxDeviceVersion(void);
107
+
108
+ /* Returns CUDA Runtime version statically linked against cudnn */
109
+ size_t CUDNNWINAPI
110
+ cudnnGetCudartVersion(void);
111
+
112
+ /*
113
+ * CUDNN return codes
114
+ */
115
+ typedef enum {
116
+ CUDNN_STATUS_SUCCESS = 0,
117
+ CUDNN_STATUS_NOT_INITIALIZED = 1,
118
+ CUDNN_STATUS_ALLOC_FAILED = 2,
119
+ CUDNN_STATUS_BAD_PARAM = 3,
120
+ CUDNN_STATUS_INTERNAL_ERROR = 4,
121
+ CUDNN_STATUS_INVALID_VALUE = 5,
122
+ CUDNN_STATUS_ARCH_MISMATCH = 6,
123
+ CUDNN_STATUS_MAPPING_ERROR = 7,
124
+ CUDNN_STATUS_EXECUTION_FAILED = 8,
125
+ CUDNN_STATUS_NOT_SUPPORTED = 9,
126
+ CUDNN_STATUS_LICENSE_ERROR = 10,
127
+ CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11,
128
+ CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12,
129
+ CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13,
130
+ CUDNN_STATUS_VERSION_MISMATCH = 14,
131
+ } cudnnStatus_t;
132
+
133
+ /* human-readable error messages */
134
+ const char *CUDNNWINAPI
135
+ cudnnGetErrorString(cudnnStatus_t status);
136
+
137
+ /* Forward definition in this version only */
138
+ typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t;
139
+
140
+ typedef enum {
141
+ CUDNN_ERRQUERY_RAWCODE = 0,
142
+ CUDNN_ERRQUERY_NONBLOCKING = 1,
143
+ CUDNN_ERRQUERY_BLOCKING = 2,
144
+ } cudnnErrQueryMode_t;
145
+
146
+ cudnnStatus_t CUDNNWINAPI
147
+ cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag);
148
+
149
+ #ifndef __LIBRARY_TYPES_H__
150
+
151
+ typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType;
152
+
153
+ #endif
154
+
155
+ cudnnStatus_t CUDNNWINAPI
156
+ cudnnGetProperty(libraryPropertyType type, int *value);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnCreate(cudnnHandle_t *handle);
160
+ cudnnStatus_t CUDNNWINAPI
161
+ cudnnDestroy(cudnnHandle_t handle);
162
+ cudnnStatus_t CUDNNWINAPI
163
+ cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId);
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId);
166
+
167
+ /* Data structures to represent Image/Filter and the Neural Network Layer */
168
+ typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t;
169
+ typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t;
170
+ typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t;
171
+ typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t;
172
+ typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t;
173
+ typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t;
174
+ typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t;
175
+ typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t;
176
+ typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t;
177
+ typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t;
178
+ /*
179
+ * CUDNN data type
180
+ */
181
+ typedef enum {
182
+ CUDNN_DATA_FLOAT = 0,
183
+ CUDNN_DATA_DOUBLE = 1,
184
+ CUDNN_DATA_HALF = 2,
185
+ CUDNN_DATA_INT8 = 3,
186
+ CUDNN_DATA_INT32 = 4,
187
+ CUDNN_DATA_INT8x4 = 5,
188
+ CUDNN_DATA_UINT8 = 6,
189
+ CUDNN_DATA_UINT8x4 = 7,
190
+ CUDNN_DATA_INT8x32 = 8,
191
+ CUDNN_DATA_BFLOAT16 = 9,
192
+ CUDNN_DATA_INT64 = 10,
193
+ CUDNN_DATA_BOOLEAN = 11,
194
+ CUDNN_DATA_FP8_E4M3 = 12,
195
+ CUDNN_DATA_FP8_E5M2 = 13,
196
+ CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14,
197
+ } cudnnDataType_t;
198
+
199
+ /*
200
+ * CUDNN math type
201
+ */
202
+ typedef enum {
203
+ CUDNN_DEFAULT_MATH = 0,
204
+ CUDNN_TENSOR_OP_MATH = 1,
205
+ CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2,
206
+ CUDNN_FMA_MATH = 3,
207
+ } cudnnMathType_t;
208
+
209
+ /*
210
+ * CUDNN propagate Nan
211
+ */
212
+ typedef enum {
213
+ CUDNN_NOT_PROPAGATE_NAN = 0,
214
+ CUDNN_PROPAGATE_NAN = 1,
215
+ } cudnnNanPropagation_t;
216
+
217
+ /*
218
+ * CUDNN Determinism
219
+ */
220
+ typedef enum {
221
+ CUDNN_NON_DETERMINISTIC = 0,
222
+ CUDNN_DETERMINISTIC = 1,
223
+ } cudnnDeterminism_t;
224
+
225
+ /* Maximum supported number of tensor dimensions */
226
+ #define CUDNN_DIM_MAX 8
227
+
228
+ /* Create an instance of a generic Tensor descriptor */
229
+ cudnnStatus_t CUDNNWINAPI
230
+ cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc);
231
+
232
+ typedef enum {
233
+ CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */
234
+ CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/
235
+ CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */
236
+ } cudnnTensorFormat_t;
237
+
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc,
240
+ cudnnTensorFormat_t format,
241
+ cudnnDataType_t dataType, /* image data type */
242
+ int n, /* number of inputs (batch size) */
243
+ int c, /* number of input feature maps */
244
+ int h, /* height of input section */
245
+ int w); /* width of input section */
246
+
247
+ cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
249
+ cudnnDataType_t dataType, /* image data type */
250
+ int n, /* number of inputs (batch size) */
251
+ int c, /* number of input feature maps */
252
+ int h, /* height of input section */
253
+ int w, /* width of input section */
254
+ int nStride,
255
+ int cStride,
256
+ int hStride,
257
+ int wStride);
258
+
259
+ cudnnStatus_t CUDNNWINAPI
260
+ cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc,
261
+ cudnnDataType_t *dataType, /* image data type */
262
+ int *n, /* number of inputs (batch size) */
263
+ int *c, /* number of input feature maps */
264
+ int *h, /* height of input section */
265
+ int *w, /* width of input section */
266
+ int *nStride,
267
+ int *cStride,
268
+ int *hStride,
269
+ int *wStride);
270
+
271
+ cudnnStatus_t CUDNNWINAPI
272
+ cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc,
273
+ cudnnDataType_t dataType,
274
+ int nbDims,
275
+ const int dimA[],
276
+ const int strideA[]);
277
+
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
280
+ cudnnTensorFormat_t format,
281
+ cudnnDataType_t dataType,
282
+ int nbDims,
283
+ const int dimA[]);
284
+
285
+ cudnnStatus_t CUDNNWINAPI
286
+ cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc,
287
+ int nbDimsRequested,
288
+ cudnnDataType_t *dataType,
289
+ int *nbDims,
290
+ int dimA[],
291
+ int strideA[]);
292
+
293
+ cudnnStatus_t CUDNNWINAPI
294
+ cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size);
295
+
296
+ /* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride
297
+
298
+ 1)Example of all images in row major order one batch of features after the other (with an optional padding on row)
299
+ input_stride : c x h x h_stride
300
+ feature_stride : h x h_stride
301
+ h_stride : >= w ( h_stride = w if no padding)
302
+ w_stride : 1
303
+
304
+
305
+ 2)Example of all images in row major with features maps interleaved
306
+ input_stride : c x h x h_stride
307
+ feature_stride : 1
308
+ h_stride : w x c
309
+ w_stride : c
310
+
311
+ 3)Example of all images in column major order one batch of features after the other (with optional padding on column)
312
+ input_stride : c x w x w_stride
313
+ feature_stride : w x w_stride
314
+ h_stride : 1
315
+ w_stride : >= h
316
+
317
+ */
318
+
319
+ /* Destroy an instance of Tensor4d descriptor */
320
+ cudnnStatus_t CUDNNWINAPI
321
+ cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc);
322
+
323
+ /* Fold/unfold transforms */
324
+ typedef enum {
325
+ CUDNN_TRANSFORM_FOLD = 0U,
326
+ CUDNN_TRANSFORM_UNFOLD = 1U,
327
+ } cudnnFoldingDirection_t;
328
+
329
+ /** Create a destination descriptor for cudnnTransformTensor */
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc,
332
+ const cudnnTensorDescriptor_t srcDesc,
333
+ cudnnTensorDescriptor_t destDesc,
334
+ size_t *destSizeInBytes);
335
+
336
+ /** Create an empty tensor transform descriptor */
337
+ cudnnStatus_t CUDNNWINAPI
338
+ cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc);
339
+
340
+ /** Initialize a previously created tensor transform descriptor. */
341
+ cudnnStatus_t CUDNNWINAPI
342
+ cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
343
+ const uint32_t nbDims,
344
+ const cudnnTensorFormat_t destFormat,
345
+ const int32_t padBeforeA[],
346
+ const int32_t padAfterA[],
347
+ const uint32_t foldA[],
348
+ const cudnnFoldingDirection_t direction);
349
+
350
+ /**
351
+ * Retrieves the values stored in a previously initialized tensor transform
352
+ * descriptor.
353
+ */
354
+ cudnnStatus_t CUDNNWINAPI
355
+ cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
356
+ uint32_t nbDimsRequested,
357
+ cudnnTensorFormat_t *destFormat,
358
+ int32_t padBeforeA[],
359
+ int32_t padAfterA[],
360
+ uint32_t foldA[],
361
+ cudnnFoldingDirection_t *direction);
362
+
363
+ /**
364
+ * Destroys a previously created tensor transform descriptor.
365
+ */
366
+ cudnnStatus_t CUDNNWINAPI
367
+ cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc);
368
+
369
+ /* Tensor layout conversion helper (y = alpha * x + beta * y) */
370
+ cudnnStatus_t CUDNNWINAPI
371
+ cudnnTransformTensor(cudnnHandle_t handle,
372
+ const void *alpha,
373
+ const cudnnTensorDescriptor_t xDesc,
374
+ const void *x,
375
+ const void *beta,
376
+ const cudnnTensorDescriptor_t yDesc,
377
+ void *y);
378
+
379
+ cudnnStatus_t CUDNNWINAPI
380
+ cudnnTransformTensorEx(cudnnHandle_t handle,
381
+ const cudnnTensorTransformDescriptor_t transDesc,
382
+ const void *alpha,
383
+ const cudnnTensorDescriptor_t srcDesc,
384
+ const void *srcData,
385
+ const void *beta,
386
+ const cudnnTensorDescriptor_t destDesc,
387
+ void *destData);
388
+
389
+ /* Tensor Bias addition : C = alpha * A + beta * C */
390
+ cudnnStatus_t CUDNNWINAPI
391
+ cudnnAddTensor(cudnnHandle_t handle,
392
+ const void *alpha,
393
+ const cudnnTensorDescriptor_t aDesc,
394
+ const void *A,
395
+ const void *beta,
396
+ const cudnnTensorDescriptor_t cDesc,
397
+ void *C);
398
+
399
+ /*
400
+ * CUDNN OpTensor op type
401
+ */
402
+ typedef enum {
403
+ CUDNN_OP_TENSOR_ADD = 0,
404
+ CUDNN_OP_TENSOR_MUL = 1,
405
+ CUDNN_OP_TENSOR_MIN = 2,
406
+ CUDNN_OP_TENSOR_MAX = 3,
407
+ CUDNN_OP_TENSOR_SQRT = 4,
408
+ CUDNN_OP_TENSOR_NOT = 5,
409
+ } cudnnOpTensorOp_t;
410
+
411
+ cudnnStatus_t CUDNNWINAPI
412
+ cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc);
413
+
414
+ cudnnStatus_t CUDNNWINAPI
415
+ cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc,
416
+ cudnnOpTensorOp_t opTensorOp,
417
+ cudnnDataType_t opTensorCompType,
418
+ cudnnNanPropagation_t opTensorNanOpt);
419
+
420
+ cudnnStatus_t CUDNNWINAPI
421
+ cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc,
422
+ cudnnOpTensorOp_t *opTensorOp,
423
+ cudnnDataType_t *opTensorCompType,
424
+ cudnnNanPropagation_t *opTensorNanOpt);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc);
428
+
429
+ /* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */
430
+ /* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */
431
+ cudnnStatus_t CUDNNWINAPI
432
+ cudnnOpTensor(cudnnHandle_t handle,
433
+ const cudnnOpTensorDescriptor_t opTensorDesc,
434
+ const void *alpha1,
435
+ const cudnnTensorDescriptor_t aDesc,
436
+ const void *A,
437
+ const void *alpha2,
438
+ const cudnnTensorDescriptor_t bDesc,
439
+ const void *B,
440
+ const void *beta,
441
+ const cudnnTensorDescriptor_t cDesc,
442
+ void *C);
443
+
444
+ /*
445
+ * CUDNN ReduceTensor op type
446
+ */
447
+ typedef enum {
448
+ CUDNN_REDUCE_TENSOR_ADD = 0,
449
+ CUDNN_REDUCE_TENSOR_MUL = 1,
450
+ CUDNN_REDUCE_TENSOR_MIN = 2,
451
+ CUDNN_REDUCE_TENSOR_MAX = 3,
452
+ CUDNN_REDUCE_TENSOR_AMAX = 4,
453
+ CUDNN_REDUCE_TENSOR_AVG = 5,
454
+ CUDNN_REDUCE_TENSOR_NORM1 = 6,
455
+ CUDNN_REDUCE_TENSOR_NORM2 = 7,
456
+ CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8,
457
+ } cudnnReduceTensorOp_t;
458
+
459
+ /*
460
+ * CUDNN ReduceTensor indices type
461
+ */
462
+ typedef enum {
463
+ CUDNN_REDUCE_TENSOR_NO_INDICES = 0,
464
+ CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1,
465
+ } cudnnReduceTensorIndices_t;
466
+
467
+ /*
468
+ * CUDNN tensor indices type size (all unsigned)
469
+ * Currently not supported, default is 32 bit unsigned.
470
+ */
471
+ typedef enum {
472
+ CUDNN_32BIT_INDICES = 0,
473
+ CUDNN_64BIT_INDICES = 1,
474
+ CUDNN_16BIT_INDICES = 2,
475
+ CUDNN_8BIT_INDICES = 3,
476
+ } cudnnIndicesType_t;
477
+
478
+ cudnnStatus_t CUDNNWINAPI
479
+ cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc);
480
+
481
+ cudnnStatus_t CUDNNWINAPI
482
+ cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc,
483
+ cudnnReduceTensorOp_t reduceTensorOp,
484
+ cudnnDataType_t reduceTensorCompType,
485
+ cudnnNanPropagation_t reduceTensorNanOpt,
486
+ cudnnReduceTensorIndices_t reduceTensorIndices,
487
+ cudnnIndicesType_t reduceTensorIndicesType);
488
+
489
+ cudnnStatus_t CUDNNWINAPI
490
+ cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc,
491
+ cudnnReduceTensorOp_t *reduceTensorOp,
492
+ cudnnDataType_t *reduceTensorCompType,
493
+ cudnnNanPropagation_t *reduceTensorNanOpt,
494
+ cudnnReduceTensorIndices_t *reduceTensorIndices,
495
+ cudnnIndicesType_t *reduceTensorIndicesType);
496
+
497
+ cudnnStatus_t CUDNNWINAPI
498
+ cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc);
499
+
500
+ /* Helper function to return the minimum size of the index space to be passed to the reduction given the input and
501
+ * output tensors */
502
+ cudnnStatus_t CUDNNWINAPI
503
+ cudnnGetReductionIndicesSize(cudnnHandle_t handle,
504
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
505
+ const cudnnTensorDescriptor_t aDesc,
506
+ const cudnnTensorDescriptor_t cDesc,
507
+ size_t *sizeInBytes);
508
+
509
+ /* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output
510
+ * tensors */
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetReductionWorkspaceSize(cudnnHandle_t handle,
513
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
514
+ const cudnnTensorDescriptor_t aDesc,
515
+ const cudnnTensorDescriptor_t cDesc,
516
+ size_t *sizeInBytes);
517
+
518
+ /* Tensor operation : C = reduce op( alpha * A ) + beta * C */
519
+ /* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */
520
+ /* The indices space is ignored for reduce ops other than min or max. */
521
+ cudnnStatus_t CUDNNWINAPI
522
+ cudnnReduceTensor(cudnnHandle_t handle,
523
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
524
+ void *indices,
525
+ size_t indicesSizeInBytes,
526
+ void *workspace,
527
+ size_t workspaceSizeInBytes,
528
+ const void *alpha,
529
+ const cudnnTensorDescriptor_t aDesc,
530
+ const void *A,
531
+ const void *beta,
532
+ const cudnnTensorDescriptor_t cDesc,
533
+ void *C);
534
+
535
+ /* Set all values of a tensor to a given value : y[i] = value[0] */
536
+ cudnnStatus_t CUDNNWINAPI
537
+ cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr);
538
+
539
+ /* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */
540
+ cudnnStatus_t CUDNNWINAPI
541
+ cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha);
542
+
543
+ /* Create an instance of FilterStruct */
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc,
549
+ cudnnDataType_t dataType, /* image data type */
550
+ cudnnTensorFormat_t format,
551
+ int k, /* number of output feature maps */
552
+ int c, /* number of input feature maps */
553
+ int h, /* height of each input filter */
554
+ int w); /* width of each input filter */
555
+
556
+ cudnnStatus_t CUDNNWINAPI
557
+ cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc,
558
+ cudnnDataType_t *dataType, /* image data type */
559
+ cudnnTensorFormat_t *format,
560
+ int *k, /* number of output feature maps */
561
+ int *c, /* number of input feature maps */
562
+ int *h, /* height of each input filter */
563
+ int *w); /* width of each input filter */
564
+
565
+ cudnnStatus_t CUDNNWINAPI
566
+ cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc,
567
+ cudnnDataType_t dataType, /* image data type */
568
+ cudnnTensorFormat_t format,
569
+ int nbDims,
570
+ const int filterDimA[]);
571
+
572
+ cudnnStatus_t CUDNNWINAPI
573
+ cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc,
574
+ int nbDimsRequested,
575
+ cudnnDataType_t *dataType, /* image data type */
576
+ cudnnTensorFormat_t *format,
577
+ int *nbDims,
578
+ int filterDimA[]);
579
+ cudnnStatus_t CUDNNWINAPI
580
+ cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size);
581
+
582
+ cudnnStatus_t CUDNNWINAPI
583
+ cudnnTransformFilter(cudnnHandle_t handle,
584
+ const cudnnTensorTransformDescriptor_t transDesc,
585
+ const void *alpha,
586
+ const cudnnFilterDescriptor_t srcDesc,
587
+ const void *srcData,
588
+ const void *beta,
589
+ const cudnnFilterDescriptor_t destDesc,
590
+ void *destData);
591
+
592
+ cudnnStatus_t CUDNNWINAPI
593
+ cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc);
594
+
595
+ /*
596
+ * softmax algorithm
597
+ */
598
+ typedef enum {
599
+ CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */
600
+ CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */
601
+ CUDNN_SOFTMAX_LOG = 2
602
+ } cudnnSoftmaxAlgorithm_t;
603
+
604
+ typedef enum {
605
+ CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */
606
+ CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */
607
+ } cudnnSoftmaxMode_t;
608
+
609
+ /* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */
610
+
611
+ /* Function to perform forward softmax */
612
+ cudnnStatus_t CUDNNWINAPI
613
+ cudnnSoftmaxForward(cudnnHandle_t handle,
614
+ cudnnSoftmaxAlgorithm_t algo,
615
+ cudnnSoftmaxMode_t mode,
616
+ const void *alpha,
617
+ const cudnnTensorDescriptor_t xDesc,
618
+ const void *x,
619
+ const void *beta,
620
+ const cudnnTensorDescriptor_t yDesc,
621
+ void *y);
622
+
623
+ /*
624
+ * pooling mode
625
+ */
626
+ typedef enum {
627
+ CUDNN_POOLING_MAX = 0,
628
+ CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */
629
+ CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */
630
+ CUDNN_POOLING_MAX_DETERMINISTIC = 3
631
+ } cudnnPoolingMode_t;
632
+
633
+ /* Create an instance of pooling descriptor */
634
+ cudnnStatus_t CUDNNWINAPI
635
+ cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc);
636
+
637
+ cudnnStatus_t CUDNNWINAPI
638
+ cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc,
639
+ cudnnPoolingMode_t mode,
640
+ cudnnNanPropagation_t maxpoolingNanOpt,
641
+ int windowHeight,
642
+ int windowWidth,
643
+ int verticalPadding,
644
+ int horizontalPadding,
645
+ int verticalStride,
646
+ int horizontalStride);
647
+
648
+ cudnnStatus_t CUDNNWINAPI
649
+ cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
650
+ cudnnPoolingMode_t *mode,
651
+ cudnnNanPropagation_t *maxpoolingNanOpt,
652
+ int *windowHeight,
653
+ int *windowWidth,
654
+ int *verticalPadding,
655
+ int *horizontalPadding,
656
+ int *verticalStride,
657
+ int *horizontalStride);
658
+
659
+ cudnnStatus_t CUDNNWINAPI
660
+ cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc,
661
+ const cudnnPoolingMode_t mode,
662
+ const cudnnNanPropagation_t maxpoolingNanOpt,
663
+ int nbDims,
664
+ const int windowDimA[],
665
+ const int paddingA[],
666
+ const int strideA[]);
667
+
668
+ cudnnStatus_t CUDNNWINAPI
669
+ cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
670
+ int nbDimsRequested,
671
+ cudnnPoolingMode_t *mode,
672
+ cudnnNanPropagation_t *maxpoolingNanOpt,
673
+ int *nbDims,
674
+ int windowDimA[],
675
+ int paddingA[],
676
+ int strideA[]);
677
+
678
+ cudnnStatus_t CUDNNWINAPI
679
+ cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
680
+ const cudnnTensorDescriptor_t inputTensorDesc,
681
+ int nbDims,
682
+ int outputTensorDimA[]);
683
+
684
+ cudnnStatus_t CUDNNWINAPI
685
+ cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
686
+ const cudnnTensorDescriptor_t inputTensorDesc,
687
+ int *n,
688
+ int *c,
689
+ int *h,
690
+ int *w);
691
+
692
+ /* Destroy an instance of pooling descriptor */
693
+ cudnnStatus_t CUDNNWINAPI
694
+ cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc);
695
+
696
+ /* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */
697
+
698
+ /* Function to perform forward pooling */
699
+ cudnnStatus_t CUDNNWINAPI
700
+ cudnnPoolingForward(cudnnHandle_t handle,
701
+ const cudnnPoolingDescriptor_t poolingDesc,
702
+ const void *alpha,
703
+ const cudnnTensorDescriptor_t xDesc,
704
+ const void *x,
705
+ const void *beta,
706
+ const cudnnTensorDescriptor_t yDesc,
707
+ void *y);
708
+
709
+ /*
710
+ * activation mode
711
+ */
712
+ typedef enum {
713
+ CUDNN_ACTIVATION_SIGMOID = 0,
714
+ CUDNN_ACTIVATION_RELU = 1,
715
+ CUDNN_ACTIVATION_TANH = 2,
716
+ CUDNN_ACTIVATION_CLIPPED_RELU = 3,
717
+ CUDNN_ACTIVATION_ELU = 4,
718
+ CUDNN_ACTIVATION_IDENTITY = 5,
719
+ CUDNN_ACTIVATION_SWISH = 6
720
+ } cudnnActivationMode_t;
721
+
722
+ /* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */
723
+ cudnnStatus_t CUDNNWINAPI
724
+ cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc);
725
+
726
+ cudnnStatus_t CUDNNWINAPI
727
+ cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc,
728
+ cudnnActivationMode_t mode,
729
+ cudnnNanPropagation_t reluNanOpt,
730
+ double coef); /* ceiling for clipped RELU, alpha for ELU */
731
+
732
+ cudnnStatus_t CUDNNWINAPI
733
+ cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc,
734
+ cudnnActivationMode_t *mode,
735
+ cudnnNanPropagation_t *reluNanOpt,
736
+ double *coef); /* ceiling for clipped RELU, alpha for ELU */
737
+
738
+ cudnnStatus_t CUDNNWINAPI
739
+ cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double swish_beta);
740
+
741
+ cudnnStatus_t CUDNNWINAPI
742
+ cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta);
743
+
744
+ cudnnStatus_t CUDNNWINAPI
745
+ cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc);
746
+
747
+ /* Function to perform forward activation */
748
+ cudnnStatus_t CUDNNWINAPI
749
+ cudnnActivationForward(cudnnHandle_t handle,
750
+ cudnnActivationDescriptor_t activationDesc,
751
+ const void *alpha,
752
+ const cudnnTensorDescriptor_t xDesc,
753
+ const void *x,
754
+ const void *beta,
755
+ const cudnnTensorDescriptor_t yDesc,
756
+ void *y);
757
+
758
+ /*
759
+ * Create an instance of LRN (Local Response Normalization) descriptor
760
+ * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper
761
+ */
762
+ cudnnStatus_t CUDNNWINAPI
763
+ cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc);
764
+
765
+ #define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */
766
+ #define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */
767
+ #define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */
768
+ #define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */
769
+
770
+ /* LRN layer mode */
771
+ typedef enum {
772
+ CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */
773
+ } cudnnLRNMode_t;
774
+
775
+ /*
776
+ * Uses a window [center-lookBehind, center+lookAhead], where
777
+ * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1.
778
+ * Values of double parameters cast to tensor data type.
779
+ */
780
+ cudnnStatus_t CUDNNWINAPI
781
+ cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK);
782
+ /*
783
+ * Retrieve the settings currently stored in an LRN layer descriptor
784
+ * Any of the provided pointers can be NULL (no corresponding value will be returned)
785
+ */
786
+ cudnnStatus_t CUDNNWINAPI
787
+ cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK);
788
+
789
+ /* Destroy an instance of LRN descriptor */
790
+ cudnnStatus_t CUDNNWINAPI
791
+ cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc);
792
+
793
+ /* LRN functions: output = alpha * normalize(x) + beta * old_y */
794
+
795
+ /* LRN cross-channel forward computation. Double parameters cast to tensor data type */
796
+ cudnnStatus_t CUDNNWINAPI
797
+ cudnnLRNCrossChannelForward(cudnnHandle_t handle,
798
+ cudnnLRNDescriptor_t normDesc,
799
+ cudnnLRNMode_t lrnMode,
800
+ const void *alpha,
801
+ const cudnnTensorDescriptor_t xDesc,
802
+ const void *x,
803
+ const void *beta,
804
+ const cudnnTensorDescriptor_t yDesc,
805
+ void *y);
806
+
807
+ typedef enum {
808
+ CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0,
809
+ } cudnnDivNormMode_t;
810
+
811
+ /* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */
812
+ cudnnStatus_t CUDNNWINAPI
813
+ cudnnDivisiveNormalizationForward(cudnnHandle_t handle,
814
+ cudnnLRNDescriptor_t normDesc,
815
+ cudnnDivNormMode_t mode,
816
+ const void *alpha,
817
+ const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */
818
+ const void *x,
819
+ const void *means, /* if NULL, means are assumed to be zero */
820
+ void *temp,
821
+ void *temp2,
822
+ const void *beta,
823
+ const cudnnTensorDescriptor_t yDesc,
824
+ void *y);
825
+
826
+ typedef enum {
827
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
828
+ CUDNN_BATCHNORM_PER_ACTIVATION = 0,
829
+
830
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
831
+ CUDNN_BATCHNORM_SPATIAL = 1,
832
+
833
+ /*
834
+ * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors).
835
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values
836
+ */
837
+ CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2,
838
+ } cudnnBatchNormMode_t;
839
+
840
+ #define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */
841
+
842
+ /*
843
+ * Derives a tensor descriptor from layer data descriptor for BatchNormalization
844
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
845
+ * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions.
846
+ */
847
+ cudnnStatus_t CUDNNWINAPI
848
+ cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc,
849
+ const cudnnTensorDescriptor_t xDesc,
850
+ cudnnBatchNormMode_t mode);
851
+
852
+ typedef enum {
853
+ CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */
854
+ CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */
855
+ CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */
856
+ } cudnnBatchNormOps_t;
857
+
858
+ /*
859
+ * Performs Batch Normalization during Inference:
860
+ * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k]
861
+ * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed
862
+ * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining
863
+ * above for notes on function arguments.
864
+ */
865
+ cudnnStatus_t CUDNNWINAPI
866
+ cudnnBatchNormalizationForwardInference(cudnnHandle_t handle,
867
+ cudnnBatchNormMode_t mode,
868
+ const void *alpha, /* alpha[0] = result blend factor */
869
+ const void *beta, /* beta[0] = dest layer blend factor */
870
+ const cudnnTensorDescriptor_t xDesc,
871
+ const void *x, /* NxCxHxW */
872
+ const cudnnTensorDescriptor_t yDesc,
873
+ void *y, /* NxCxHxW */
874
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
875
+ const void *bnScale,
876
+ const void *bnBias,
877
+ const void *estimatedMean,
878
+ const void *estimatedVariance,
879
+ double epsilon);
880
+
881
+ typedef enum {
882
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
883
+ CUDNN_NORM_PER_ACTIVATION = 0,
884
+
885
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
886
+ CUDNN_NORM_PER_CHANNEL = 1,
887
+ } cudnnNormMode_t;
888
+
889
+ typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t;
890
+
891
+ /*
892
+ * Derives a tensor descriptor from layer data descriptor for Normalization
893
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
894
+ * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions.
895
+ */
896
+ cudnnStatus_t CUDNNWINAPI
897
+ cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc,
898
+ cudnnTensorDescriptor_t derivedNormMeanVarDesc,
899
+ const cudnnTensorDescriptor_t xDesc,
900
+ cudnnNormMode_t mode,
901
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
902
+
903
+ typedef enum {
904
+ CUDNN_NORM_OPS_NORM = 0, /* do normalization only */
905
+ CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */
906
+ CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */
907
+ } cudnnNormOps_t;
908
+
909
+ /*
910
+ * Performs Normalization during Inference:
911
+ * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k]
912
+ * with normScale, normBias, runningMean, runningInvVariance tensors indexed
913
+ * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining
914
+ * above for notes on function arguments.
915
+ */
916
+ cudnnStatus_t CUDNNWINAPI
917
+ cudnnNormalizationForwardInference(cudnnHandle_t handle,
918
+ cudnnNormMode_t mode,
919
+ cudnnNormOps_t normOps,
920
+ cudnnNormAlgo_t algo,
921
+ const void *alpha, /* alpha[0] = result blend factor */
922
+ const void *beta, /* beta[0] = dest layer blend factor */
923
+ const cudnnTensorDescriptor_t xDesc,
924
+ const void *x, /* NxCxHxW */
925
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
926
+ const void *normScale,
927
+ const void *normBias,
928
+ const cudnnTensorDescriptor_t normMeanVarDesc,
929
+ const void *estimatedMean,
930
+ const void *estimatedVariance,
931
+ const cudnnTensorDescriptor_t zDesc,
932
+ const void *z,
933
+ cudnnActivationDescriptor_t activationDesc,
934
+ const cudnnTensorDescriptor_t yDesc,
935
+ void *y, /* NxCxHxW */
936
+ double epsilon,
937
+ int groupCnt); /* Place hold for future work*/
938
+
939
+ /* APIs for spatial transformer network*/
940
+ typedef enum {
941
+ CUDNN_SAMPLER_BILINEAR = 0,
942
+ } cudnnSamplerType_t;
943
+
944
+ cudnnStatus_t CUDNNWINAPI
945
+ cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc);
946
+
947
+ cudnnStatus_t CUDNNWINAPI
948
+ cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc,
949
+ cudnnSamplerType_t samplerType,
950
+ cudnnDataType_t dataType,
951
+ const int nbDims,
952
+ const int dimA[]);
953
+
954
+ cudnnStatus_t CUDNNWINAPI
955
+ cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc);
956
+
957
+ cudnnStatus_t CUDNNWINAPI
958
+ cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle,
959
+ const cudnnSpatialTransformerDescriptor_t stDesc,
960
+ const void *theta,
961
+ void *grid);
962
+
963
+ cudnnStatus_t CUDNNWINAPI
964
+ cudnnSpatialTfSamplerForward(cudnnHandle_t handle,
965
+ cudnnSpatialTransformerDescriptor_t stDesc,
966
+ const void *alpha,
967
+ const cudnnTensorDescriptor_t xDesc,
968
+ const void *x,
969
+ const void *grid,
970
+ const void *beta,
971
+ cudnnTensorDescriptor_t yDesc,
972
+ void *y);
973
+
974
+ typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t;
975
+
976
+ cudnnStatus_t CUDNNWINAPI
977
+ cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc);
978
+
979
+ cudnnStatus_t CUDNNWINAPI
980
+ cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc);
981
+
982
+ /*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */
983
+ cudnnStatus_t CUDNNWINAPI
984
+ cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes);
985
+
986
+ /*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */
987
+ cudnnStatus_t CUDNNWINAPI
988
+ cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes);
989
+
990
+ cudnnStatus_t CUDNNWINAPI
991
+ cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
992
+ cudnnHandle_t handle,
993
+ float dropout,
994
+ void *states,
995
+ size_t stateSizeInBytes,
996
+ unsigned long long seed);
997
+
998
+ /* Restores the dropout descriptor to a previously saved-off state */
999
+ cudnnStatus_t CUDNNWINAPI
1000
+ cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1001
+ cudnnHandle_t handle,
1002
+ float dropout,
1003
+ void *states,
1004
+ size_t stateSizeInBytes,
1005
+ unsigned long long seed);
1006
+
1007
+ cudnnStatus_t CUDNNWINAPI
1008
+ cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1009
+ cudnnHandle_t handle,
1010
+ float *dropout,
1011
+ void **states,
1012
+ unsigned long long *seed);
1013
+
1014
+ cudnnStatus_t CUDNNWINAPI
1015
+ cudnnDropoutForward(cudnnHandle_t handle,
1016
+ const cudnnDropoutDescriptor_t dropoutDesc,
1017
+ const cudnnTensorDescriptor_t xdesc,
1018
+ const void *x,
1019
+ const cudnnTensorDescriptor_t ydesc,
1020
+ void *y,
1021
+ void *reserveSpace,
1022
+ size_t reserveSpaceSizeInBytes);
1023
+
1024
+ /* TODO: remove */
1025
+
1026
+ typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t;
1027
+ typedef struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t;
1028
+
1029
+ /* TODO: move these enums out to the appropriate submodule */
1030
+ typedef enum {
1031
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0,
1032
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1,
1033
+ CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2,
1034
+ CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3,
1035
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4,
1036
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5,
1037
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6,
1038
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7,
1039
+ CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8
1040
+ } cudnnConvolutionFwdAlgo_t;
1041
+
1042
+ typedef enum {
1043
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */
1044
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1,
1045
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2,
1046
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */
1047
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */
1048
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5,
1049
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6,
1050
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7
1051
+ } cudnnConvolutionBwdFilterAlgo_t;
1052
+
1053
+ typedef enum {
1054
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */
1055
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1,
1056
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2,
1057
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3,
1058
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4,
1059
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5,
1060
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6
1061
+ } cudnnConvolutionBwdDataAlgo_t;
1062
+
1063
+ typedef enum {
1064
+ CUDNN_RNN_ALGO_STANDARD = 0,
1065
+ CUDNN_RNN_ALGO_PERSIST_STATIC = 1,
1066
+ CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2,
1067
+ CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3,
1068
+ CUDNN_RNN_ALGO_COUNT = 4,
1069
+ } cudnnRNNAlgo_t;
1070
+
1071
+ typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t;
1072
+
1073
+ /* TODO: remove */
1074
+ typedef struct cudnnAlgorithmUnionStruct {
1075
+ union Algorithm {
1076
+ cudnnConvolutionFwdAlgo_t convFwdAlgo;
1077
+ cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo;
1078
+ cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo;
1079
+ cudnnRNNAlgo_t RNNAlgo;
1080
+ cudnnCTCLossAlgo_t CTCLossAlgo;
1081
+ } algo;
1082
+ } cudnnAlgorithm_t;
1083
+
1084
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1085
+ cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc);
1086
+
1087
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1088
+ cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm);
1089
+
1090
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1091
+ cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm);
1092
+
1093
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1094
+ cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest);
1095
+
1096
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1097
+ cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc);
1098
+
1099
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1100
+ cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate);
1101
+
1102
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1103
+ cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf,
1104
+ cudnnAlgorithmDescriptor_t algoDesc,
1105
+ cudnnStatus_t status,
1106
+ float time,
1107
+ size_t memory);
1108
+
1109
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1110
+ cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf,
1111
+ cudnnAlgorithmDescriptor_t *algoDesc,
1112
+ cudnnStatus_t *status,
1113
+ float *time,
1114
+ size_t *memory);
1115
+
1116
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1117
+ cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy);
1118
+
1119
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1120
+ cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes);
1121
+
1122
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1123
+ cudnnSaveAlgorithm(cudnnHandle_t handle,
1124
+ cudnnAlgorithmDescriptor_t algoDesc,
1125
+ void *algoSpace,
1126
+ size_t algoSpaceSizeInBytes);
1127
+
1128
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1129
+ cudnnRestoreAlgorithm(cudnnHandle_t handle,
1130
+ void *algoSpace,
1131
+ size_t algoSpaceSizeInBytes,
1132
+ cudnnAlgorithmDescriptor_t algoDesc);
1133
+
1134
+ typedef enum {
1135
+ CUDNN_SEV_FATAL = 0,
1136
+ CUDNN_SEV_ERROR = 1,
1137
+ CUDNN_SEV_WARNING = 2,
1138
+ CUDNN_SEV_INFO = 3,
1139
+ } cudnnSeverity_t;
1140
+
1141
+ /* Message masks to be used with cudnnSetCallback() */
1142
+ #define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR)
1143
+ #define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING)
1144
+ #define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO)
1145
+
1146
+ /* struct containing useful informaiton for each API call */
1147
+ typedef struct cudnnDebugStruct {
1148
+ unsigned cudnn_version;
1149
+ cudnnStatus_t cudnnStatus;
1150
+ unsigned time_sec; /* epoch time in seconds */
1151
+ unsigned time_usec; /* microseconds part of epoch time */
1152
+ unsigned time_delta; /* time since start in seconds */
1153
+ cudnnHandle_t handle; /* cudnn handle */
1154
+ cudaStream_t stream; /* cuda stream ID */
1155
+ unsigned long long pid; /* process ID */
1156
+ unsigned long long tid; /* thread ID */
1157
+ int cudaDeviceId; /* CUDA device ID */
1158
+ int reserved[15]; /* reserved for future use */
1159
+ } cudnnDebug_t;
1160
+
1161
+ typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg);
1162
+
1163
+ cudnnStatus_t CUDNNWINAPI
1164
+ cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr);
1165
+
1166
+ cudnnStatus_t CUDNNWINAPI
1167
+ cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr);
1168
+
1169
+ /*
1170
+ * \brief Cross-library version checker.
1171
+ * This function is implemented differently in each sub-library. Each sublib
1172
+ * checks whether its own version matches that of its dependencies.
1173
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
1174
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
1175
+ */
1176
+ cudnnStatus_t CUDNNWINAPI
1177
+ cudnnOpsInferVersionCheck(void);
1178
+
1179
+ #if defined(__cplusplus)
1180
+ }
1181
+ #endif
1182
+
1183
+ #endif /* CUDNN_OPS_INFER_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h ADDED
@@ -0,0 +1,1183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_infer : cuDNN's basic definitions and inference operations.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_INFER_H_)
55
+ #define CUDNN_OPS_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+
62
+ /* These version numbers are autogenerated, do not edit manually. */
63
+ #define CUDNN_OPS_INFER_MAJOR 8
64
+ #define CUDNN_OPS_INFER_MINOR 9
65
+ #define CUDNN_OPS_INFER_PATCH 2
66
+
67
+ #if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \
68
+ (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL)
69
+ #error Version mismatch in cuDNN OPS INFER!!!
70
+ #endif
71
+
72
+ #ifndef CUDNNWINAPI
73
+ #ifdef _WIN32
74
+ #define CUDNNWINAPI __stdcall
75
+ #else
76
+ #define CUDNNWINAPI
77
+ #endif
78
+ #endif
79
+
80
+ /* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */
81
+ #if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__))
82
+ /* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */
83
+ #define CUDNN_DEPRECATED __attribute__((deprecated))
84
+ #elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER)
85
+ /* Microsoft Visual C++ */
86
+ #define CUDNN_DEPRECATED __declspec(deprecated)
87
+ #elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L)
88
+ /* C++14 compilers */
89
+ #define CUDNN_DEPRECATED [[deprecated]]
90
+ #else
91
+ /* No support for the deprecated attribute */
92
+ #define CUDNN_DEPRECATED
93
+ #endif
94
+
95
+ #if defined(__cplusplus)
96
+ extern "C" {
97
+ #endif
98
+
99
+ struct cudnnContext;
100
+ typedef struct cudnnContext *cudnnHandle_t;
101
+
102
+ size_t CUDNNWINAPI
103
+ cudnnGetVersion(void);
104
+
105
+ size_t CUDNNWINAPI
106
+ cudnnGetMaxDeviceVersion(void);
107
+
108
+ /* Returns CUDA Runtime version statically linked against cudnn */
109
+ size_t CUDNNWINAPI
110
+ cudnnGetCudartVersion(void);
111
+
112
+ /*
113
+ * CUDNN return codes
114
+ */
115
+ typedef enum {
116
+ CUDNN_STATUS_SUCCESS = 0,
117
+ CUDNN_STATUS_NOT_INITIALIZED = 1,
118
+ CUDNN_STATUS_ALLOC_FAILED = 2,
119
+ CUDNN_STATUS_BAD_PARAM = 3,
120
+ CUDNN_STATUS_INTERNAL_ERROR = 4,
121
+ CUDNN_STATUS_INVALID_VALUE = 5,
122
+ CUDNN_STATUS_ARCH_MISMATCH = 6,
123
+ CUDNN_STATUS_MAPPING_ERROR = 7,
124
+ CUDNN_STATUS_EXECUTION_FAILED = 8,
125
+ CUDNN_STATUS_NOT_SUPPORTED = 9,
126
+ CUDNN_STATUS_LICENSE_ERROR = 10,
127
+ CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11,
128
+ CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12,
129
+ CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13,
130
+ CUDNN_STATUS_VERSION_MISMATCH = 14,
131
+ } cudnnStatus_t;
132
+
133
+ /* human-readable error messages */
134
+ const char *CUDNNWINAPI
135
+ cudnnGetErrorString(cudnnStatus_t status);
136
+
137
+ /* Forward definition in this version only */
138
+ typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t;
139
+
140
+ typedef enum {
141
+ CUDNN_ERRQUERY_RAWCODE = 0,
142
+ CUDNN_ERRQUERY_NONBLOCKING = 1,
143
+ CUDNN_ERRQUERY_BLOCKING = 2,
144
+ } cudnnErrQueryMode_t;
145
+
146
+ cudnnStatus_t CUDNNWINAPI
147
+ cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag);
148
+
149
+ #ifndef __LIBRARY_TYPES_H__
150
+
151
+ typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType;
152
+
153
+ #endif
154
+
155
+ cudnnStatus_t CUDNNWINAPI
156
+ cudnnGetProperty(libraryPropertyType type, int *value);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnCreate(cudnnHandle_t *handle);
160
+ cudnnStatus_t CUDNNWINAPI
161
+ cudnnDestroy(cudnnHandle_t handle);
162
+ cudnnStatus_t CUDNNWINAPI
163
+ cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId);
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId);
166
+
167
+ /* Data structures to represent Image/Filter and the Neural Network Layer */
168
+ typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t;
169
+ typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t;
170
+ typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t;
171
+ typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t;
172
+ typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t;
173
+ typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t;
174
+ typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t;
175
+ typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t;
176
+ typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t;
177
+ typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t;
178
+ /*
179
+ * CUDNN data type
180
+ */
181
+ typedef enum {
182
+ CUDNN_DATA_FLOAT = 0,
183
+ CUDNN_DATA_DOUBLE = 1,
184
+ CUDNN_DATA_HALF = 2,
185
+ CUDNN_DATA_INT8 = 3,
186
+ CUDNN_DATA_INT32 = 4,
187
+ CUDNN_DATA_INT8x4 = 5,
188
+ CUDNN_DATA_UINT8 = 6,
189
+ CUDNN_DATA_UINT8x4 = 7,
190
+ CUDNN_DATA_INT8x32 = 8,
191
+ CUDNN_DATA_BFLOAT16 = 9,
192
+ CUDNN_DATA_INT64 = 10,
193
+ CUDNN_DATA_BOOLEAN = 11,
194
+ CUDNN_DATA_FP8_E4M3 = 12,
195
+ CUDNN_DATA_FP8_E5M2 = 13,
196
+ CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14,
197
+ } cudnnDataType_t;
198
+
199
+ /*
200
+ * CUDNN math type
201
+ */
202
+ typedef enum {
203
+ CUDNN_DEFAULT_MATH = 0,
204
+ CUDNN_TENSOR_OP_MATH = 1,
205
+ CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2,
206
+ CUDNN_FMA_MATH = 3,
207
+ } cudnnMathType_t;
208
+
209
+ /*
210
+ * CUDNN propagate Nan
211
+ */
212
+ typedef enum {
213
+ CUDNN_NOT_PROPAGATE_NAN = 0,
214
+ CUDNN_PROPAGATE_NAN = 1,
215
+ } cudnnNanPropagation_t;
216
+
217
+ /*
218
+ * CUDNN Determinism
219
+ */
220
+ typedef enum {
221
+ CUDNN_NON_DETERMINISTIC = 0,
222
+ CUDNN_DETERMINISTIC = 1,
223
+ } cudnnDeterminism_t;
224
+
225
+ /* Maximum supported number of tensor dimensions */
226
+ #define CUDNN_DIM_MAX 8
227
+
228
+ /* Create an instance of a generic Tensor descriptor */
229
+ cudnnStatus_t CUDNNWINAPI
230
+ cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc);
231
+
232
+ typedef enum {
233
+ CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */
234
+ CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/
235
+ CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */
236
+ } cudnnTensorFormat_t;
237
+
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc,
240
+ cudnnTensorFormat_t format,
241
+ cudnnDataType_t dataType, /* image data type */
242
+ int n, /* number of inputs (batch size) */
243
+ int c, /* number of input feature maps */
244
+ int h, /* height of input section */
245
+ int w); /* width of input section */
246
+
247
+ cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
249
+ cudnnDataType_t dataType, /* image data type */
250
+ int n, /* number of inputs (batch size) */
251
+ int c, /* number of input feature maps */
252
+ int h, /* height of input section */
253
+ int w, /* width of input section */
254
+ int nStride,
255
+ int cStride,
256
+ int hStride,
257
+ int wStride);
258
+
259
+ cudnnStatus_t CUDNNWINAPI
260
+ cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc,
261
+ cudnnDataType_t *dataType, /* image data type */
262
+ int *n, /* number of inputs (batch size) */
263
+ int *c, /* number of input feature maps */
264
+ int *h, /* height of input section */
265
+ int *w, /* width of input section */
266
+ int *nStride,
267
+ int *cStride,
268
+ int *hStride,
269
+ int *wStride);
270
+
271
+ cudnnStatus_t CUDNNWINAPI
272
+ cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc,
273
+ cudnnDataType_t dataType,
274
+ int nbDims,
275
+ const int dimA[],
276
+ const int strideA[]);
277
+
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
280
+ cudnnTensorFormat_t format,
281
+ cudnnDataType_t dataType,
282
+ int nbDims,
283
+ const int dimA[]);
284
+
285
+ cudnnStatus_t CUDNNWINAPI
286
+ cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc,
287
+ int nbDimsRequested,
288
+ cudnnDataType_t *dataType,
289
+ int *nbDims,
290
+ int dimA[],
291
+ int strideA[]);
292
+
293
+ cudnnStatus_t CUDNNWINAPI
294
+ cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size);
295
+
296
+ /* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride
297
+
298
+ 1)Example of all images in row major order one batch of features after the other (with an optional padding on row)
299
+ input_stride : c x h x h_stride
300
+ feature_stride : h x h_stride
301
+ h_stride : >= w ( h_stride = w if no padding)
302
+ w_stride : 1
303
+
304
+
305
+ 2)Example of all images in row major with features maps interleaved
306
+ input_stride : c x h x h_stride
307
+ feature_stride : 1
308
+ h_stride : w x c
309
+ w_stride : c
310
+
311
+ 3)Example of all images in column major order one batch of features after the other (with optional padding on column)
312
+ input_stride : c x w x w_stride
313
+ feature_stride : w x w_stride
314
+ h_stride : 1
315
+ w_stride : >= h
316
+
317
+ */
318
+
319
+ /* Destroy an instance of Tensor4d descriptor */
320
+ cudnnStatus_t CUDNNWINAPI
321
+ cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc);
322
+
323
+ /* Fold/unfold transforms */
324
+ typedef enum {
325
+ CUDNN_TRANSFORM_FOLD = 0U,
326
+ CUDNN_TRANSFORM_UNFOLD = 1U,
327
+ } cudnnFoldingDirection_t;
328
+
329
+ /** Create a destination descriptor for cudnnTransformTensor */
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc,
332
+ const cudnnTensorDescriptor_t srcDesc,
333
+ cudnnTensorDescriptor_t destDesc,
334
+ size_t *destSizeInBytes);
335
+
336
+ /** Create an empty tensor transform descriptor */
337
+ cudnnStatus_t CUDNNWINAPI
338
+ cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc);
339
+
340
+ /** Initialize a previously created tensor transform descriptor. */
341
+ cudnnStatus_t CUDNNWINAPI
342
+ cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
343
+ const uint32_t nbDims,
344
+ const cudnnTensorFormat_t destFormat,
345
+ const int32_t padBeforeA[],
346
+ const int32_t padAfterA[],
347
+ const uint32_t foldA[],
348
+ const cudnnFoldingDirection_t direction);
349
+
350
+ /**
351
+ * Retrieves the values stored in a previously initialized tensor transform
352
+ * descriptor.
353
+ */
354
+ cudnnStatus_t CUDNNWINAPI
355
+ cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
356
+ uint32_t nbDimsRequested,
357
+ cudnnTensorFormat_t *destFormat,
358
+ int32_t padBeforeA[],
359
+ int32_t padAfterA[],
360
+ uint32_t foldA[],
361
+ cudnnFoldingDirection_t *direction);
362
+
363
+ /**
364
+ * Destroys a previously created tensor transform descriptor.
365
+ */
366
+ cudnnStatus_t CUDNNWINAPI
367
+ cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc);
368
+
369
+ /* Tensor layout conversion helper (y = alpha * x + beta * y) */
370
+ cudnnStatus_t CUDNNWINAPI
371
+ cudnnTransformTensor(cudnnHandle_t handle,
372
+ const void *alpha,
373
+ const cudnnTensorDescriptor_t xDesc,
374
+ const void *x,
375
+ const void *beta,
376
+ const cudnnTensorDescriptor_t yDesc,
377
+ void *y);
378
+
379
+ cudnnStatus_t CUDNNWINAPI
380
+ cudnnTransformTensorEx(cudnnHandle_t handle,
381
+ const cudnnTensorTransformDescriptor_t transDesc,
382
+ const void *alpha,
383
+ const cudnnTensorDescriptor_t srcDesc,
384
+ const void *srcData,
385
+ const void *beta,
386
+ const cudnnTensorDescriptor_t destDesc,
387
+ void *destData);
388
+
389
+ /* Tensor Bias addition : C = alpha * A + beta * C */
390
+ cudnnStatus_t CUDNNWINAPI
391
+ cudnnAddTensor(cudnnHandle_t handle,
392
+ const void *alpha,
393
+ const cudnnTensorDescriptor_t aDesc,
394
+ const void *A,
395
+ const void *beta,
396
+ const cudnnTensorDescriptor_t cDesc,
397
+ void *C);
398
+
399
+ /*
400
+ * CUDNN OpTensor op type
401
+ */
402
+ typedef enum {
403
+ CUDNN_OP_TENSOR_ADD = 0,
404
+ CUDNN_OP_TENSOR_MUL = 1,
405
+ CUDNN_OP_TENSOR_MIN = 2,
406
+ CUDNN_OP_TENSOR_MAX = 3,
407
+ CUDNN_OP_TENSOR_SQRT = 4,
408
+ CUDNN_OP_TENSOR_NOT = 5,
409
+ } cudnnOpTensorOp_t;
410
+
411
+ cudnnStatus_t CUDNNWINAPI
412
+ cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc);
413
+
414
+ cudnnStatus_t CUDNNWINAPI
415
+ cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc,
416
+ cudnnOpTensorOp_t opTensorOp,
417
+ cudnnDataType_t opTensorCompType,
418
+ cudnnNanPropagation_t opTensorNanOpt);
419
+
420
+ cudnnStatus_t CUDNNWINAPI
421
+ cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc,
422
+ cudnnOpTensorOp_t *opTensorOp,
423
+ cudnnDataType_t *opTensorCompType,
424
+ cudnnNanPropagation_t *opTensorNanOpt);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc);
428
+
429
+ /* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */
430
+ /* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */
431
+ cudnnStatus_t CUDNNWINAPI
432
+ cudnnOpTensor(cudnnHandle_t handle,
433
+ const cudnnOpTensorDescriptor_t opTensorDesc,
434
+ const void *alpha1,
435
+ const cudnnTensorDescriptor_t aDesc,
436
+ const void *A,
437
+ const void *alpha2,
438
+ const cudnnTensorDescriptor_t bDesc,
439
+ const void *B,
440
+ const void *beta,
441
+ const cudnnTensorDescriptor_t cDesc,
442
+ void *C);
443
+
444
+ /*
445
+ * CUDNN ReduceTensor op type
446
+ */
447
+ typedef enum {
448
+ CUDNN_REDUCE_TENSOR_ADD = 0,
449
+ CUDNN_REDUCE_TENSOR_MUL = 1,
450
+ CUDNN_REDUCE_TENSOR_MIN = 2,
451
+ CUDNN_REDUCE_TENSOR_MAX = 3,
452
+ CUDNN_REDUCE_TENSOR_AMAX = 4,
453
+ CUDNN_REDUCE_TENSOR_AVG = 5,
454
+ CUDNN_REDUCE_TENSOR_NORM1 = 6,
455
+ CUDNN_REDUCE_TENSOR_NORM2 = 7,
456
+ CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8,
457
+ } cudnnReduceTensorOp_t;
458
+
459
+ /*
460
+ * CUDNN ReduceTensor indices type
461
+ */
462
+ typedef enum {
463
+ CUDNN_REDUCE_TENSOR_NO_INDICES = 0,
464
+ CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1,
465
+ } cudnnReduceTensorIndices_t;
466
+
467
+ /*
468
+ * CUDNN tensor indices type size (all unsigned)
469
+ * Currently not supported, default is 32 bit unsigned.
470
+ */
471
+ typedef enum {
472
+ CUDNN_32BIT_INDICES = 0,
473
+ CUDNN_64BIT_INDICES = 1,
474
+ CUDNN_16BIT_INDICES = 2,
475
+ CUDNN_8BIT_INDICES = 3,
476
+ } cudnnIndicesType_t;
477
+
478
+ cudnnStatus_t CUDNNWINAPI
479
+ cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc);
480
+
481
+ cudnnStatus_t CUDNNWINAPI
482
+ cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc,
483
+ cudnnReduceTensorOp_t reduceTensorOp,
484
+ cudnnDataType_t reduceTensorCompType,
485
+ cudnnNanPropagation_t reduceTensorNanOpt,
486
+ cudnnReduceTensorIndices_t reduceTensorIndices,
487
+ cudnnIndicesType_t reduceTensorIndicesType);
488
+
489
+ cudnnStatus_t CUDNNWINAPI
490
+ cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc,
491
+ cudnnReduceTensorOp_t *reduceTensorOp,
492
+ cudnnDataType_t *reduceTensorCompType,
493
+ cudnnNanPropagation_t *reduceTensorNanOpt,
494
+ cudnnReduceTensorIndices_t *reduceTensorIndices,
495
+ cudnnIndicesType_t *reduceTensorIndicesType);
496
+
497
+ cudnnStatus_t CUDNNWINAPI
498
+ cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc);
499
+
500
+ /* Helper function to return the minimum size of the index space to be passed to the reduction given the input and
501
+ * output tensors */
502
+ cudnnStatus_t CUDNNWINAPI
503
+ cudnnGetReductionIndicesSize(cudnnHandle_t handle,
504
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
505
+ const cudnnTensorDescriptor_t aDesc,
506
+ const cudnnTensorDescriptor_t cDesc,
507
+ size_t *sizeInBytes);
508
+
509
+ /* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output
510
+ * tensors */
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetReductionWorkspaceSize(cudnnHandle_t handle,
513
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
514
+ const cudnnTensorDescriptor_t aDesc,
515
+ const cudnnTensorDescriptor_t cDesc,
516
+ size_t *sizeInBytes);
517
+
518
+ /* Tensor operation : C = reduce op( alpha * A ) + beta * C */
519
+ /* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */
520
+ /* The indices space is ignored for reduce ops other than min or max. */
521
+ cudnnStatus_t CUDNNWINAPI
522
+ cudnnReduceTensor(cudnnHandle_t handle,
523
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
524
+ void *indices,
525
+ size_t indicesSizeInBytes,
526
+ void *workspace,
527
+ size_t workspaceSizeInBytes,
528
+ const void *alpha,
529
+ const cudnnTensorDescriptor_t aDesc,
530
+ const void *A,
531
+ const void *beta,
532
+ const cudnnTensorDescriptor_t cDesc,
533
+ void *C);
534
+
535
+ /* Set all values of a tensor to a given value : y[i] = value[0] */
536
+ cudnnStatus_t CUDNNWINAPI
537
+ cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr);
538
+
539
+ /* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */
540
+ cudnnStatus_t CUDNNWINAPI
541
+ cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha);
542
+
543
+ /* Create an instance of FilterStruct */
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc,
549
+ cudnnDataType_t dataType, /* image data type */
550
+ cudnnTensorFormat_t format,
551
+ int k, /* number of output feature maps */
552
+ int c, /* number of input feature maps */
553
+ int h, /* height of each input filter */
554
+ int w); /* width of each input filter */
555
+
556
+ cudnnStatus_t CUDNNWINAPI
557
+ cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc,
558
+ cudnnDataType_t *dataType, /* image data type */
559
+ cudnnTensorFormat_t *format,
560
+ int *k, /* number of output feature maps */
561
+ int *c, /* number of input feature maps */
562
+ int *h, /* height of each input filter */
563
+ int *w); /* width of each input filter */
564
+
565
+ cudnnStatus_t CUDNNWINAPI
566
+ cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc,
567
+ cudnnDataType_t dataType, /* image data type */
568
+ cudnnTensorFormat_t format,
569
+ int nbDims,
570
+ const int filterDimA[]);
571
+
572
+ cudnnStatus_t CUDNNWINAPI
573
+ cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc,
574
+ int nbDimsRequested,
575
+ cudnnDataType_t *dataType, /* image data type */
576
+ cudnnTensorFormat_t *format,
577
+ int *nbDims,
578
+ int filterDimA[]);
579
+ cudnnStatus_t CUDNNWINAPI
580
+ cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size);
581
+
582
+ cudnnStatus_t CUDNNWINAPI
583
+ cudnnTransformFilter(cudnnHandle_t handle,
584
+ const cudnnTensorTransformDescriptor_t transDesc,
585
+ const void *alpha,
586
+ const cudnnFilterDescriptor_t srcDesc,
587
+ const void *srcData,
588
+ const void *beta,
589
+ const cudnnFilterDescriptor_t destDesc,
590
+ void *destData);
591
+
592
+ cudnnStatus_t CUDNNWINAPI
593
+ cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc);
594
+
595
+ /*
596
+ * softmax algorithm
597
+ */
598
+ typedef enum {
599
+ CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */
600
+ CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */
601
+ CUDNN_SOFTMAX_LOG = 2
602
+ } cudnnSoftmaxAlgorithm_t;
603
+
604
+ typedef enum {
605
+ CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */
606
+ CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */
607
+ } cudnnSoftmaxMode_t;
608
+
609
+ /* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */
610
+
611
+ /* Function to perform forward softmax */
612
+ cudnnStatus_t CUDNNWINAPI
613
+ cudnnSoftmaxForward(cudnnHandle_t handle,
614
+ cudnnSoftmaxAlgorithm_t algo,
615
+ cudnnSoftmaxMode_t mode,
616
+ const void *alpha,
617
+ const cudnnTensorDescriptor_t xDesc,
618
+ const void *x,
619
+ const void *beta,
620
+ const cudnnTensorDescriptor_t yDesc,
621
+ void *y);
622
+
623
+ /*
624
+ * pooling mode
625
+ */
626
+ typedef enum {
627
+ CUDNN_POOLING_MAX = 0,
628
+ CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */
629
+ CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */
630
+ CUDNN_POOLING_MAX_DETERMINISTIC = 3
631
+ } cudnnPoolingMode_t;
632
+
633
+ /* Create an instance of pooling descriptor */
634
+ cudnnStatus_t CUDNNWINAPI
635
+ cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc);
636
+
637
+ cudnnStatus_t CUDNNWINAPI
638
+ cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc,
639
+ cudnnPoolingMode_t mode,
640
+ cudnnNanPropagation_t maxpoolingNanOpt,
641
+ int windowHeight,
642
+ int windowWidth,
643
+ int verticalPadding,
644
+ int horizontalPadding,
645
+ int verticalStride,
646
+ int horizontalStride);
647
+
648
+ cudnnStatus_t CUDNNWINAPI
649
+ cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
650
+ cudnnPoolingMode_t *mode,
651
+ cudnnNanPropagation_t *maxpoolingNanOpt,
652
+ int *windowHeight,
653
+ int *windowWidth,
654
+ int *verticalPadding,
655
+ int *horizontalPadding,
656
+ int *verticalStride,
657
+ int *horizontalStride);
658
+
659
+ cudnnStatus_t CUDNNWINAPI
660
+ cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc,
661
+ const cudnnPoolingMode_t mode,
662
+ const cudnnNanPropagation_t maxpoolingNanOpt,
663
+ int nbDims,
664
+ const int windowDimA[],
665
+ const int paddingA[],
666
+ const int strideA[]);
667
+
668
+ cudnnStatus_t CUDNNWINAPI
669
+ cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
670
+ int nbDimsRequested,
671
+ cudnnPoolingMode_t *mode,
672
+ cudnnNanPropagation_t *maxpoolingNanOpt,
673
+ int *nbDims,
674
+ int windowDimA[],
675
+ int paddingA[],
676
+ int strideA[]);
677
+
678
+ cudnnStatus_t CUDNNWINAPI
679
+ cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
680
+ const cudnnTensorDescriptor_t inputTensorDesc,
681
+ int nbDims,
682
+ int outputTensorDimA[]);
683
+
684
+ cudnnStatus_t CUDNNWINAPI
685
+ cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
686
+ const cudnnTensorDescriptor_t inputTensorDesc,
687
+ int *n,
688
+ int *c,
689
+ int *h,
690
+ int *w);
691
+
692
+ /* Destroy an instance of pooling descriptor */
693
+ cudnnStatus_t CUDNNWINAPI
694
+ cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc);
695
+
696
+ /* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */
697
+
698
+ /* Function to perform forward pooling */
699
+ cudnnStatus_t CUDNNWINAPI
700
+ cudnnPoolingForward(cudnnHandle_t handle,
701
+ const cudnnPoolingDescriptor_t poolingDesc,
702
+ const void *alpha,
703
+ const cudnnTensorDescriptor_t xDesc,
704
+ const void *x,
705
+ const void *beta,
706
+ const cudnnTensorDescriptor_t yDesc,
707
+ void *y);
708
+
709
+ /*
710
+ * activation mode
711
+ */
712
+ typedef enum {
713
+ CUDNN_ACTIVATION_SIGMOID = 0,
714
+ CUDNN_ACTIVATION_RELU = 1,
715
+ CUDNN_ACTIVATION_TANH = 2,
716
+ CUDNN_ACTIVATION_CLIPPED_RELU = 3,
717
+ CUDNN_ACTIVATION_ELU = 4,
718
+ CUDNN_ACTIVATION_IDENTITY = 5,
719
+ CUDNN_ACTIVATION_SWISH = 6
720
+ } cudnnActivationMode_t;
721
+
722
+ /* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */
723
+ cudnnStatus_t CUDNNWINAPI
724
+ cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc);
725
+
726
+ cudnnStatus_t CUDNNWINAPI
727
+ cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc,
728
+ cudnnActivationMode_t mode,
729
+ cudnnNanPropagation_t reluNanOpt,
730
+ double coef); /* ceiling for clipped RELU, alpha for ELU */
731
+
732
+ cudnnStatus_t CUDNNWINAPI
733
+ cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc,
734
+ cudnnActivationMode_t *mode,
735
+ cudnnNanPropagation_t *reluNanOpt,
736
+ double *coef); /* ceiling for clipped RELU, alpha for ELU */
737
+
738
+ cudnnStatus_t CUDNNWINAPI
739
+ cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double swish_beta);
740
+
741
+ cudnnStatus_t CUDNNWINAPI
742
+ cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta);
743
+
744
+ cudnnStatus_t CUDNNWINAPI
745
+ cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc);
746
+
747
+ /* Function to perform forward activation */
748
+ cudnnStatus_t CUDNNWINAPI
749
+ cudnnActivationForward(cudnnHandle_t handle,
750
+ cudnnActivationDescriptor_t activationDesc,
751
+ const void *alpha,
752
+ const cudnnTensorDescriptor_t xDesc,
753
+ const void *x,
754
+ const void *beta,
755
+ const cudnnTensorDescriptor_t yDesc,
756
+ void *y);
757
+
758
+ /*
759
+ * Create an instance of LRN (Local Response Normalization) descriptor
760
+ * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper
761
+ */
762
+ cudnnStatus_t CUDNNWINAPI
763
+ cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc);
764
+
765
+ #define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */
766
+ #define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */
767
+ #define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */
768
+ #define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */
769
+
770
+ /* LRN layer mode */
771
+ typedef enum {
772
+ CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */
773
+ } cudnnLRNMode_t;
774
+
775
+ /*
776
+ * Uses a window [center-lookBehind, center+lookAhead], where
777
+ * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1.
778
+ * Values of double parameters cast to tensor data type.
779
+ */
780
+ cudnnStatus_t CUDNNWINAPI
781
+ cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK);
782
+ /*
783
+ * Retrieve the settings currently stored in an LRN layer descriptor
784
+ * Any of the provided pointers can be NULL (no corresponding value will be returned)
785
+ */
786
+ cudnnStatus_t CUDNNWINAPI
787
+ cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK);
788
+
789
+ /* Destroy an instance of LRN descriptor */
790
+ cudnnStatus_t CUDNNWINAPI
791
+ cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc);
792
+
793
+ /* LRN functions: output = alpha * normalize(x) + beta * old_y */
794
+
795
+ /* LRN cross-channel forward computation. Double parameters cast to tensor data type */
796
+ cudnnStatus_t CUDNNWINAPI
797
+ cudnnLRNCrossChannelForward(cudnnHandle_t handle,
798
+ cudnnLRNDescriptor_t normDesc,
799
+ cudnnLRNMode_t lrnMode,
800
+ const void *alpha,
801
+ const cudnnTensorDescriptor_t xDesc,
802
+ const void *x,
803
+ const void *beta,
804
+ const cudnnTensorDescriptor_t yDesc,
805
+ void *y);
806
+
807
+ typedef enum {
808
+ CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0,
809
+ } cudnnDivNormMode_t;
810
+
811
+ /* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */
812
+ cudnnStatus_t CUDNNWINAPI
813
+ cudnnDivisiveNormalizationForward(cudnnHandle_t handle,
814
+ cudnnLRNDescriptor_t normDesc,
815
+ cudnnDivNormMode_t mode,
816
+ const void *alpha,
817
+ const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */
818
+ const void *x,
819
+ const void *means, /* if NULL, means are assumed to be zero */
820
+ void *temp,
821
+ void *temp2,
822
+ const void *beta,
823
+ const cudnnTensorDescriptor_t yDesc,
824
+ void *y);
825
+
826
+ typedef enum {
827
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
828
+ CUDNN_BATCHNORM_PER_ACTIVATION = 0,
829
+
830
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
831
+ CUDNN_BATCHNORM_SPATIAL = 1,
832
+
833
+ /*
834
+ * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors).
835
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values
836
+ */
837
+ CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2,
838
+ } cudnnBatchNormMode_t;
839
+
840
+ #define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */
841
+
842
+ /*
843
+ * Derives a tensor descriptor from layer data descriptor for BatchNormalization
844
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
845
+ * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions.
846
+ */
847
+ cudnnStatus_t CUDNNWINAPI
848
+ cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc,
849
+ const cudnnTensorDescriptor_t xDesc,
850
+ cudnnBatchNormMode_t mode);
851
+
852
+ typedef enum {
853
+ CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */
854
+ CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */
855
+ CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */
856
+ } cudnnBatchNormOps_t;
857
+
858
+ /*
859
+ * Performs Batch Normalization during Inference:
860
+ * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k]
861
+ * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed
862
+ * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining
863
+ * above for notes on function arguments.
864
+ */
865
+ cudnnStatus_t CUDNNWINAPI
866
+ cudnnBatchNormalizationForwardInference(cudnnHandle_t handle,
867
+ cudnnBatchNormMode_t mode,
868
+ const void *alpha, /* alpha[0] = result blend factor */
869
+ const void *beta, /* beta[0] = dest layer blend factor */
870
+ const cudnnTensorDescriptor_t xDesc,
871
+ const void *x, /* NxCxHxW */
872
+ const cudnnTensorDescriptor_t yDesc,
873
+ void *y, /* NxCxHxW */
874
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
875
+ const void *bnScale,
876
+ const void *bnBias,
877
+ const void *estimatedMean,
878
+ const void *estimatedVariance,
879
+ double epsilon);
880
+
881
+ typedef enum {
882
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
883
+ CUDNN_NORM_PER_ACTIVATION = 0,
884
+
885
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
886
+ CUDNN_NORM_PER_CHANNEL = 1,
887
+ } cudnnNormMode_t;
888
+
889
+ typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t;
890
+
891
+ /*
892
+ * Derives a tensor descriptor from layer data descriptor for Normalization
893
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
894
+ * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions.
895
+ */
896
+ cudnnStatus_t CUDNNWINAPI
897
+ cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc,
898
+ cudnnTensorDescriptor_t derivedNormMeanVarDesc,
899
+ const cudnnTensorDescriptor_t xDesc,
900
+ cudnnNormMode_t mode,
901
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
902
+
903
+ typedef enum {
904
+ CUDNN_NORM_OPS_NORM = 0, /* do normalization only */
905
+ CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */
906
+ CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */
907
+ } cudnnNormOps_t;
908
+
909
+ /*
910
+ * Performs Normalization during Inference:
911
+ * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k]
912
+ * with normScale, normBias, runningMean, runningInvVariance tensors indexed
913
+ * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining
914
+ * above for notes on function arguments.
915
+ */
916
+ cudnnStatus_t CUDNNWINAPI
917
+ cudnnNormalizationForwardInference(cudnnHandle_t handle,
918
+ cudnnNormMode_t mode,
919
+ cudnnNormOps_t normOps,
920
+ cudnnNormAlgo_t algo,
921
+ const void *alpha, /* alpha[0] = result blend factor */
922
+ const void *beta, /* beta[0] = dest layer blend factor */
923
+ const cudnnTensorDescriptor_t xDesc,
924
+ const void *x, /* NxCxHxW */
925
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
926
+ const void *normScale,
927
+ const void *normBias,
928
+ const cudnnTensorDescriptor_t normMeanVarDesc,
929
+ const void *estimatedMean,
930
+ const void *estimatedVariance,
931
+ const cudnnTensorDescriptor_t zDesc,
932
+ const void *z,
933
+ cudnnActivationDescriptor_t activationDesc,
934
+ const cudnnTensorDescriptor_t yDesc,
935
+ void *y, /* NxCxHxW */
936
+ double epsilon,
937
+ int groupCnt); /* Place hold for future work*/
938
+
939
+ /* APIs for spatial transformer network*/
940
+ typedef enum {
941
+ CUDNN_SAMPLER_BILINEAR = 0,
942
+ } cudnnSamplerType_t;
943
+
944
+ cudnnStatus_t CUDNNWINAPI
945
+ cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc);
946
+
947
+ cudnnStatus_t CUDNNWINAPI
948
+ cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc,
949
+ cudnnSamplerType_t samplerType,
950
+ cudnnDataType_t dataType,
951
+ const int nbDims,
952
+ const int dimA[]);
953
+
954
+ cudnnStatus_t CUDNNWINAPI
955
+ cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc);
956
+
957
+ cudnnStatus_t CUDNNWINAPI
958
+ cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle,
959
+ const cudnnSpatialTransformerDescriptor_t stDesc,
960
+ const void *theta,
961
+ void *grid);
962
+
963
+ cudnnStatus_t CUDNNWINAPI
964
+ cudnnSpatialTfSamplerForward(cudnnHandle_t handle,
965
+ cudnnSpatialTransformerDescriptor_t stDesc,
966
+ const void *alpha,
967
+ const cudnnTensorDescriptor_t xDesc,
968
+ const void *x,
969
+ const void *grid,
970
+ const void *beta,
971
+ cudnnTensorDescriptor_t yDesc,
972
+ void *y);
973
+
974
+ typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t;
975
+
976
+ cudnnStatus_t CUDNNWINAPI
977
+ cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc);
978
+
979
+ cudnnStatus_t CUDNNWINAPI
980
+ cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc);
981
+
982
+ /*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */
983
+ cudnnStatus_t CUDNNWINAPI
984
+ cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes);
985
+
986
+ /*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */
987
+ cudnnStatus_t CUDNNWINAPI
988
+ cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes);
989
+
990
+ cudnnStatus_t CUDNNWINAPI
991
+ cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
992
+ cudnnHandle_t handle,
993
+ float dropout,
994
+ void *states,
995
+ size_t stateSizeInBytes,
996
+ unsigned long long seed);
997
+
998
+ /* Restores the dropout descriptor to a previously saved-off state */
999
+ cudnnStatus_t CUDNNWINAPI
1000
+ cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1001
+ cudnnHandle_t handle,
1002
+ float dropout,
1003
+ void *states,
1004
+ size_t stateSizeInBytes,
1005
+ unsigned long long seed);
1006
+
1007
+ cudnnStatus_t CUDNNWINAPI
1008
+ cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1009
+ cudnnHandle_t handle,
1010
+ float *dropout,
1011
+ void **states,
1012
+ unsigned long long *seed);
1013
+
1014
+ cudnnStatus_t CUDNNWINAPI
1015
+ cudnnDropoutForward(cudnnHandle_t handle,
1016
+ const cudnnDropoutDescriptor_t dropoutDesc,
1017
+ const cudnnTensorDescriptor_t xdesc,
1018
+ const void *x,
1019
+ const cudnnTensorDescriptor_t ydesc,
1020
+ void *y,
1021
+ void *reserveSpace,
1022
+ size_t reserveSpaceSizeInBytes);
1023
+
1024
+ /* TODO: remove */
1025
+
1026
+ typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t;
1027
+ typedef struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t;
1028
+
1029
+ /* TODO: move these enums out to the appropriate submodule */
1030
+ typedef enum {
1031
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0,
1032
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1,
1033
+ CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2,
1034
+ CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3,
1035
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4,
1036
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5,
1037
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6,
1038
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7,
1039
+ CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8
1040
+ } cudnnConvolutionFwdAlgo_t;
1041
+
1042
+ typedef enum {
1043
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */
1044
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1,
1045
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2,
1046
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */
1047
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */
1048
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5,
1049
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6,
1050
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7
1051
+ } cudnnConvolutionBwdFilterAlgo_t;
1052
+
1053
+ typedef enum {
1054
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */
1055
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1,
1056
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2,
1057
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3,
1058
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4,
1059
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5,
1060
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6
1061
+ } cudnnConvolutionBwdDataAlgo_t;
1062
+
1063
+ typedef enum {
1064
+ CUDNN_RNN_ALGO_STANDARD = 0,
1065
+ CUDNN_RNN_ALGO_PERSIST_STATIC = 1,
1066
+ CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2,
1067
+ CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3,
1068
+ CUDNN_RNN_ALGO_COUNT = 4,
1069
+ } cudnnRNNAlgo_t;
1070
+
1071
+ typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t;
1072
+
1073
+ /* TODO: remove */
1074
+ typedef struct cudnnAlgorithmUnionStruct {
1075
+ union Algorithm {
1076
+ cudnnConvolutionFwdAlgo_t convFwdAlgo;
1077
+ cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo;
1078
+ cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo;
1079
+ cudnnRNNAlgo_t RNNAlgo;
1080
+ cudnnCTCLossAlgo_t CTCLossAlgo;
1081
+ } algo;
1082
+ } cudnnAlgorithm_t;
1083
+
1084
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1085
+ cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc);
1086
+
1087
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1088
+ cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm);
1089
+
1090
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1091
+ cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm);
1092
+
1093
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1094
+ cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest);
1095
+
1096
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1097
+ cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc);
1098
+
1099
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1100
+ cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate);
1101
+
1102
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1103
+ cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf,
1104
+ cudnnAlgorithmDescriptor_t algoDesc,
1105
+ cudnnStatus_t status,
1106
+ float time,
1107
+ size_t memory);
1108
+
1109
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1110
+ cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf,
1111
+ cudnnAlgorithmDescriptor_t *algoDesc,
1112
+ cudnnStatus_t *status,
1113
+ float *time,
1114
+ size_t *memory);
1115
+
1116
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1117
+ cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy);
1118
+
1119
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1120
+ cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes);
1121
+
1122
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1123
+ cudnnSaveAlgorithm(cudnnHandle_t handle,
1124
+ cudnnAlgorithmDescriptor_t algoDesc,
1125
+ void *algoSpace,
1126
+ size_t algoSpaceSizeInBytes);
1127
+
1128
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1129
+ cudnnRestoreAlgorithm(cudnnHandle_t handle,
1130
+ void *algoSpace,
1131
+ size_t algoSpaceSizeInBytes,
1132
+ cudnnAlgorithmDescriptor_t algoDesc);
1133
+
1134
+ typedef enum {
1135
+ CUDNN_SEV_FATAL = 0,
1136
+ CUDNN_SEV_ERROR = 1,
1137
+ CUDNN_SEV_WARNING = 2,
1138
+ CUDNN_SEV_INFO = 3,
1139
+ } cudnnSeverity_t;
1140
+
1141
+ /* Message masks to be used with cudnnSetCallback() */
1142
+ #define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR)
1143
+ #define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING)
1144
+ #define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO)
1145
+
1146
+ /* struct containing useful informaiton for each API call */
1147
+ typedef struct cudnnDebugStruct {
1148
+ unsigned cudnn_version;
1149
+ cudnnStatus_t cudnnStatus;
1150
+ unsigned time_sec; /* epoch time in seconds */
1151
+ unsigned time_usec; /* microseconds part of epoch time */
1152
+ unsigned time_delta; /* time since start in seconds */
1153
+ cudnnHandle_t handle; /* cudnn handle */
1154
+ cudaStream_t stream; /* cuda stream ID */
1155
+ unsigned long long pid; /* process ID */
1156
+ unsigned long long tid; /* thread ID */
1157
+ int cudaDeviceId; /* CUDA device ID */
1158
+ int reserved[15]; /* reserved for future use */
1159
+ } cudnnDebug_t;
1160
+
1161
+ typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg);
1162
+
1163
+ cudnnStatus_t CUDNNWINAPI
1164
+ cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr);
1165
+
1166
+ cudnnStatus_t CUDNNWINAPI
1167
+ cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr);
1168
+
1169
+ /*
1170
+ * \brief Cross-library version checker.
1171
+ * This function is implemented differently in each sub-library. Each sublib
1172
+ * checks whether its own version matches that of its dependencies.
1173
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
1174
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
1175
+ */
1176
+ cudnnStatus_t CUDNNWINAPI
1177
+ cudnnOpsInferVersionCheck(void);
1178
+
1179
+ #if defined(__cplusplus)
1180
+ }
1181
+ #endif
1182
+
1183
+ #endif /* CUDNN_OPS_INFER_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h ADDED
@@ -0,0 +1,501 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_train : cuDNN's basic training operations and algorithms.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_TRAIN_H_)
55
+ #define CUDNN_OPS_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_OPS_TRAIN_MAJOR 8
65
+ #define CUDNN_OPS_TRAIN_MINOR 9
66
+ #define CUDNN_OPS_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN OPS TRAIN!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* Function to perform backward softmax */
78
+ cudnnStatus_t CUDNNWINAPI
79
+ cudnnSoftmaxBackward(cudnnHandle_t handle,
80
+ cudnnSoftmaxAlgorithm_t algo,
81
+ cudnnSoftmaxMode_t mode,
82
+ const void *alpha,
83
+ const cudnnTensorDescriptor_t yDesc,
84
+ const void *y,
85
+ const cudnnTensorDescriptor_t dyDesc,
86
+ const void *dy,
87
+ const void *beta,
88
+ const cudnnTensorDescriptor_t dxDesc,
89
+ void *dx);
90
+
91
+ /* Function to perform backward pooling */
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnPoolingBackward(cudnnHandle_t handle,
94
+ const cudnnPoolingDescriptor_t poolingDesc,
95
+ const void *alpha,
96
+ const cudnnTensorDescriptor_t yDesc,
97
+ const void *y,
98
+ const cudnnTensorDescriptor_t dyDesc,
99
+ const void *dy,
100
+ const cudnnTensorDescriptor_t xDesc,
101
+ const void *x,
102
+ const void *beta,
103
+ const cudnnTensorDescriptor_t dxDesc,
104
+ void *dx);
105
+
106
+ /* Function to perform backward activation */
107
+ cudnnStatus_t CUDNNWINAPI
108
+ cudnnActivationBackward(cudnnHandle_t handle,
109
+ cudnnActivationDescriptor_t activationDesc,
110
+ const void *alpha,
111
+ const cudnnTensorDescriptor_t yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t xDesc,
116
+ const void *x,
117
+ const void *beta,
118
+ const cudnnTensorDescriptor_t dxDesc,
119
+ void *dx);
120
+
121
+ /* LRN cross-channel backward computation. Double parameters cast to tensor data type */
122
+ cudnnStatus_t CUDNNWINAPI
123
+ cudnnLRNCrossChannelBackward(cudnnHandle_t handle,
124
+ cudnnLRNDescriptor_t normDesc,
125
+ cudnnLRNMode_t lrnMode,
126
+ const void *alpha,
127
+ const cudnnTensorDescriptor_t yDesc,
128
+ const void *y,
129
+ const cudnnTensorDescriptor_t dyDesc,
130
+ const void *dy,
131
+ const cudnnTensorDescriptor_t xDesc,
132
+ const void *x,
133
+ const void *beta,
134
+ const cudnnTensorDescriptor_t dxDesc,
135
+ void *dx);
136
+
137
+ cudnnStatus_t CUDNNWINAPI
138
+ cudnnDivisiveNormalizationBackward(cudnnHandle_t handle,
139
+ cudnnLRNDescriptor_t normDesc,
140
+ cudnnDivNormMode_t mode,
141
+ const void *alpha,
142
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */
143
+ const void *x,
144
+ const void *means, /* if NULL, means are assumed to be zero */
145
+ const void *dy,
146
+ void *temp,
147
+ void *temp2,
148
+ const void *beta,
149
+ const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */
150
+ void *dx, /* output x differential */
151
+ void *dMeans); /* output means differential, can be NULL */
152
+
153
+ cudnnStatus_t CUDNNWINAPI
154
+ cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle,
155
+ cudnnBatchNormMode_t mode,
156
+ cudnnBatchNormOps_t bnOps,
157
+ const cudnnTensorDescriptor_t xDesc,
158
+ const cudnnTensorDescriptor_t zDesc,
159
+ const cudnnTensorDescriptor_t yDesc,
160
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
161
+ const cudnnActivationDescriptor_t activationDesc,
162
+ size_t *sizeInBytes);
163
+
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle,
166
+ cudnnBatchNormMode_t mode,
167
+ cudnnBatchNormOps_t bnOps,
168
+ const cudnnTensorDescriptor_t xDesc,
169
+ const cudnnTensorDescriptor_t yDesc,
170
+ const cudnnTensorDescriptor_t dyDesc,
171
+ const cudnnTensorDescriptor_t dzDesc,
172
+ const cudnnTensorDescriptor_t dxDesc,
173
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
174
+ const cudnnActivationDescriptor_t activationDesc,
175
+ size_t *sizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle,
179
+ cudnnBatchNormMode_t mode,
180
+ cudnnBatchNormOps_t bnOps,
181
+ const cudnnActivationDescriptor_t activationDesc,
182
+ const cudnnTensorDescriptor_t xDesc,
183
+ size_t *sizeInBytes);
184
+
185
+ /* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnBatchNormalizationForwardTraining(
188
+ cudnnHandle_t handle,
189
+ cudnnBatchNormMode_t mode,
190
+
191
+ const void *alpha, /* alpha[0] = result blend factor */
192
+ const void *beta, /* beta[0] = dest layer blend factor */
193
+
194
+ const cudnnTensorDescriptor_t xDesc,
195
+ const void *x, /* NxCxHxW */
196
+ const cudnnTensorDescriptor_t yDesc,
197
+ void *y, /* NxCxHxW */
198
+
199
+ /* Shared desc for the next 6 tensors in the argument list.
200
+ Data type to be set as follows:
201
+ type = (typeOf(x) == double) ? double : float
202
+ Dimensions for this descriptor depend on normalization mode
203
+ - Spatial Normalization : tensors are expected to have dims 1xCx1x1
204
+ (normalization is performed across NxHxW)
205
+ - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW
206
+ (normalization is performed across N) */
207
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
208
+
209
+ /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */
210
+ const void *bnScale,
211
+ const void *bnBias,
212
+
213
+ /* MUST use factor=1 in the very first call of a complete training cycle.
214
+ Use a factor=1/(1+n) at N-th call to the function to get
215
+ Cumulative Moving Average (CMA) behavior
216
+ CMA[n] = (x[1]+...+x[n])/n
217
+ Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) =
218
+ ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) =
219
+ CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */
220
+ double exponentialAverageFactor,
221
+
222
+ /* Used in Training phase only.
223
+ runningMean = newMean*factor + runningMean*(1-factor) */
224
+ void *resultRunningMean,
225
+ /* Output in training mode, input in inference. Is the moving average
226
+ of variance[x] (factor is applied in the same way as for runningMean) */
227
+ void *resultRunningVariance,
228
+
229
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
230
+ double epsilon,
231
+
232
+ /* Optionally save intermediate results from the forward pass here
233
+ - can be reused to speed up backward pass. NULL if unused */
234
+ void *resultSaveMean,
235
+ void *resultSaveInvVariance);
236
+
237
+ /* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnBatchNormalizationForwardTrainingEx(
240
+ cudnnHandle_t handle,
241
+ cudnnBatchNormMode_t mode,
242
+ cudnnBatchNormOps_t bnOps,
243
+
244
+ const void *alpha, /* alpha[0] = result blend factor */
245
+ const void *beta, /* beta[0] = dest layer blend factor */
246
+
247
+ const cudnnTensorDescriptor_t xDesc,
248
+ const void *xData,
249
+ const cudnnTensorDescriptor_t zDesc,
250
+ const void *zData,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ void *yData,
253
+
254
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
255
+ const void *bnScale,
256
+ const void *bnBias,
257
+
258
+ double exponentialAverageFactor,
259
+ void *resultRunningMean,
260
+ void *resultRunningVariance,
261
+
262
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
263
+ double epsilon,
264
+
265
+ /* Optionally save intermediate results from the forward pass here
266
+ - can be reused to speed up backward pass. NULL if unused */
267
+ void *resultSaveMean,
268
+ void *resultSaveInvVariance,
269
+
270
+ cudnnActivationDescriptor_t activationDesc,
271
+ void *workspace,
272
+ size_t workSpaceSizeInBytes,
273
+ void *reserveSpace,
274
+ size_t reserveSpaceSizeInBytes);
275
+
276
+ /* Performs backward pass of Batch Normalization layer. Returns x gradient,
277
+ * bnScale gradient and bnBias gradient */
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnBatchNormalizationBackward(cudnnHandle_t handle,
280
+ cudnnBatchNormMode_t mode,
281
+ const void *alphaDataDiff,
282
+ const void *betaDataDiff,
283
+ const void *alphaParamDiff,
284
+ const void *betaParamDiff,
285
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */
286
+ const void *x,
287
+ const cudnnTensorDescriptor_t dyDesc,
288
+ const void *dy,
289
+ const cudnnTensorDescriptor_t dxDesc,
290
+ void *dx,
291
+ /* Shared tensor desc for the 4 tensors below */
292
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
293
+ const void *bnScale, /* bnBias doesn't affect backpropagation */
294
+ /* scale and bias diff are not backpropagated below this layer */
295
+ void *dBnScaleResult,
296
+ void *dBnBiasResult,
297
+ /* Same epsilon as forward pass */
298
+ double epsilon,
299
+
300
+ /* Optionally cached intermediate results from
301
+ forward pass */
302
+ const void *savedMean,
303
+ const void *savedInvVariance);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle,
307
+ cudnnBatchNormMode_t mode,
308
+ cudnnBatchNormOps_t bnOps,
309
+
310
+ const void *alphaDataDiff,
311
+ const void *betaDataDiff,
312
+ const void *alphaParamDiff,
313
+ const void *betaParamDiff,
314
+ const cudnnTensorDescriptor_t xDesc,
315
+ const void *xData,
316
+ const cudnnTensorDescriptor_t yDesc,
317
+ const void *yData,
318
+ const cudnnTensorDescriptor_t dyDesc,
319
+ const void *dyData,
320
+ const cudnnTensorDescriptor_t dzDesc,
321
+ void *dzData,
322
+ const cudnnTensorDescriptor_t dxDesc,
323
+ void *dxData,
324
+
325
+ /* Shared tensor desc for the 4 tensors below */
326
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
327
+ const void *bnScaleData,
328
+ const void *bnBiasData, /* needed if there is activation */
329
+ void *dBnScaleData,
330
+ void *dBnBiasData,
331
+ double epsilon, /* Same epsilon as forward pass */
332
+
333
+ /* Optionally cached intermediate results from
334
+ forward pass */
335
+ const void *savedMean,
336
+ const void *savedInvVariance,
337
+ cudnnActivationDescriptor_t activationDesc,
338
+ void *workSpace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle,
345
+ cudnnNormMode_t mode,
346
+ cudnnNormOps_t normOps,
347
+ cudnnNormAlgo_t algo,
348
+ const cudnnTensorDescriptor_t xDesc,
349
+ const cudnnTensorDescriptor_t zDesc,
350
+ const cudnnTensorDescriptor_t yDesc,
351
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
352
+ const cudnnActivationDescriptor_t activationDesc,
353
+ const cudnnTensorDescriptor_t normMeanVarDesc,
354
+ size_t *sizeInBytes,
355
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
356
+
357
+ cudnnStatus_t CUDNNWINAPI
358
+ cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle,
359
+ cudnnNormMode_t mode,
360
+ cudnnNormOps_t normOps,
361
+ cudnnNormAlgo_t algo,
362
+ const cudnnTensorDescriptor_t xDesc,
363
+ const cudnnTensorDescriptor_t yDesc,
364
+ const cudnnTensorDescriptor_t dyDesc,
365
+ const cudnnTensorDescriptor_t dzDesc,
366
+ const cudnnTensorDescriptor_t dxDesc,
367
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
368
+ const cudnnActivationDescriptor_t activationDesc,
369
+ const cudnnTensorDescriptor_t normMeanVarDesc,
370
+ size_t *sizeInBytes,
371
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
372
+
373
+ cudnnStatus_t CUDNNWINAPI
374
+ cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle,
375
+ cudnnNormMode_t mode,
376
+ cudnnNormOps_t normOps,
377
+ cudnnNormAlgo_t algo,
378
+ const cudnnActivationDescriptor_t activationDesc,
379
+ const cudnnTensorDescriptor_t xDesc,
380
+ size_t *sizeInBytes,
381
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
382
+
383
+ /* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */
384
+ cudnnStatus_t CUDNNWINAPI
385
+ cudnnNormalizationForwardTraining(cudnnHandle_t handle,
386
+ cudnnNormMode_t mode,
387
+ cudnnNormOps_t normOps,
388
+ cudnnNormAlgo_t algo,
389
+ const void *alpha, /* alpha[0] = result blend factor */
390
+ const void *beta, /* beta[0] = dest layer blend factor */
391
+ const cudnnTensorDescriptor_t xDesc,
392
+ const void *xData,
393
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
394
+ const void *normScale,
395
+ const void *normBias,
396
+ double exponentialAverageFactor,
397
+ const cudnnTensorDescriptor_t normMeanVarDesc,
398
+ void *resultRunningMean,
399
+ void *resultRunningVariance,
400
+ /* Has to be >= 0. Should be the same in forward and backward functions. */
401
+ double epsilon,
402
+ /* Optionally save intermediate results from the forward pass here
403
+ - can be reused to speed up backward pass. NULL if unused */
404
+ void *resultSaveMean,
405
+ void *resultSaveInvVariance,
406
+ cudnnActivationDescriptor_t activationDesc,
407
+ const cudnnTensorDescriptor_t zDesc,
408
+ const void *zData,
409
+ const cudnnTensorDescriptor_t yDesc,
410
+ void *yData,
411
+ void *workspace,
412
+ size_t workSpaceSizeInBytes,
413
+ void *reserveSpace,
414
+ size_t reserveSpaceSizeInBytes,
415
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
416
+
417
+ cudnnStatus_t CUDNNWINAPI
418
+ cudnnNormalizationBackward(cudnnHandle_t handle,
419
+ cudnnNormMode_t mode,
420
+ cudnnNormOps_t normOps,
421
+ cudnnNormAlgo_t algo,
422
+ const void *alphaDataDiff,
423
+ const void *betaDataDiff,
424
+ const void *alphaParamDiff,
425
+ const void *betaParamDiff,
426
+ const cudnnTensorDescriptor_t xDesc,
427
+ const void *xData,
428
+ const cudnnTensorDescriptor_t yDesc,
429
+ const void *yData,
430
+ const cudnnTensorDescriptor_t dyDesc,
431
+ const void *dyData,
432
+ const cudnnTensorDescriptor_t dzDesc,
433
+ void *dzData,
434
+ const cudnnTensorDescriptor_t dxDesc,
435
+ void *dxData,
436
+ /* Shared tensor desc for the 4 tensors below */
437
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
438
+ const void *normScaleData,
439
+ const void *normBiasData, /* needed if there is activation */
440
+ void *dNormScaleData,
441
+ void *dNormBiasData,
442
+ double epsilon, /* Same epsilon as forward pass */
443
+ const cudnnTensorDescriptor_t normMeanVarDesc,
444
+ /* Optionally cached intermediate results from
445
+ forward pass */
446
+ const void *savedMean,
447
+ const void *savedInvVariance,
448
+ cudnnActivationDescriptor_t activationDesc,
449
+ void *workSpace,
450
+ size_t workSpaceSizeInBytes,
451
+ void *reserveSpace,
452
+ size_t reserveSpaceSizeInBytes,
453
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
454
+
455
+ cudnnStatus_t CUDNNWINAPI
456
+ cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle,
457
+ const cudnnSpatialTransformerDescriptor_t stDesc,
458
+ const void *dgrid,
459
+ void *dtheta);
460
+
461
+ cudnnStatus_t CUDNNWINAPI
462
+ cudnnSpatialTfSamplerBackward(cudnnHandle_t handle,
463
+ cudnnSpatialTransformerDescriptor_t stDesc,
464
+ const void *alpha,
465
+ const cudnnTensorDescriptor_t xDesc,
466
+ const void *x,
467
+ const void *beta,
468
+ const cudnnTensorDescriptor_t dxDesc,
469
+ void *dx,
470
+ const void *alphaDgrid,
471
+ const cudnnTensorDescriptor_t dyDesc,
472
+ const void *dy,
473
+ const void *grid,
474
+ const void *betaDgrid,
475
+ void *dgrid);
476
+
477
+ cudnnStatus_t CUDNNWINAPI
478
+ cudnnDropoutBackward(cudnnHandle_t handle,
479
+ const cudnnDropoutDescriptor_t dropoutDesc,
480
+ const cudnnTensorDescriptor_t dydesc,
481
+ const void *dy,
482
+ const cudnnTensorDescriptor_t dxdesc,
483
+ void *dx,
484
+ void *reserveSpace,
485
+ size_t reserveSpaceSizeInBytes);
486
+
487
+ /*
488
+ * \brief Cross-library version checker.
489
+ * This function is implemented differently in each sub-library. Each sublib
490
+ * checks whether its own version matches that of its dependencies.
491
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
492
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
493
+ */
494
+ cudnnStatus_t CUDNNWINAPI
495
+ cudnnOpsTrainVersionCheck(void);
496
+
497
+ #if defined(__cplusplus)
498
+ }
499
+ #endif
500
+
501
+ #endif /* CUDNN_OPS_TRAIN_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h ADDED
@@ -0,0 +1,501 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_train : cuDNN's basic training operations and algorithms.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_TRAIN_H_)
55
+ #define CUDNN_OPS_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_OPS_TRAIN_MAJOR 8
65
+ #define CUDNN_OPS_TRAIN_MINOR 9
66
+ #define CUDNN_OPS_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN OPS TRAIN!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* Function to perform backward softmax */
78
+ cudnnStatus_t CUDNNWINAPI
79
+ cudnnSoftmaxBackward(cudnnHandle_t handle,
80
+ cudnnSoftmaxAlgorithm_t algo,
81
+ cudnnSoftmaxMode_t mode,
82
+ const void *alpha,
83
+ const cudnnTensorDescriptor_t yDesc,
84
+ const void *y,
85
+ const cudnnTensorDescriptor_t dyDesc,
86
+ const void *dy,
87
+ const void *beta,
88
+ const cudnnTensorDescriptor_t dxDesc,
89
+ void *dx);
90
+
91
+ /* Function to perform backward pooling */
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnPoolingBackward(cudnnHandle_t handle,
94
+ const cudnnPoolingDescriptor_t poolingDesc,
95
+ const void *alpha,
96
+ const cudnnTensorDescriptor_t yDesc,
97
+ const void *y,
98
+ const cudnnTensorDescriptor_t dyDesc,
99
+ const void *dy,
100
+ const cudnnTensorDescriptor_t xDesc,
101
+ const void *x,
102
+ const void *beta,
103
+ const cudnnTensorDescriptor_t dxDesc,
104
+ void *dx);
105
+
106
+ /* Function to perform backward activation */
107
+ cudnnStatus_t CUDNNWINAPI
108
+ cudnnActivationBackward(cudnnHandle_t handle,
109
+ cudnnActivationDescriptor_t activationDesc,
110
+ const void *alpha,
111
+ const cudnnTensorDescriptor_t yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t xDesc,
116
+ const void *x,
117
+ const void *beta,
118
+ const cudnnTensorDescriptor_t dxDesc,
119
+ void *dx);
120
+
121
+ /* LRN cross-channel backward computation. Double parameters cast to tensor data type */
122
+ cudnnStatus_t CUDNNWINAPI
123
+ cudnnLRNCrossChannelBackward(cudnnHandle_t handle,
124
+ cudnnLRNDescriptor_t normDesc,
125
+ cudnnLRNMode_t lrnMode,
126
+ const void *alpha,
127
+ const cudnnTensorDescriptor_t yDesc,
128
+ const void *y,
129
+ const cudnnTensorDescriptor_t dyDesc,
130
+ const void *dy,
131
+ const cudnnTensorDescriptor_t xDesc,
132
+ const void *x,
133
+ const void *beta,
134
+ const cudnnTensorDescriptor_t dxDesc,
135
+ void *dx);
136
+
137
+ cudnnStatus_t CUDNNWINAPI
138
+ cudnnDivisiveNormalizationBackward(cudnnHandle_t handle,
139
+ cudnnLRNDescriptor_t normDesc,
140
+ cudnnDivNormMode_t mode,
141
+ const void *alpha,
142
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */
143
+ const void *x,
144
+ const void *means, /* if NULL, means are assumed to be zero */
145
+ const void *dy,
146
+ void *temp,
147
+ void *temp2,
148
+ const void *beta,
149
+ const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */
150
+ void *dx, /* output x differential */
151
+ void *dMeans); /* output means differential, can be NULL */
152
+
153
+ cudnnStatus_t CUDNNWINAPI
154
+ cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle,
155
+ cudnnBatchNormMode_t mode,
156
+ cudnnBatchNormOps_t bnOps,
157
+ const cudnnTensorDescriptor_t xDesc,
158
+ const cudnnTensorDescriptor_t zDesc,
159
+ const cudnnTensorDescriptor_t yDesc,
160
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
161
+ const cudnnActivationDescriptor_t activationDesc,
162
+ size_t *sizeInBytes);
163
+
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle,
166
+ cudnnBatchNormMode_t mode,
167
+ cudnnBatchNormOps_t bnOps,
168
+ const cudnnTensorDescriptor_t xDesc,
169
+ const cudnnTensorDescriptor_t yDesc,
170
+ const cudnnTensorDescriptor_t dyDesc,
171
+ const cudnnTensorDescriptor_t dzDesc,
172
+ const cudnnTensorDescriptor_t dxDesc,
173
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
174
+ const cudnnActivationDescriptor_t activationDesc,
175
+ size_t *sizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle,
179
+ cudnnBatchNormMode_t mode,
180
+ cudnnBatchNormOps_t bnOps,
181
+ const cudnnActivationDescriptor_t activationDesc,
182
+ const cudnnTensorDescriptor_t xDesc,
183
+ size_t *sizeInBytes);
184
+
185
+ /* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnBatchNormalizationForwardTraining(
188
+ cudnnHandle_t handle,
189
+ cudnnBatchNormMode_t mode,
190
+
191
+ const void *alpha, /* alpha[0] = result blend factor */
192
+ const void *beta, /* beta[0] = dest layer blend factor */
193
+
194
+ const cudnnTensorDescriptor_t xDesc,
195
+ const void *x, /* NxCxHxW */
196
+ const cudnnTensorDescriptor_t yDesc,
197
+ void *y, /* NxCxHxW */
198
+
199
+ /* Shared desc for the next 6 tensors in the argument list.
200
+ Data type to be set as follows:
201
+ type = (typeOf(x) == double) ? double : float
202
+ Dimensions for this descriptor depend on normalization mode
203
+ - Spatial Normalization : tensors are expected to have dims 1xCx1x1
204
+ (normalization is performed across NxHxW)
205
+ - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW
206
+ (normalization is performed across N) */
207
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
208
+
209
+ /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */
210
+ const void *bnScale,
211
+ const void *bnBias,
212
+
213
+ /* MUST use factor=1 in the very first call of a complete training cycle.
214
+ Use a factor=1/(1+n) at N-th call to the function to get
215
+ Cumulative Moving Average (CMA) behavior
216
+ CMA[n] = (x[1]+...+x[n])/n
217
+ Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) =
218
+ ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) =
219
+ CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */
220
+ double exponentialAverageFactor,
221
+
222
+ /* Used in Training phase only.
223
+ runningMean = newMean*factor + runningMean*(1-factor) */
224
+ void *resultRunningMean,
225
+ /* Output in training mode, input in inference. Is the moving average
226
+ of variance[x] (factor is applied in the same way as for runningMean) */
227
+ void *resultRunningVariance,
228
+
229
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
230
+ double epsilon,
231
+
232
+ /* Optionally save intermediate results from the forward pass here
233
+ - can be reused to speed up backward pass. NULL if unused */
234
+ void *resultSaveMean,
235
+ void *resultSaveInvVariance);
236
+
237
+ /* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnBatchNormalizationForwardTrainingEx(
240
+ cudnnHandle_t handle,
241
+ cudnnBatchNormMode_t mode,
242
+ cudnnBatchNormOps_t bnOps,
243
+
244
+ const void *alpha, /* alpha[0] = result blend factor */
245
+ const void *beta, /* beta[0] = dest layer blend factor */
246
+
247
+ const cudnnTensorDescriptor_t xDesc,
248
+ const void *xData,
249
+ const cudnnTensorDescriptor_t zDesc,
250
+ const void *zData,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ void *yData,
253
+
254
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
255
+ const void *bnScale,
256
+ const void *bnBias,
257
+
258
+ double exponentialAverageFactor,
259
+ void *resultRunningMean,
260
+ void *resultRunningVariance,
261
+
262
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
263
+ double epsilon,
264
+
265
+ /* Optionally save intermediate results from the forward pass here
266
+ - can be reused to speed up backward pass. NULL if unused */
267
+ void *resultSaveMean,
268
+ void *resultSaveInvVariance,
269
+
270
+ cudnnActivationDescriptor_t activationDesc,
271
+ void *workspace,
272
+ size_t workSpaceSizeInBytes,
273
+ void *reserveSpace,
274
+ size_t reserveSpaceSizeInBytes);
275
+
276
+ /* Performs backward pass of Batch Normalization layer. Returns x gradient,
277
+ * bnScale gradient and bnBias gradient */
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnBatchNormalizationBackward(cudnnHandle_t handle,
280
+ cudnnBatchNormMode_t mode,
281
+ const void *alphaDataDiff,
282
+ const void *betaDataDiff,
283
+ const void *alphaParamDiff,
284
+ const void *betaParamDiff,
285
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */
286
+ const void *x,
287
+ const cudnnTensorDescriptor_t dyDesc,
288
+ const void *dy,
289
+ const cudnnTensorDescriptor_t dxDesc,
290
+ void *dx,
291
+ /* Shared tensor desc for the 4 tensors below */
292
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
293
+ const void *bnScale, /* bnBias doesn't affect backpropagation */
294
+ /* scale and bias diff are not backpropagated below this layer */
295
+ void *dBnScaleResult,
296
+ void *dBnBiasResult,
297
+ /* Same epsilon as forward pass */
298
+ double epsilon,
299
+
300
+ /* Optionally cached intermediate results from
301
+ forward pass */
302
+ const void *savedMean,
303
+ const void *savedInvVariance);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle,
307
+ cudnnBatchNormMode_t mode,
308
+ cudnnBatchNormOps_t bnOps,
309
+
310
+ const void *alphaDataDiff,
311
+ const void *betaDataDiff,
312
+ const void *alphaParamDiff,
313
+ const void *betaParamDiff,
314
+ const cudnnTensorDescriptor_t xDesc,
315
+ const void *xData,
316
+ const cudnnTensorDescriptor_t yDesc,
317
+ const void *yData,
318
+ const cudnnTensorDescriptor_t dyDesc,
319
+ const void *dyData,
320
+ const cudnnTensorDescriptor_t dzDesc,
321
+ void *dzData,
322
+ const cudnnTensorDescriptor_t dxDesc,
323
+ void *dxData,
324
+
325
+ /* Shared tensor desc for the 4 tensors below */
326
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
327
+ const void *bnScaleData,
328
+ const void *bnBiasData, /* needed if there is activation */
329
+ void *dBnScaleData,
330
+ void *dBnBiasData,
331
+ double epsilon, /* Same epsilon as forward pass */
332
+
333
+ /* Optionally cached intermediate results from
334
+ forward pass */
335
+ const void *savedMean,
336
+ const void *savedInvVariance,
337
+ cudnnActivationDescriptor_t activationDesc,
338
+ void *workSpace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle,
345
+ cudnnNormMode_t mode,
346
+ cudnnNormOps_t normOps,
347
+ cudnnNormAlgo_t algo,
348
+ const cudnnTensorDescriptor_t xDesc,
349
+ const cudnnTensorDescriptor_t zDesc,
350
+ const cudnnTensorDescriptor_t yDesc,
351
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
352
+ const cudnnActivationDescriptor_t activationDesc,
353
+ const cudnnTensorDescriptor_t normMeanVarDesc,
354
+ size_t *sizeInBytes,
355
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
356
+
357
+ cudnnStatus_t CUDNNWINAPI
358
+ cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle,
359
+ cudnnNormMode_t mode,
360
+ cudnnNormOps_t normOps,
361
+ cudnnNormAlgo_t algo,
362
+ const cudnnTensorDescriptor_t xDesc,
363
+ const cudnnTensorDescriptor_t yDesc,
364
+ const cudnnTensorDescriptor_t dyDesc,
365
+ const cudnnTensorDescriptor_t dzDesc,
366
+ const cudnnTensorDescriptor_t dxDesc,
367
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
368
+ const cudnnActivationDescriptor_t activationDesc,
369
+ const cudnnTensorDescriptor_t normMeanVarDesc,
370
+ size_t *sizeInBytes,
371
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
372
+
373
+ cudnnStatus_t CUDNNWINAPI
374
+ cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle,
375
+ cudnnNormMode_t mode,
376
+ cudnnNormOps_t normOps,
377
+ cudnnNormAlgo_t algo,
378
+ const cudnnActivationDescriptor_t activationDesc,
379
+ const cudnnTensorDescriptor_t xDesc,
380
+ size_t *sizeInBytes,
381
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
382
+
383
+ /* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */
384
+ cudnnStatus_t CUDNNWINAPI
385
+ cudnnNormalizationForwardTraining(cudnnHandle_t handle,
386
+ cudnnNormMode_t mode,
387
+ cudnnNormOps_t normOps,
388
+ cudnnNormAlgo_t algo,
389
+ const void *alpha, /* alpha[0] = result blend factor */
390
+ const void *beta, /* beta[0] = dest layer blend factor */
391
+ const cudnnTensorDescriptor_t xDesc,
392
+ const void *xData,
393
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
394
+ const void *normScale,
395
+ const void *normBias,
396
+ double exponentialAverageFactor,
397
+ const cudnnTensorDescriptor_t normMeanVarDesc,
398
+ void *resultRunningMean,
399
+ void *resultRunningVariance,
400
+ /* Has to be >= 0. Should be the same in forward and backward functions. */
401
+ double epsilon,
402
+ /* Optionally save intermediate results from the forward pass here
403
+ - can be reused to speed up backward pass. NULL if unused */
404
+ void *resultSaveMean,
405
+ void *resultSaveInvVariance,
406
+ cudnnActivationDescriptor_t activationDesc,
407
+ const cudnnTensorDescriptor_t zDesc,
408
+ const void *zData,
409
+ const cudnnTensorDescriptor_t yDesc,
410
+ void *yData,
411
+ void *workspace,
412
+ size_t workSpaceSizeInBytes,
413
+ void *reserveSpace,
414
+ size_t reserveSpaceSizeInBytes,
415
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
416
+
417
+ cudnnStatus_t CUDNNWINAPI
418
+ cudnnNormalizationBackward(cudnnHandle_t handle,
419
+ cudnnNormMode_t mode,
420
+ cudnnNormOps_t normOps,
421
+ cudnnNormAlgo_t algo,
422
+ const void *alphaDataDiff,
423
+ const void *betaDataDiff,
424
+ const void *alphaParamDiff,
425
+ const void *betaParamDiff,
426
+ const cudnnTensorDescriptor_t xDesc,
427
+ const void *xData,
428
+ const cudnnTensorDescriptor_t yDesc,
429
+ const void *yData,
430
+ const cudnnTensorDescriptor_t dyDesc,
431
+ const void *dyData,
432
+ const cudnnTensorDescriptor_t dzDesc,
433
+ void *dzData,
434
+ const cudnnTensorDescriptor_t dxDesc,
435
+ void *dxData,
436
+ /* Shared tensor desc for the 4 tensors below */
437
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
438
+ const void *normScaleData,
439
+ const void *normBiasData, /* needed if there is activation */
440
+ void *dNormScaleData,
441
+ void *dNormBiasData,
442
+ double epsilon, /* Same epsilon as forward pass */
443
+ const cudnnTensorDescriptor_t normMeanVarDesc,
444
+ /* Optionally cached intermediate results from
445
+ forward pass */
446
+ const void *savedMean,
447
+ const void *savedInvVariance,
448
+ cudnnActivationDescriptor_t activationDesc,
449
+ void *workSpace,
450
+ size_t workSpaceSizeInBytes,
451
+ void *reserveSpace,
452
+ size_t reserveSpaceSizeInBytes,
453
+ int groupCnt); /* Place hold for future work, should be set to 1 now*/
454
+
455
+ cudnnStatus_t CUDNNWINAPI
456
+ cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle,
457
+ const cudnnSpatialTransformerDescriptor_t stDesc,
458
+ const void *dgrid,
459
+ void *dtheta);
460
+
461
+ cudnnStatus_t CUDNNWINAPI
462
+ cudnnSpatialTfSamplerBackward(cudnnHandle_t handle,
463
+ cudnnSpatialTransformerDescriptor_t stDesc,
464
+ const void *alpha,
465
+ const cudnnTensorDescriptor_t xDesc,
466
+ const void *x,
467
+ const void *beta,
468
+ const cudnnTensorDescriptor_t dxDesc,
469
+ void *dx,
470
+ const void *alphaDgrid,
471
+ const cudnnTensorDescriptor_t dyDesc,
472
+ const void *dy,
473
+ const void *grid,
474
+ const void *betaDgrid,
475
+ void *dgrid);
476
+
477
+ cudnnStatus_t CUDNNWINAPI
478
+ cudnnDropoutBackward(cudnnHandle_t handle,
479
+ const cudnnDropoutDescriptor_t dropoutDesc,
480
+ const cudnnTensorDescriptor_t dydesc,
481
+ const void *dy,
482
+ const cudnnTensorDescriptor_t dxdesc,
483
+ void *dx,
484
+ void *reserveSpace,
485
+ size_t reserveSpaceSizeInBytes);
486
+
487
+ /*
488
+ * \brief Cross-library version checker.
489
+ * This function is implemented differently in each sub-library. Each sublib
490
+ * checks whether its own version matches that of its dependencies.
491
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
492
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
493
+ */
494
+ cudnnStatus_t CUDNNWINAPI
495
+ cudnnOpsTrainVersionCheck(void);
496
+
497
+ #if defined(__cplusplus)
498
+ }
499
+ #endif
500
+
501
+ #endif /* CUDNN_OPS_TRAIN_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_v8.h ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn : Neural Networks Library
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_H_)
55
+ #define CUDNN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+ #include "cudnn_ops_train.h"
63
+ #include "cudnn_adv_infer.h"
64
+ #include "cudnn_adv_train.h"
65
+ #include "cudnn_cnn_infer.h"
66
+ #include "cudnn_cnn_train.h"
67
+
68
+ #include "cudnn_backend.h"
69
+
70
+ #if defined(__cplusplus)
71
+ extern "C" {
72
+ #endif
73
+
74
+ #if defined(__cplusplus)
75
+ }
76
+ #endif
77
+
78
+ #endif /* CUDNN_H_ */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version.h ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /**
51
+ * \file: The master cuDNN version file.
52
+ */
53
+
54
+ #ifndef CUDNN_VERSION_H_
55
+ #define CUDNN_VERSION_H_
56
+
57
+ #define CUDNN_MAJOR 8
58
+ #define CUDNN_MINOR 9
59
+ #define CUDNN_PATCHLEVEL 2
60
+
61
+ #define CUDNN_VERSION (CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL)
62
+
63
+ /* cannot use constexpr here since this is a C-only file */
64
+ /* Below is the max SM version this cuDNN library is aware of and supports natively */
65
+
66
+ #define CUDNN_MAX_SM_MAJOR_NUMBER 9
67
+ #define CUDNN_MAX_SM_MINOR_NUMBER 0
68
+ #define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10)
69
+
70
+ /* Here are constants for each of the SM Architectures we support to use in code where device version checks must be
71
+ * made */
72
+
73
+ /* MAXWELL SM 50 52 53 */
74
+ #define CUDNN_SM_50 500
75
+ #define CUDNN_SM_52 520
76
+ #define CUDNN_SM_53 530
77
+
78
+ /* PASCAL SM 60 61 62 */
79
+ #define CUDNN_SM_60 600
80
+ #define CUDNN_SM_61 610
81
+ #define CUDNN_SM_62 620
82
+
83
+ /* VOLTA SM 70 72 */
84
+ #define CUDNN_SM_70 700
85
+ #define CUDNN_SM_72 720
86
+
87
+ /* TURING SM 75 */
88
+ #define CUDNN_SM_75 750
89
+
90
+ /* AMPERE SM 80 86 87 */
91
+ #define CUDNN_SM_80 800
92
+ #define CUDNN_SM_86 860
93
+ #define CUDNN_SM_87 870
94
+
95
+ /* ADA LOVELACE SM 89 */
96
+ #define CUDNN_SM_89 890
97
+
98
+ /* HOPPER SM 90 */
99
+ #define CUDNN_SM_90 900
100
+
101
+ /* END MARKER for last known version.
102
+ * This can be replaced after support for 1000 is added
103
+ */
104
+ #define CUDNN_SM_9X_END 999
105
+
106
+ /* This is the minimum version we support devices below this will return CUDNN_STATUS_ARCH_MISMATCH */
107
+ #define CUDNN_MIN_DEVICE_VERSION CUDNN_SM_50
108
+
109
+ #endif /* CUDNN_VERSION_H */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version_v8.h ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /**
51
+ * \file: The master cuDNN version file.
52
+ */
53
+
54
+ #ifndef CUDNN_VERSION_H_
55
+ #define CUDNN_VERSION_H_
56
+
57
+ #define CUDNN_MAJOR 8
58
+ #define CUDNN_MINOR 9
59
+ #define CUDNN_PATCHLEVEL 2
60
+
61
+ #define CUDNN_VERSION (CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL)
62
+
63
+ /* cannot use constexpr here since this is a C-only file */
64
+ /* Below is the max SM version this cuDNN library is aware of and supports natively */
65
+
66
+ #define CUDNN_MAX_SM_MAJOR_NUMBER 9
67
+ #define CUDNN_MAX_SM_MINOR_NUMBER 0
68
+ #define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10)
69
+
70
+ /* Here are constants for each of the SM Architectures we support to use in code where device version checks must be
71
+ * made */
72
+
73
+ /* MAXWELL SM 50 52 53 */
74
+ #define CUDNN_SM_50 500
75
+ #define CUDNN_SM_52 520
76
+ #define CUDNN_SM_53 530
77
+
78
+ /* PASCAL SM 60 61 62 */
79
+ #define CUDNN_SM_60 600
80
+ #define CUDNN_SM_61 610
81
+ #define CUDNN_SM_62 620
82
+
83
+ /* VOLTA SM 70 72 */
84
+ #define CUDNN_SM_70 700
85
+ #define CUDNN_SM_72 720
86
+
87
+ /* TURING SM 75 */
88
+ #define CUDNN_SM_75 750
89
+
90
+ /* AMPERE SM 80 86 87 */
91
+ #define CUDNN_SM_80 800
92
+ #define CUDNN_SM_86 860
93
+ #define CUDNN_SM_87 870
94
+
95
+ /* ADA LOVELACE SM 89 */
96
+ #define CUDNN_SM_89 890
97
+
98
+ /* HOPPER SM 90 */
99
+ #define CUDNN_SM_90 900
100
+
101
+ /* END MARKER for last known version.
102
+ * This can be replaced after support for 1000 is added
103
+ */
104
+ #define CUDNN_SM_9X_END 999
105
+
106
+ /* This is the minimum version we support devices below this will return CUDNN_STATUS_ARCH_MISMATCH */
107
+ #define CUDNN_MIN_DEVICE_VERSION CUDNN_SM_50
108
+
109
+ #endif /* CUDNN_VERSION_H */
llava_next/lib/python3.10/site-packages/nvidia/cudnn/lib/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/include/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/include/nvJitLink.h ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * NVIDIA_COPYRIGHT_BEGIN
3
+ *
4
+ * Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
7
+ * and proprietary rights in and to this software, related documentation
8
+ * and any modifications thereto. Any use, reproduction, disclosure or
9
+ * distribution of this software and related documentation without an express
10
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
11
+ *
12
+ * NVIDIA_COPYRIGHT_END
13
+ */
14
+
15
+ #ifndef nvJitLink_INCLUDED
16
+ #define nvJitLink_INCLUDED
17
+
18
+ #ifdef __cplusplus
19
+ extern "C" {
20
+ #endif
21
+
22
+ #include <stdint.h>
23
+ #include <stdlib.h>
24
+
25
+ /**
26
+ *
27
+ * \defgroup error Error codes
28
+ *
29
+ */
30
+
31
+ /** \ingroup error
32
+ *
33
+ * \brief The enumerated type nvJitLinkResult defines API call result codes.
34
+ * nvJitLink APIs return nvJitLinkResult codes to indicate the result.
35
+ */
36
+
37
+ typedef enum {
38
+ NVJITLINK_SUCCESS = 0,
39
+ NVJITLINK_ERROR_UNRECOGNIZED_OPTION,
40
+ NVJITLINK_ERROR_MISSING_ARCH, // -arch=sm_NN option not specified
41
+ NVJITLINK_ERROR_INVALID_INPUT,
42
+ NVJITLINK_ERROR_PTX_COMPILE,
43
+ NVJITLINK_ERROR_NVVM_COMPILE,
44
+ NVJITLINK_ERROR_INTERNAL,
45
+ NVJITLINK_ERROR_THREADPOOL,
46
+ NVJITLINK_ERROR_UNRECOGNIZED_INPUT,
47
+ NVJITLINK_ERROR_FINALIZE,
48
+ #ifdef NEW_ERROR_CODES // These error codes will appear in a future CUDA release.
49
+ NVJITLINK_ERROR_NULL_INPUT,
50
+ NVJITLINK_ERROR_INCOMPATIBLE_OPTIONS,
51
+ NVJITLINK_ERROR_INCORRECT_INPUT_TYPE,
52
+ NVJITLINK_ERROR_ARCH_MISMATCH,
53
+ NVJITLINK_ERROR_OUTDATED_LIBRARY,
54
+ NVJITLINK_ERROR_MISSING_FATBIN
55
+ #endif
56
+ } nvJitLinkResult;
57
+
58
+ #ifndef NEW_ERROR_CODES // To avoid breaking compatibility, we map them to existing error codes for now.
59
+ #define NVJITLINK_ERROR_NULL_INPUT NVJITLINK_ERROR_INVALID_INPUT
60
+ #define NVJITLINK_ERROR_INCOMPATIBLE_OPTIONS NVJITLINK_ERROR_INVALID_INPUT
61
+ #define NVJITLINK_ERROR_INCORRECT_INPUT_TYPE NVJITLINK_ERROR_INVALID_INPUT
62
+ #define NVJITLINK_ERROR_ARCH_MISMATCH NVJITLINK_ERROR_INTERNAL
63
+ #define NVJITLINK_ERROR_OUTDATED_LIBRARY NVJITLINK_ERROR_INTERNAL
64
+ #define NVJITLINK_ERROR_MISSING_FATBIN NVJITLINK_ERROR_INVALID_INPUT
65
+ #endif
66
+
67
+ /**
68
+ *
69
+ * \defgroup linking Linking
70
+ *
71
+ */
72
+
73
+ /** \ingroup linking
74
+ *
75
+ * \brief The enumerated type nvJitLinkInputType defines the kind of inputs
76
+ * that can be passed to nvJitLinkAdd* APIs.
77
+ */
78
+
79
+ typedef enum {
80
+ NVJITLINK_INPUT_NONE = 0, // error
81
+ NVJITLINK_INPUT_CUBIN = 1,
82
+ NVJITLINK_INPUT_PTX,
83
+ NVJITLINK_INPUT_LTOIR,
84
+ NVJITLINK_INPUT_FATBIN,
85
+ NVJITLINK_INPUT_OBJECT,
86
+ NVJITLINK_INPUT_LIBRARY,
87
+ NVJITLINK_INPUT_INDEX,
88
+ NVJITLINK_INPUT_ANY = 10 // will dynamically determine one of above types
89
+ } nvJitLinkInputType;
90
+
91
+ /**
92
+ * \defgroup options Supported Link Options
93
+ *
94
+ * nvJitLink supports the link options below.
95
+ * Option names are prefixed with a single dash (\c -).
96
+ * Options that take a value have an assignment operator (\c =)
97
+ * followed by the option value, with no spaces, e.g. \c "-arch=sm_90".
98
+ *
99
+ * The supported options are:
100
+ * - \c -arch=sm_<N\> \n
101
+ * Pass SM architecture value. See nvcc for valid values of <N\>.
102
+ * Can use compute_<N\> value instead if only generating PTX.
103
+ * This is a required option.
104
+ * - \c -maxrregcount=<N\> \n
105
+ * Maximum register count.
106
+ * - \c -time \n
107
+ * Print timing information to InfoLog.
108
+ * - \c -verbose \n
109
+ * Print verbose messages to InfoLog.
110
+ * - \c -lto \n
111
+ * Do link time optimization.
112
+ * - \c -ptx \n
113
+ * Emit ptx after linking instead of cubin; only supported with \c -lto
114
+ * - \c -O<N\> \n
115
+ * Optimization level. Only 0 and 3 are accepted.
116
+ * - \c -g \n
117
+ * Generate debug information.
118
+ * - \c -lineinfo \n
119
+ * Generate line information.
120
+ * - \c -ftz=<n\> \n
121
+ * Flush to zero.
122
+ * - \c -prec-div=<n\> \n
123
+ * Precise divide.
124
+ * - \c -prec-sqrt=<n\> \n
125
+ * Precise square root.
126
+ * - \c -fma=<n\> \n
127
+ * Fast multiply add.
128
+ * - \c -kernels-used=<name\> \n
129
+ * Pass list of kernels that are used; any not in the list can be removed.
130
+ * This option can be specified multiple times.
131
+ * - \c -variables-used=<name\> \n
132
+ * Pass list of variables that are used; any not in the list can be removed.
133
+ * This option can be specified multiple times.
134
+ * - \c -optimize-unused-variables \n
135
+ * Normally device code optimization is limited by not knowing what the
136
+ * host code references. With this option it can assume that if a variable
137
+ * is not referenced in device code then it can be removed.
138
+ * - \c -Xptxas=<opt\> \n
139
+ * Pass <opt\> to ptxas. This option can be called multiple times.
140
+ * - \c -split-compile=<N\> \n
141
+ * Split compilation maximum thread count. Use 0 to use all available processors.
142
+ * Value of 1 disables split compilation (default).
143
+ * - \c -split-compile-extended=<N\> \n
144
+ * A more aggressive form of split compilation available in LTO mode only.
145
+ * Accepts a maximum thread count value. Use 0 to use all available processors.
146
+ * Value of 1 disables extended split compilation (default).
147
+ * Note: This option can potentially impact performance of the compiled binary.
148
+ * - \c -jump-table-density=<N\> \n
149
+ * When doing LTO, specify the case density percentage in switch statements,
150
+ * and use it as a minimal threshold to determine whether jump table(brx.idx
151
+ * instruction) will be used to implement a switch statement. Default
152
+ * value is 101. The percentage ranges from 0 to 101 inclusively.
153
+ * - \c -no-cache \n
154
+ * Don't cache the intermediate steps of nvJitLink.
155
+ * - \c -device-stack-protector \n
156
+ * Enable stack canaries in device code.
157
+ * Stack canaries make it more difficult to exploit certain types of memory safety bugs involving stack-local variables.
158
+ * The compiler uses heuristics to assess the risk of such a bug in each function. Only those functions which are deemed high-risk make use of a stack canary.
159
+ */
160
+
161
+ /**
162
+ * \ingroup linking
163
+ * \brief nvJitLinkHandle is the unit of linking, and an opaque handle for
164
+ * a program.
165
+ *
166
+ * To link inputs, an instance of nvJitLinkHandle must be created first with
167
+ * nvJitLinkCreate().
168
+ */
169
+
170
+ typedef struct nvJitLink* nvJitLinkHandle; // opaque handle
171
+
172
+ // For versioning we will have separate API version for each library version
173
+
174
+ extern nvJitLinkResult __nvJitLinkCreate_12_6(
175
+ nvJitLinkHandle *handle,
176
+ uint32_t numOptions,
177
+ const char **options);
178
+ /**
179
+ * \ingroup linking
180
+ * \brief nvJitLinkCreate creates an instance of nvJitLinkHandle with the
181
+ * given input options, and sets the output parameter \p handle.
182
+ *
183
+ * \param [out] handle Address of nvJitLink handle.
184
+ * \param [in] numOptions Number of options passed.
185
+ * \param [in] options Array of size \p numOptions of option strings.
186
+ * \return
187
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
188
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_UNRECOGNIZED_OPTION\endlink
189
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_MISSING_ARCH\endlink
190
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
191
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
192
+ *
193
+ * It supports options listed in \ref options.
194
+ *
195
+ * \see nvJitLinkDestroy
196
+ */
197
+ #ifndef NVJITLINK_NO_INLINE
198
+ static inline nvJitLinkResult nvJitLinkCreate(
199
+ nvJitLinkHandle *handle,
200
+ uint32_t numOptions,
201
+ const char **options)
202
+ {
203
+ return __nvJitLinkCreate_12_6 (handle, numOptions, options);
204
+ }
205
+ #endif
206
+
207
+ extern nvJitLinkResult __nvJitLinkDestroy_12_6 (nvJitLinkHandle *handle);
208
+ /**
209
+ * \ingroup linking
210
+ * \brief nvJitLinkDestroy frees the memory associated with the given handle
211
+ * and sets it to NULL.
212
+ *
213
+ * \param [in] handle Address of nvJitLink handle.
214
+ * \return
215
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
216
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
217
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
218
+ *
219
+ * \see nvJitLinkCreate
220
+ */
221
+ #ifndef NVJITLINK_NO_INLINE
222
+ static inline nvJitLinkResult nvJitLinkDestroy (nvJitLinkHandle *handle)
223
+ {
224
+ return __nvJitLinkDestroy_12_6 (handle);
225
+ }
226
+ #endif
227
+
228
+ extern nvJitLinkResult __nvJitLinkAddData_12_6(
229
+ nvJitLinkHandle handle,
230
+ nvJitLinkInputType inputType,
231
+ const void *data,
232
+ size_t size,
233
+ const char *name); // name can be null
234
+ /**
235
+ * \ingroup linking
236
+ * \brief nvJitLinkAddData adds data image to the link.
237
+ *
238
+ * \param [in] handle nvJitLink handle.
239
+ * \param [in] inputType kind of input.
240
+ * \param [in] data pointer to data image in memory.
241
+ * \param [in] size size of the data.
242
+ * \param [in] name name of input object.
243
+ * \return
244
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
245
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
246
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
247
+ */
248
+ #ifndef NVJITLINK_NO_INLINE
249
+ static inline nvJitLinkResult nvJitLinkAddData(
250
+ nvJitLinkHandle handle,
251
+ nvJitLinkInputType inputType,
252
+ const void *data,
253
+ size_t size,
254
+ const char *name) // name can be null
255
+ {
256
+ return __nvJitLinkAddData_12_6 (handle, inputType, data, size, name);
257
+ }
258
+ #endif
259
+
260
+ extern nvJitLinkResult __nvJitLinkAddFile_12_6(
261
+ nvJitLinkHandle handle,
262
+ nvJitLinkInputType inputType,
263
+ const char *fileName); // includes path to file
264
+ /**
265
+ * \ingroup linking
266
+ * \brief nvJitLinkAddFile reads data from file and links it in.
267
+ *
268
+ * \param [in] handle nvJitLink handle.
269
+ * \param [in] inputType kind of input.
270
+ * \param [in] fileName name of file.
271
+ * \return
272
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
273
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
274
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
275
+ */
276
+ #ifndef NVJITLINK_NO_INLINE
277
+ static inline nvJitLinkResult nvJitLinkAddFile(
278
+ nvJitLinkHandle handle,
279
+ nvJitLinkInputType inputType,
280
+ const char *fileName) // includes path to file
281
+ {
282
+ return __nvJitLinkAddFile_12_6 (handle, inputType, fileName);
283
+ }
284
+ #endif
285
+
286
+ extern nvJitLinkResult __nvJitLinkComplete_12_6 (nvJitLinkHandle handle);
287
+ /**
288
+ * \ingroup linking
289
+ * \brief nvJitLinkComplete does the actual link.
290
+ *
291
+ * \param [in] handle nvJitLink handle.
292
+ * \return
293
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
294
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
295
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
296
+ */
297
+ #ifndef NVJITLINK_NO_INLINE
298
+ static inline nvJitLinkResult nvJitLinkComplete (nvJitLinkHandle handle)
299
+ {
300
+ return __nvJitLinkComplete_12_6 (handle);
301
+ }
302
+ #endif
303
+
304
+ extern nvJitLinkResult __nvJitLinkGetLinkedCubinSize_12_6(
305
+ nvJitLinkHandle handle,
306
+ size_t *size);
307
+ /**
308
+ * \ingroup linking
309
+ * \brief nvJitLinkGetLinkedCubinSize gets the size of the linked cubin.
310
+ *
311
+ * \param [in] handle nvJitLink handle.
312
+ * \param [out] size Size of the linked cubin.
313
+ * \return
314
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
315
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
316
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
317
+ *
318
+ * \see nvJitLinkGetLinkedCubin
319
+ */
320
+ #ifndef NVJITLINK_NO_INLINE
321
+ static inline nvJitLinkResult nvJitLinkGetLinkedCubinSize(
322
+ nvJitLinkHandle handle,
323
+ size_t *size)
324
+ {
325
+ return __nvJitLinkGetLinkedCubinSize_12_6 (handle, size);
326
+ }
327
+ #endif
328
+
329
+ extern nvJitLinkResult __nvJitLinkGetLinkedCubin_12_6(
330
+ nvJitLinkHandle handle,
331
+ void *cubin);
332
+ /**
333
+ * \ingroup linking
334
+ * \brief nvJitLinkGetLinkedCubin gets the linked cubin.
335
+ *
336
+ * \param [in] handle nvJitLink handle.
337
+ * \param [out] cubin The linked cubin.
338
+ * \return
339
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
340
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
341
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
342
+ *
343
+ * User is responsible for allocating enough space to hold the \p cubin.
344
+ * \see nvJitLinkGetLinkedCubinSize
345
+ */
346
+ #ifndef NVJITLINK_NO_INLINE
347
+ static inline nvJitLinkResult nvJitLinkGetLinkedCubin(
348
+ nvJitLinkHandle handle,
349
+ void *cubin)
350
+ {
351
+ return __nvJitLinkGetLinkedCubin_12_6 (handle, cubin);
352
+ }
353
+ #endif
354
+
355
+ extern nvJitLinkResult __nvJitLinkGetLinkedPtxSize_12_6(
356
+ nvJitLinkHandle handle,
357
+ size_t *size);
358
+ /**
359
+ * \ingroup linking
360
+ * \brief nvJitLinkGetLinkedPtxSize gets the size of the linked ptx.
361
+ *
362
+ * \param [in] handle nvJitLink handle.
363
+ * \param [out] size Size of the linked PTX.
364
+ * \return
365
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
366
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
367
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
368
+ *
369
+ * Linked PTX is only available when using the \c -lto option.
370
+ * \see nvJitLinkGetLinkedPtx
371
+ */
372
+ #ifndef NVJITLINK_NO_INLINE
373
+ static inline nvJitLinkResult nvJitLinkGetLinkedPtxSize(
374
+ nvJitLinkHandle handle,
375
+ size_t *size)
376
+ {
377
+ return __nvJitLinkGetLinkedPtxSize_12_6 (handle, size);
378
+ }
379
+ #endif
380
+
381
+ extern nvJitLinkResult __nvJitLinkGetLinkedPtx_12_6(
382
+ nvJitLinkHandle handle,
383
+ char *ptx);
384
+ /**
385
+ * \ingroup linking
386
+ * \brief nvJitLinkGetLinkedPtx gets the linked ptx.
387
+ *
388
+ * \param [in] handle nvJitLink handle.
389
+ * \param [out] ptx The linked PTX.
390
+ * \return
391
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
392
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
393
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
394
+ *
395
+ * Linked PTX is only available when using the \c -lto option.
396
+ * User is responsible for allocating enough space to hold the \p ptx.
397
+ * \see nvJitLinkGetLinkedPtxSize
398
+ */
399
+ #ifndef NVJITLINK_NO_INLINE
400
+ static inline nvJitLinkResult nvJitLinkGetLinkedPtx(
401
+ nvJitLinkHandle handle,
402
+ char *ptx)
403
+ {
404
+ return __nvJitLinkGetLinkedPtx_12_6 (handle, ptx);
405
+ }
406
+ #endif
407
+
408
+ extern nvJitLinkResult __nvJitLinkGetErrorLogSize_12_6(
409
+ nvJitLinkHandle handle,
410
+ size_t *size);
411
+ /**
412
+ * \ingroup linking
413
+ * \brief nvJitLinkGetErrorLogSize gets the size of the error log.
414
+ *
415
+ * \param [in] handle nvJitLink handle.
416
+ * \param [out] size Size of the error log.
417
+ * \return
418
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
419
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
420
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
421
+ *
422
+ * \see nvJitLinkGetErrorLog
423
+ */
424
+ #ifndef NVJITLINK_NO_INLINE
425
+ static inline nvJitLinkResult nvJitLinkGetErrorLogSize(
426
+ nvJitLinkHandle handle,
427
+ size_t *size)
428
+ {
429
+ return __nvJitLinkGetErrorLogSize_12_6 (handle, size);
430
+ }
431
+ #endif
432
+
433
+ extern nvJitLinkResult __nvJitLinkGetErrorLog_12_6(
434
+ nvJitLinkHandle handle,
435
+ char *log);
436
+ /**
437
+ * \ingroup linking
438
+ * \brief nvJitLinkGetErrorLog puts any error messages in the log.
439
+ *
440
+ * \param [in] handle nvJitLink handle.
441
+ * \param [out] log The error log.
442
+ * \return
443
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
444
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
445
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
446
+ *
447
+ * User is responsible for allocating enough space to hold the \p log.
448
+ * \see nvJitLinkGetErrorLogSize
449
+ */
450
+ #ifndef NVJITLINK_NO_INLINE
451
+ static inline nvJitLinkResult nvJitLinkGetErrorLog(
452
+ nvJitLinkHandle handle,
453
+ char *log)
454
+ {
455
+ return __nvJitLinkGetErrorLog_12_6 (handle, log);
456
+ }
457
+ #endif
458
+
459
+ extern nvJitLinkResult __nvJitLinkGetInfoLogSize_12_6(
460
+ nvJitLinkHandle handle,
461
+ size_t *size);
462
+ /**
463
+ * \ingroup linking
464
+ * \brief nvJitLinkGetInfoLogSize gets the size of the info log.
465
+ *
466
+ * \param [in] handle nvJitLink handle.
467
+ * \param [out] size Size of the info log.
468
+ * \return
469
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
470
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
471
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
472
+ *
473
+ * \see nvJitLinkGetInfoLog
474
+ */
475
+ #ifndef NVJITLINK_NO_INLINE
476
+ static inline nvJitLinkResult nvJitLinkGetInfoLogSize(
477
+ nvJitLinkHandle handle,
478
+ size_t *size)
479
+ {
480
+ return __nvJitLinkGetInfoLogSize_12_6 (handle, size);
481
+ }
482
+ #endif
483
+
484
+ extern nvJitLinkResult __nvJitLinkGetInfoLog_12_6(
485
+ nvJitLinkHandle handle,
486
+ char *log);
487
+ /**
488
+ * \ingroup linking
489
+ * \brief nvJitLinkGetInfoLog puts any info messages in the log.
490
+ *
491
+ * \param [in] handle nvJitLink handle.
492
+ * \param [out] log The info log.
493
+ * \return
494
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
495
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
496
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
497
+ *
498
+ * User is responsible for allocating enough space to hold the \p log.
499
+ * \see nvJitLinkGetInfoLogSize
500
+ */
501
+ #ifndef NVJITLINK_NO_INLINE
502
+ static inline nvJitLinkResult nvJitLinkGetInfoLog(
503
+ nvJitLinkHandle handle,
504
+ char *log)
505
+ {
506
+ return __nvJitLinkGetInfoLog_12_6 (handle, log);
507
+ }
508
+ #endif
509
+
510
+ /**
511
+ * \ingroup linking
512
+ * \brief nvJitLinkVersion returns the current version of nvJitLink.
513
+ *
514
+ * \param [out] major The major version.
515
+ * \param [out] minor The minor version.
516
+ * \return
517
+ * - \link #nvJitLinkResult NVJITLINK_SUCCESS \endlink
518
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INVALID_INPUT\endlink
519
+ * - \link #nvJitLinkResult NVJITLINK_ERROR_INTERNAL\endlink
520
+ *
521
+ */
522
+ extern nvJitLinkResult nvJitLinkVersion(
523
+ unsigned int *major,
524
+ unsigned int *minor);
525
+
526
+ #ifdef __cplusplus
527
+ }
528
+ #endif
529
+
530
+ #endif // nvJitLink_INCLUDED
531
+
llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/nvtx/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/nvtx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (167 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/nvToolsExt.h ADDED
@@ -0,0 +1,1561 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO USER:
5
+ *
6
+ * This source code is subject to NVIDIA ownership rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * This software and the information contained herein is PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
11
+ * of a form of NVIDIA software license agreement.
12
+ *
13
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
14
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
15
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
16
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
17
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
18
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
19
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
20
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
21
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
22
+ * OR PERFORMANCE OF THIS SOURCE CODE.
23
+ *
24
+ * U.S. Government End Users. This source code is a "commercial item" as
25
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
26
+ * "commercial computer software" and "commercial computer software
27
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
28
+ * and is provided to the U.S. Government only as a commercial end item.
29
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
30
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
31
+ * source code with only those rights set forth herein.
32
+ *
33
+ * Any use of this source code in individual and commercial software must
34
+ * include, in the user documentation and internal comments to the code,
35
+ * the above Disclaimer and U.S. Government End Users Notice.
36
+ */
37
+
38
+ /** \file nvToolsExt.h
39
+ */
40
+
41
+ /* ========================================================================= */
42
+ /** \mainpage
43
+ * \tableofcontents
44
+ * \section INTRODUCTION Introduction
45
+ *
46
+ * The NVIDIA Tools Extension library is a set of functions that a
47
+ * developer can use to provide additional information to tools.
48
+ * The additional information is used by the tool to improve
49
+ * analysis and visualization of data.
50
+ *
51
+ * The library introduces close to zero overhead if no tool is
52
+ * attached to the application. The overhead when a tool is
53
+ * attached is specific to the tool.
54
+ *
55
+ * \section INITIALIZATION_SECTION Initialization
56
+ *
57
+ * Typically the tool's library that plugs into NVTX is indirectly
58
+ * loaded via environmental properties that are platform specific.
59
+ * For some platform or special cases, the user may be required
60
+ * to explicitly initialize it instead. This can also
61
+ * be helpful to control when the API loads a tool's library instead
62
+ * of what would typically be the first function call to emit info.
63
+ * For these rare case, see \ref INITIALIZATION for additional information.
64
+ *
65
+ * \section MARKERS_AND_RANGES Markers and Ranges
66
+ *
67
+ * Markers and ranges are used to describe events at a specific time (markers)
68
+ * or over a time span (ranges) during the execution of the application
69
+ * respectively.
70
+ *
71
+ * \subsection MARKERS Markers
72
+ *
73
+ * Markers denote specific moments in time.
74
+ *
75
+ *
76
+ * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
77
+ * how to specify the domain.
78
+ *
79
+ * \subsection THREAD_RANGES Thread Ranges
80
+ *
81
+ * Thread ranges denote nested time ranges. Nesting is maintained per thread
82
+ * per domain and does not require any additional correlation mechanism. The
83
+ * duration of a thread range is defined by the corresponding pair of
84
+ * nvtxRangePush* to nvtxRangePop API calls.
85
+ *
86
+ * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
87
+ * how to specify the domain.
88
+ *
89
+ * \subsection PROCESS_RANGES Process Ranges
90
+ *
91
+ * Process ranges denote a time span that can expose arbitrary concurrency, as
92
+ * opposed to thread ranges that only support nesting. In addition the range
93
+ * start event can happen on a different thread than the end marker. For the
94
+ * correlation of a start/end pair an unique correlation ID is used that is
95
+ * returned from the start API call and needs to be passed into the end API
96
+ * call.
97
+ *
98
+ * \subsection EVENT_ATTRIBUTES Event Attributes
99
+ *
100
+ * \ref MARKERS_AND_RANGES can be annotated with various attributes to provide
101
+ * additional information for an event or to guide the tool's visualization of
102
+ * the data. Each of the attributes is optional and if left unused the
103
+ * attributes fall back to a default value. The attributes include:
104
+ * - color
105
+ * - category
106
+ *
107
+ * To specify any attribute other than the text message, the \ref
108
+ * EVENT_ATTRIBUTE_STRUCTURE "Event Attribute Structure" must be used.
109
+ *
110
+ * \section DOMAINS Domains
111
+ *
112
+ * Domains enable developers to scope annotations. By default all events and
113
+ * annotations are in the default domain. Additional domains can be registered.
114
+ * This allows developers to scope markers, ranges, and resources names to
115
+ * avoid conflicts.
116
+ *
117
+ * The function ::nvtxDomainCreateA or ::nvtxDomainCreateW is used to create
118
+ * a named domain.
119
+ *
120
+ * Each domain maintains its own
121
+ * - categories
122
+ * - thread range stacks
123
+ * - registered strings
124
+ *
125
+ * The function ::nvtxDomainDestroy marks the end of the domain. Destroying
126
+ * a domain unregisters and destroys all objects associated with it such as
127
+ * registered strings, resource objects, named categories, and started ranges.
128
+ *
129
+ * \section RESOURCE_NAMING Resource Naming
130
+ *
131
+ * This section covers calls that allow to annotate objects with user-provided
132
+ * names in order to allow for a better analysis of complex trace data. All of
133
+ * the functions take the handle or the ID of the object to name and the name.
134
+ * The functions can be called multiple times during the execution of an
135
+ * application, however, in that case it is implementation dependent which
136
+ * name will be reported by the tool.
137
+ *
138
+ * \subsection CATEGORY_NAMING Category Naming
139
+ *
140
+ * Some function in this library support associating an integer category
141
+ * to enable filtering and sorting. The category naming functions allow
142
+ * the application to associate a user friendly name with the integer
143
+ * category. Support for domains have been added in NVTX_VERSION_2 to
144
+ * avoid collisions when domains are developed independently.
145
+ *
146
+ * \subsection RESOURCE_OBJECTS Resource Objects
147
+ *
148
+ * Resource objects are a generic mechanism for attaching data to an application
149
+ * resource. The identifier field makes the association to a pointer or handle,
150
+ * while the type field helps provide deeper understanding of the identifier as
151
+ * well as enabling differentiation in cases where handles generated by different
152
+ * APIs may collide. The resource object may also have an associated message to
153
+ * associate with the application resource, enabling further annotation of this
154
+ * object and how it is used.
155
+ *
156
+ * The resource object was introduced in NVTX_VERSION_2 to supersede existing naming
157
+ * functions and allow the application resource identified by those functions to be
158
+ * associated to a domain. The other naming functions are still supported for backward
159
+ * compatibility but will be associated only to the default domain.
160
+ *
161
+ * \subsection RESOURCE_NAMING_OS Resource Naming
162
+ *
163
+ * Some operating system resources creation APIs do not support providing a user friendly
164
+ * name, such as some OS thread creation APIs. This API support resource naming though
165
+ * both through resource objects and functions following the pattern
166
+ * nvtxName[RESOURCE_TYPE][A|W](identifier, name). Resource objects introduced in NVTX_VERSION 2
167
+ * supersede the other functions with a more general method of assigning names to OS resources,
168
+ * along with associating them to domains too. The older nvtxName* functions are only associated
169
+ * with the default domain.
170
+ * \section EXTENSIONS Optional Extensions
171
+ * Optional extensions will either appear within the existing sections they extend or appear
172
+ * in the "Related Pages" when they introduce new concepts.
173
+ */
174
+
175
/* Include guard plus per-platform linkage and calling-convention macros.
 * On MSVC the public API is dllimport for consumers (dllexport is handled
 * by the implementation via NVTX_EXPORTS) and uses __stdcall; on other
 * compilers (GCC/Clang) the macros expand to nothing / plain inline. */
#ifndef NVTOOLSEXT_H_
#define NVTOOLSEXT_H_

#if defined(_MSC_VER)
#ifdef NVTX_EXPORTS
#define NVTX_DECLSPEC
#else
#define NVTX_DECLSPEC __declspec(dllimport)
#endif /* NVTX_EXPORTS */
#define NVTX_API __stdcall
#define NVTX_INLINE_STATIC __inline static
#else /*defined(__GNUC__)*/
#define NVTX_DECLSPEC
#define NVTX_API
#define NVTX_INLINE_STATIC inline static
#endif /* Platform */
191
+
192
/**
 * The nvToolsExt library depends on stdint.h. If the build tool chain in use
 * does not include stdint.h then define NVTX_STDINT_TYPES_ALREADY_DEFINED
 * and define the following types:
 * <ul>
 *   <li>uint8_t
 *   <li>int8_t
 *   <li>uint16_t
 *   <li>int16_t
 *   <li>uint32_t
 *   <li>int32_t
 *   <li>uint64_t
 *   <li>int64_t
 *   <li>uintptr_t
 *   <li>intptr_t
 * </ul>
 * #define NVTX_STDINT_TYPES_ALREADY_DEFINED if you are using your own header file.
 */
#ifndef NVTX_STDINT_TYPES_ALREADY_DEFINED
#include <stdint.h>
#endif

#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
219
+
220
/**
 * Tools Extension API version
 */
#define NVTX_VERSION 2

/**
 * Size of the nvtxEventAttributes_t structure.
 */
#define NVTX_EVENT_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxEventAttributes_t) ) )

/**
 * Size of the nvtxInitializationAttributes_t structure.
 */
#define NVTX_INITIALIZATION_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxInitializationAttributes_t) ) )

/* Sentinel return value for push/pop calls when depth tracking is disabled. */
#define NVTX_NO_PUSH_POP_TRACKING ((int)-2)

/* Correlation ID returned by range-start calls and consumed by range-end calls. */
typedef uint64_t nvtxRangeId_t;
238
+
239
+
240
/** \brief String Handle Structure.
 * \anchor STRING_HANDLE_STRUCTURE
 *
 * This structure is opaque to the user and is used as a handle to reference
 * a string. The tools will return a pointer through the API for the application
 * to hold on its behalf to reference the string in the future.
 *
 */
typedef struct nvtxStringHandle* nvtxStringHandle_t;

/** \brief Domain Handle Structure.
 * \anchor DOMAIN_HANDLE_STRUCTURE
 *
 * This structure is opaque to the user and is used as a handle to reference
 * a domain. The tools will return a pointer through the API for the application
 * to hold on its behalf to reference the domain in the future.
 *
 */
typedef struct nvtxDomainHandle* nvtxDomainHandle_t;
259
+
260
+
261
+
262
+
263
+
264
+
265
+ /* ========================================================================= */
266
+ /** \defgroup GENERAL General
267
+ * @{
268
+ */
269
+
270
/** ---------------------------------------------------------------------------
 * Color Types
 * ------------------------------------------------------------------------- */
typedef enum nvtxColorType_t
{
    NVTX_COLOR_UNKNOWN  = 0,            /**< Color attribute is unused. */
    NVTX_COLOR_ARGB     = 1             /**< An ARGB color is provided. */
} nvtxColorType_t;
278
+
279
/** ---------------------------------------------------------------------------
 * Message Types
 * ------------------------------------------------------------------------- */
typedef enum nvtxMessageType_t
{
    NVTX_MESSAGE_UNKNOWN          = 0,    /**< Message payload is unused. */
    NVTX_MESSAGE_TYPE_ASCII       = 1,    /**< A character sequence is used as payload. */
    NVTX_MESSAGE_TYPE_UNICODE     = 2,    /**< A wide character sequence is used as payload. */
    /* NVTX_VERSION_2 */
    NVTX_MESSAGE_TYPE_REGISTERED  = 3     /**< A unique string handle that was registered
                                               with \ref nvtxDomainRegisterStringA() or
                                               \ref nvtxDomainRegisterStringW(). */
} nvtxMessageType_t;
292
+
293
/* Message payload; the active member is selected by a companion
 * nvtxMessageType_t field (see nvtxEventAttributes_t::messageType). */
typedef union nvtxMessageValue_t
{
    const char* ascii;             /* NVTX_MESSAGE_TYPE_ASCII */
    const wchar_t* unicode;        /* NVTX_MESSAGE_TYPE_UNICODE */
    /* NVTX_VERSION_2 */
    nvtxStringHandle_t registered; /* NVTX_MESSAGE_TYPE_REGISTERED */
} nvtxMessageValue_t;
300
+
301
+
302
+ /** @} */ /*END defgroup*/
303
+
304
+ /* ========================================================================= */
305
+ /** \defgroup INITIALIZATION Initialization
306
+ * @{
307
+ * Typically the tool's library that plugs into NVTX is indirectly
308
+ * loaded via environmental properties that are platform specific.
309
+ * For some platform or special cases, the user may be required
310
+ * to explicitly initialize it instead. This can also
311
+ * be helpful to control when the API loads a tool's library instead
312
+ * of what would typically be the first function call to emit info.
313
+ */
314
+
315
/** ---------------------------------------------------------------------------
 * Initialization Modes
 * ------------------------------------------------------------------------- */
typedef enum nvtxInitializationMode_t
{
    NVTX_INITIALIZATION_MODE_UNKNOWN      = 0,    /**< A platform that supports indirect initialization will attempt this style, otherwise expect failure. */
    NVTX_INITIALIZATION_MODE_CALLBACK_V1  = 1,    /**< A function pointer conforming to NVTX_VERSION=1 will be used. */
    NVTX_INITIALIZATION_MODE_CALLBACK_V2  = 2,    /**< A function pointer conforming to NVTX_VERSION=2 will be used. */
    NVTX_INITIALIZATION_MODE_SIZE                 /* Count of valid modes; not itself a valid mode. */
} nvtxInitializationMode_t;
325
+
326
+
327
/** \brief Initialization Attribute Structure.
 * \anchor INITIALIZATION_ATTRIBUTE_STRUCTURE
 *
 * This structure is used to describe the attributes used for initialization
 * of the NVTX API.
 *
 * \par Initializing the Attributes
 *
 * The caller should always perform the following three tasks when using
 * attributes:
 * <ul>
 *    <li>Zero the structure
 *    <li>Set the version field
 *    <li>Set the size field
 * </ul>
 *
 * Zeroing the structure sets all the event attributes types and values
 * to the default value.
 *
 * The version and size field are used by the Tools Extension
 * implementation to handle multiple versions of the attributes structure.
 * NVTX_INITIALIZATION_ATTRIB_STRUCT_SIZE may be used for the size.
 *
 * It is recommended that the caller use one of the following two methods
 * to initialize the event attributes structure:
 *
 * \par Method 1: Initializing nvtxInitializationAttributes_t for future compatibility
 * \code
 * nvtxInitializationAttributes_t initAttribs = {0};
 * initAttribs.version = NVTX_VERSION;
 * initAttribs.size = NVTX_INITIALIZATION_ATTRIB_STRUCT_SIZE;
 * \endcode
 *
 * \par Method 2: Initializing nvtxInitializationAttributes_t for a specific version
 * \code
 * nvtxInitializationAttributes_t initAttribs = {0};
 * initAttribs.version = 2;
 * initAttribs.size = (uint16_t)(sizeof(nvtxInitializationAttributes_v2));
 * \endcode
 *
 * If the caller uses Method 1 it is critical that the entire binary
 * layout of the structure be configured to 0 so that all fields
 * are initialized to the default value.
 *
 * The caller should either use both NVTX_VERSION and
 * NVTX_INITIALIZATION_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
 * and a versioned type (Method 2). Using a mix of the two methods
 * will likely cause either source level incompatibility or binary
 * incompatibility in the future.
 *
 * \par Setting Attribute Types and Values
 *
 * \par Example:
 * \code
 * // Initialize
 * nvtxInitializationAttributes_t initAttribs = {0};
 * initAttribs.version = NVTX_VERSION;
 * initAttribs.size = NVTX_INITIALIZATION_ATTRIB_STRUCT_SIZE;
 *
 * // Configure the Attributes
 * initAttribs.mode = NVTX_INITIALIZATION_MODE_CALLBACK_V2;
 * initAttribs.fnptr = InitializeInjectionNvtx2;
 * \endcode
 *
 * \sa
 * ::nvtxInitializationMode_t
 * ::nvtxInitialize
 */
typedef struct nvtxInitializationAttributes_v2
{
    /**
    * \brief Version flag of the structure.
    *
    * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
    * supported in this header file. This can optionally be overridden to
    * another version of the tools extension library.
    */
    uint16_t version;

    /**
    * \brief Size of the structure.
    *
    * Needs to be set to the size in bytes of the event attribute
    * structure used to specify the event.
    */
    uint16_t size;

    /**
     * \brief Mode of initialization.
     *
     * The mode of initialization dictates the overall behavior and which
     * attributes in this struct will be used.
     *
     * Default Value is NVTX_INITIALIZATION_MODE_UNKNOWN = 0
     * \sa
     * ::nvtxInitializationMode_t
     */
    uint32_t mode;

    /**
    * \brief Function pointer used for initialization if the mode requires it.
    *
    * The user has retrieved this function pointer from the tool library
    * and would like to use it to initialize. The mode must be set to a
    * NVTX_INITIALIZATION_MODE_CALLBACK_V# for this to be used. The mode
    * will dictate the expectations for this member. The function signature
    * will be cast from void(*)() to the appropriate signature for the mode.
    * The expected behavior of the function will also depend on the mode
    * beyond the simple function signature.
    *
    * Default Value is NVTX_INITIALIZATION_MODE_UNKNOWN which will either
    * initialize based on external properties or fail if not supported on
    * the given platform.
    *
    * \sa
    * ::nvtxInitializationMode_t
    */
    void(*fnptr)(void);

} nvtxInitializationAttributes_v2;

typedef struct nvtxInitializationAttributes_v2 nvtxInitializationAttributes_t;
450
+
451
+
452
/* ------------------------------------------------------------------------- */
/** \brief Force initialization (optional on most platforms)
 *
 * Force NVTX library to initialize. On some platforms NVTX will implicitly
 * initialize upon the first function call into an NVTX API.
 *
 * \return Result codes are simplest to assume NVTX_SUCCESS or !NVTX_SUCCESS
 *
 * \param initAttrib - The initialization attribute structure
 *
 * \sa
 * ::nvtxInitializationAttributes_t
 *
 * \version \NVTX_VERSION_2
 * @{ */
NVTX_DECLSPEC int NVTX_API nvtxInitialize(const nvtxInitializationAttributes_t* initAttrib);
/** @} */
469
+
470
+
471
+ /** @} */ /*END defgroup*/
472
+
473
+ /* ========================================================================= */
474
+ /** \defgroup EVENT_ATTRIBUTES Event Attributes
475
+ * @{
476
+ */
477
+
478
/** ---------------------------------------------------------------------------
 * Payload Types
 * ------------------------------------------------------------------------- */
typedef enum nvtxPayloadType_t
{
    NVTX_PAYLOAD_UNKNOWN                = 0,   /**< Payload attribute is unused. */
    NVTX_PAYLOAD_TYPE_UNSIGNED_INT64    = 1,   /**< A 64 bit unsigned integer value is used as payload. */
    NVTX_PAYLOAD_TYPE_INT64             = 2,   /**< A 64 bit signed integer value is used as payload. */
    NVTX_PAYLOAD_TYPE_DOUBLE            = 3,   /**< A 64 bit floating point value is used as payload. */
    /* NVTX_VERSION_2 */
    NVTX_PAYLOAD_TYPE_UNSIGNED_INT32    = 4,   /**< A 32 bit unsigned integer value is used as payload. */
    NVTX_PAYLOAD_TYPE_INT32             = 5,   /**< A 32 bit signed integer value is used as payload. */
    NVTX_PAYLOAD_TYPE_FLOAT             = 6    /**< A 32 bit floating point value is used as payload. */
} nvtxPayloadType_t;
492
+
493
/** \brief Event Attribute Structure.
 * \anchor EVENT_ATTRIBUTE_STRUCTURE
 *
 * This structure is used to describe the attributes of an event. The layout of
 * the structure is defined by a specific version of the tools extension
 * library and can change between different versions of the Tools Extension
 * library.
 *
 * \par Initializing the Attributes
 *
 * The caller should always perform the following three tasks when using
 * attributes:
 * <ul>
 *    <li>Zero the structure
 *    <li>Set the version field
 *    <li>Set the size field
 * </ul>
 *
 * Zeroing the structure sets all the event attributes types and values
 * to the default value.
 *
 * The version and size field are used by the Tools Extension
 * implementation to handle multiple versions of the attributes structure.
 *
 * It is recommended that the caller use one of the following two methods
 * to initialize the event attributes structure:
 *
 * \par Method 1: Initializing nvtxEventAttributes for future compatibility
 * \code
 * nvtxEventAttributes_t eventAttrib = {0};
 * eventAttrib.version = NVTX_VERSION;
 * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
 * \endcode
 *
 * \par Method 2: Initializing nvtxEventAttributes for a specific version
 * \code
 * nvtxEventAttributes_t eventAttrib = {0};
 * eventAttrib.version = 1;
 * eventAttrib.size = (uint16_t)(sizeof(nvtxEventAttributes_v1));
 * \endcode
 *
 * If the caller uses Method 1 it is critical that the entire binary
 * layout of the structure be configured to 0 so that all fields
 * are initialized to the default value.
 *
 * The caller should either use both NVTX_VERSION and
 * NVTX_EVENT_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
 * and a versioned type (Method 2). Using a mix of the two methods
 * will likely cause either source level incompatibility or binary
 * incompatibility in the future.
 *
 * \par Setting Attribute Types and Values
 *
 * \par Example:
 * \code
 * // Initialize
 * nvtxEventAttributes_t eventAttrib = {0};
 * eventAttrib.version = NVTX_VERSION;
 * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
 *
 * // Configure the Attributes
 * eventAttrib.colorType = NVTX_COLOR_ARGB;
 * eventAttrib.color = 0xFF880000;
 * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
 * eventAttrib.message.ascii = "Example";
 * \endcode
 *
 * In the example the caller does not have to set the value of
 * \ref ::nvtxEventAttributes_v2::category or
 * \ref ::nvtxEventAttributes_v2::payload as these fields were set to
 * the default value by {0}.
 * \sa
 * ::nvtxDomainMarkEx
 * ::nvtxDomainRangeStartEx
 * ::nvtxDomainRangePushEx
 */
typedef struct nvtxEventAttributes_v2
{
    /**
    * \brief Version flag of the structure.
    *
    * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
    * supported in this header file. This can optionally be overridden to
    * another version of the tools extension library.
    */
    uint16_t version;

    /**
    * \brief Size of the structure.
    *
    * Needs to be set to the size in bytes of the event attribute
    * structure used to specify the event.
    */
    uint16_t size;

    /**
     * \brief ID of the category the event is assigned to.
     *
     * A category is a user-controlled ID that can be used to group
     * events. The tool may use category IDs to improve filtering or
     * enable grouping of events in the same category. The functions
     * \ref ::nvtxNameCategoryA or \ref ::nvtxNameCategoryW can be used
     * to name a category.
     *
     * Default Value is 0
     */
    uint32_t category;

    /** \brief Color type specified in this attribute structure.
     *
     * Defines the color format of the attribute structure's \ref COLOR_FIELD
     * "color" field.
     *
     * Default Value is NVTX_COLOR_UNKNOWN
     */
    int32_t colorType;              /* nvtxColorType_t */

    /** \brief Color assigned to this event. \anchor COLOR_FIELD
     *
     * The color that the tool should use to visualize the event.
     */
    uint32_t color;

    /**
     * \brief Payload type specified in this attribute structure.
     *
     * Defines the payload format of the attribute structure's \ref PAYLOAD_FIELD
     * "payload" field.
     *
     * Default Value is NVTX_PAYLOAD_UNKNOWN
     */
    int32_t payloadType;            /* nvtxPayloadType_t */

    /* Padding/reserved; keeps the payload union 8-byte aligned. */
    int32_t reserved0;

    /**
     * \brief Payload assigned to this event. \anchor PAYLOAD_FIELD
     *
     * A numerical value that can be used to annotate an event. The tool could
     * use the payload data to reconstruct graphs and diagrams.
     */
    union payload_t
    {
        uint64_t ullValue;
        int64_t llValue;
        double dValue;
        /* NVTX_VERSION_2 */
        uint32_t uiValue;
        int32_t iValue;
        float fValue;
    } payload;

    /** \brief Message type specified in this attribute structure.
     *
     * Defines the message format of the attribute structure's \ref MESSAGE_FIELD
     * "message" field.
     *
     * Default Value is NVTX_MESSAGE_UNKNOWN
     */
    int32_t messageType;            /* nvtxMessageType_t */

    /** \brief Message assigned to this attribute structure. \anchor MESSAGE_FIELD
     *
     * The text message that is attached to an event.
     */
    nvtxMessageValue_t message;

} nvtxEventAttributes_v2;

typedef struct nvtxEventAttributes_v2 nvtxEventAttributes_t;
664
+
665
+ /** @} */ /*END defgroup*/
666
+ /* ========================================================================= */
667
+ /** \defgroup MARKERS_AND_RANGES Markers and Ranges
668
+ *
669
+ * See \ref MARKERS_AND_RANGES for more details
670
+ *
671
+ * @{
672
+ */
673
+
674
+ /** \name Marker */
675
+
676
/* ------------------------------------------------------------------------- */
/** \brief Marks an instantaneous event in the application.
 *
 * A marker can contain a text message or specify additional information
 * using the event attributes structure. These attributes include a text
 * message, color, category, and a payload. Each of the attributes is optional
 * and can only be sent out using the \ref nvtxDomainMarkEx function.
 *
 * nvtxDomainMarkEx(NULL, event) is equivalent to calling
 * nvtxMarkEx(event).
 *
 * \param domain      - The domain of scoping the category.
 * \param eventAttrib - The event attribute structure defining the marker's
 * attribute types and attribute values.
 *
 * \sa
 * ::nvtxMarkEx
 *
 * \version \NVTX_VERSION_2
 * @{ */
NVTX_DECLSPEC void NVTX_API nvtxDomainMarkEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
/** @} */
698
+
699
/* ------------------------------------------------------------------------- */
/** \brief Marks an instantaneous event in the application.
 *
 * A marker can contain a text message or specify additional information
 * using the event attributes structure. These attributes include a text
 * message, color, category, and a payload. Each of the attributes is optional
 * and can only be sent out using the \ref nvtxMarkEx function.
 * If \ref nvtxMarkA or \ref nvtxMarkW are used to specify the marker
 * or if an attribute is unspecified then a default value will be used.
 *
 * \param eventAttrib - The event attribute structure defining the marker's
 * attribute types and attribute values.
 *
 * \par Example:
 * \code
 * // zero the structure
 * nvtxEventAttributes_t eventAttrib = {0};
 * // set the version and the size information
 * eventAttrib.version = NVTX_VERSION;
 * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
 * // configure the attributes.  0 is the default for all attributes.
 * eventAttrib.colorType = NVTX_COLOR_ARGB;
 * eventAttrib.color = 0xFF880000;
 * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
 * eventAttrib.message.ascii = "Example nvtxMarkEx";
 * nvtxMarkEx(&eventAttrib);
 * \endcode
 *
 * \sa
 * ::nvtxDomainMarkEx
 *
 * \version \NVTX_VERSION_1
 * @{ */
NVTX_DECLSPEC void NVTX_API nvtxMarkEx(const nvtxEventAttributes_t* eventAttrib);
/** @} */
734
+
735
/* ------------------------------------------------------------------------- */
/** \brief Marks an instantaneous event in the application.
 *
 * A marker created using \ref nvtxMarkA or \ref nvtxMarkW contains only a
 * text message.
 *
 * \param message     - The message associated to this marker event.
 *
 * \par Example:
 * \code
 * nvtxMarkA("Example nvtxMarkA");
 * nvtxMarkW(L"Example nvtxMarkW");
 * \endcode
 *
 * \sa
 * ::nvtxDomainMarkEx
 * ::nvtxMarkEx
 *
 * \version \NVTX_VERSION_0
 * @{ */
NVTX_DECLSPEC void NVTX_API nvtxMarkA(const char* message);
NVTX_DECLSPEC void NVTX_API nvtxMarkW(const wchar_t* message);
/** @} */
758
+
759
+
760
+ /** \name Process Ranges */
761
+
762
/* ------------------------------------------------------------------------- */
/** \brief Starts a process range in a domain.
 *
 * \param domain      - The domain of scoping the category.
 * \param eventAttrib - The event attribute structure defining the range's
 * attribute types and attribute values.
 *
 * \return The unique ID used to correlate a pair of Start and End events.
 *
 * \remarks Ranges defined by Start/End can overlap.
 *
 * \par Example:
 * \code
 * nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
 * nvtxEventAttributes_t eventAttrib = {0};
 * eventAttrib.version = NVTX_VERSION;
 * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
 * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
 * eventAttrib.message.ascii = "my range";
 * nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(domain, &eventAttrib);
 * // ...
 * nvtxDomainRangeEnd(domain, rangeId);
 * \endcode
 *
 * \sa
 * ::nvtxDomainRangeEnd
 *
 * \version \NVTX_VERSION_2
 * @{ */
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxDomainRangeStartEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
/** @} */
793
+
794
/* ------------------------------------------------------------------------- */
/** \brief Starts a process range.
 *
 * \param eventAttrib - The event attribute structure defining the range's
 * attribute types and attribute values.
 *
 * \return The unique ID used to correlate a pair of Start and End events.
 *
 * \remarks Ranges defined by Start/End can overlap.
 *
 * \par Example:
 * \code
 * nvtxEventAttributes_t eventAttrib = {0};
 * eventAttrib.version = NVTX_VERSION;
 * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
 * eventAttrib.category = 3;
 * eventAttrib.colorType = NVTX_COLOR_ARGB;
 * eventAttrib.color = 0xFF0088FF;
 * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
 * eventAttrib.message.ascii = "Example Range";
 * nvtxRangeId_t rangeId = nvtxRangeStartEx(&eventAttrib);
 * // ...
 * nvtxRangeEnd(rangeId);
 * \endcode
 *
 * \sa
 * ::nvtxRangeEnd
 * ::nvtxDomainRangeStartEx
 *
 * \version \NVTX_VERSION_1
 * @{ */
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartEx(const nvtxEventAttributes_t* eventAttrib);
/** @} */
827
+
828
/* ------------------------------------------------------------------------- */
/** \brief Starts a process range.
 *
 * \param message     - The event message associated to this range event.
 *
 * \return The unique ID used to correlate a pair of Start and End events.
 *
 * \remarks Ranges defined by Start/End can overlap.
 *
 * \par Example:
 * \code
 * nvtxRangeId_t r1 = nvtxRangeStartA("Range 1");
 * nvtxRangeId_t r2 = nvtxRangeStartW(L"Range 2");
 * nvtxRangeEnd(r1);
 * nvtxRangeEnd(r2);
 * \endcode
 *
 * \sa
 * ::nvtxRangeEnd
 * ::nvtxRangeStartEx
 * ::nvtxDomainRangeStartEx
 *
 * \version \NVTX_VERSION_0
 * @{ */
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartA(const char* message);
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartW(const wchar_t* message);
/** @} */
855
+
856
+ /* ------------------------------------------------------------------------- */
857
+ /** \brief Ends a process range.
858
+ *
859
+ * \param domain - The domain
860
+ * \param id - The correlation ID returned from a nvtxRangeStart call.
861
+ *
862
+ * \remarks This function is offered completeness but is an alias for ::nvtxRangeEnd.
863
+ * It does not need a domain param since that is associated iwth the range ID at ::nvtxDomainRangeStartEx
864
+ *
865
+ * \par Example:
866
+ * \code
867
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
868
+ * nvtxEventAttributes_t eventAttrib = {0};
869
+ * eventAttrib.version = NVTX_VERSION;
870
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
871
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
872
+ * eventAttrib.message.ascii = "my range";
873
+ * nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(domain, &eventAttrib);
874
+ * // ...
875
+ * nvtxDomainRangeEnd(domain, rangeId);
876
+ * \endcode
877
+ *
878
+ * \sa
879
+ * ::nvtxDomainRangeStartEx
880
+ *
881
+ * \version \NVTX_VERSION_2
882
+ * @{ */
883
+ NVTX_DECLSPEC void NVTX_API nvtxDomainRangeEnd(nvtxDomainHandle_t domain, nvtxRangeId_t id);
884
+ /** @} */
885
+
886
+ /* ------------------------------------------------------------------------- */
887
+ /** \brief Ends a process range.
888
+ *
889
+ * \param id - The correlation ID returned from an nvtxRangeStart call.
890
+ *
891
+ * \sa
892
+ * ::nvtxDomainRangeStartEx
893
+ * ::nvtxRangeStartEx
894
+ * ::nvtxRangeStartA
895
+ * ::nvtxRangeStartW
896
+ *
897
+ * \version \NVTX_VERSION_0
898
+ * @{ */
899
+ NVTX_DECLSPEC void NVTX_API nvtxRangeEnd(nvtxRangeId_t id);
900
+ /** @} */
901
+
902
+ /** \name Thread Ranges */
903
+
904
+ /* ------------------------------------------------------------------------- */
905
+ /** \brief Starts a nested thread range.
906
+ *
907
+ * \param domain - The domain of scoping.
908
+ * \param eventAttrib - The event attribute structure defining the range's
909
+ * attribute types and attribute values.
910
+ *
911
+ * \return The 0 based level of range being started. This value is scoped to the domain.
912
+ * If an error occurs, a negative value is returned.
913
+ *
914
+ * \par Example:
915
+ * \code
916
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
917
+ * nvtxEventAttributes_t eventAttrib = {0};
918
+ * eventAttrib.version = NVTX_VERSION;
919
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
920
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
921
+ * eventAttrib.color = 0xFFFF0000;
922
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
923
+ * eventAttrib.message.ascii = "Level 0";
924
+ * nvtxDomainRangePushEx(domain, &eventAttrib);
925
+ *
926
+ * // Re-use eventAttrib
927
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE;
928
+ * eventAttrib.message.unicode = L"Level 1";
929
+ * nvtxDomainRangePushEx(domain, &eventAttrib);
930
+ *
931
+ * nvtxDomainRangePop(domain); //level 1
932
+ * nvtxDomainRangePop(domain); //level 0
933
+ * \endcode
934
+ *
935
+ * \sa
936
+ * ::nvtxDomainRangePop
937
+ *
938
+ * \version \NVTX_VERSION_2
939
+ * @{ */
940
+ NVTX_DECLSPEC int NVTX_API nvtxDomainRangePushEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
941
+ /** @} */
942
+
943
+ /* ------------------------------------------------------------------------- */
944
+ /** \brief Starts a nested thread range.
945
+ *
946
+ * \param eventAttrib - The event attribute structure defining the range's
947
+ * attribute types and attribute values.
948
+ *
949
+ * \return The 0 based level of range being started. This level is per domain.
950
+ * If an error occurs a negative value is returned.
951
+ *
952
+ * \par Example:
953
+ * \code
954
+ * nvtxEventAttributes_t eventAttrib = {0};
955
+ * eventAttrib.version = NVTX_VERSION;
956
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
957
+ * eventAttrib.colorType = NVTX_COLOR_ARGB;
958
+ * eventAttrib.color = 0xFFFF0000;
959
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
960
+ * eventAttrib.message.ascii = "Level 0";
961
+ * nvtxRangePushEx(&eventAttrib);
962
+ *
963
+ * // Re-use eventAttrib
964
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE;
965
+ * eventAttrib.message.unicode = L"Level 1";
966
+ * nvtxRangePushEx(&eventAttrib);
967
+ *
968
+ * nvtxRangePop();
969
+ * nvtxRangePop();
970
+ * \endcode
971
+ *
972
+ * \sa
973
+ * ::nvtxDomainRangePushEx
974
+ * ::nvtxRangePop
975
+ *
976
+ * \version \NVTX_VERSION_1
977
+ * @{ */
978
+ NVTX_DECLSPEC int NVTX_API nvtxRangePushEx(const nvtxEventAttributes_t* eventAttrib);
979
+ /** @} */
980
+
981
+ /* ------------------------------------------------------------------------- */
982
+ /** \brief Starts a nested thread range.
983
+ *
984
+ * \param message - The event message associated to this range event.
985
+ *
986
+ * \return The 0 based level of range being started. If an error occurs a
987
+ * negative value is returned.
988
+ *
989
+ * \par Example:
990
+ * \code
991
+ * nvtxRangePushA("Level 0");
992
+ * nvtxRangePushW(L"Level 1");
993
+ * nvtxRangePop();
994
+ * nvtxRangePop();
995
+ * \endcode
996
+ *
997
+ * \sa
998
+ * ::nvtxDomainRangePushEx
999
+ * ::nvtxRangePop
1000
+ *
1001
+ * \version \NVTX_VERSION_0
1002
+ * @{ */
1003
+ NVTX_DECLSPEC int NVTX_API nvtxRangePushA(const char* message);
1004
+ NVTX_DECLSPEC int NVTX_API nvtxRangePushW(const wchar_t* message);
1005
+ /** @} */
1006
+
1007
+
1008
+ /* ------------------------------------------------------------------------- */
1009
+ /** \brief Ends a nested thread range.
1010
+ *
1011
+ * \return The level of the range being ended. If an error occurs a negative
1012
+ * value is returned on the current thread.
1013
+ *
1014
+ * \par Example:
1015
+ * \code
1016
+ * nvtxDomainHandle_t domain = nvtxDomainCreate("example library");
1017
+ * nvtxDomainRangePushA(domain, "Level 0");
1018
+ * nvtxDomainRangePushW(domain, L"Level 1");
1019
+ * nvtxDomainRangePop(domain);
1020
+ * nvtxDomainRangePop(domain);
1021
+ * \endcode
1022
+ *
1023
+ * \sa
1024
+ * ::nvtxRangePushEx
1025
+ * ::nvtxRangePushA
1026
+ * ::nvtxRangePushW
1027
+ *
1028
+ * \version \NVTX_VERSION_2
1029
+ * @{ */
1030
+ NVTX_DECLSPEC int NVTX_API nvtxDomainRangePop(nvtxDomainHandle_t domain);
1031
+ /** @} */
1032
+
1033
+ /* ------------------------------------------------------------------------- */
1034
+ /** \brief Ends a nested thread range.
1035
+ *
1036
+ * \return The level of the range being ended. If an error occurs a negative
1037
+ * value is returned on the current thread.
1038
+ *
1039
+ * \par Example:
1040
+ * \code
1041
+ * nvtxRangePushA("Level 0");
1042
+ * nvtxRangePushW(L"Level 1");
1043
+ * nvtxRangePop();
1044
+ * nvtxRangePop();
1045
+ * \endcode
1046
+ *
1047
+ * \sa
1048
+ * ::nvtxRangePushEx
1049
+ * ::nvtxRangePushA
1050
+ * ::nvtxRangePushW
1051
+ *
1052
+ * \version \NVTX_VERSION_0
1053
+ * @{ */
1054
+ NVTX_DECLSPEC int NVTX_API nvtxRangePop(void);
1055
+ /** @} */
1056
+
1057
+
1058
+ /** @} */ /*END defgroup*/
1059
+ /* ========================================================================= */
1060
+ /** \defgroup RESOURCE_NAMING Resource Naming
1061
+ *
1062
+ * See \ref RESOURCE_NAMING for more details
1063
+ *
1064
+ * @{
1065
+ */
1066
+
1067
+
1068
+ /* ------------------------------------------------------------------------- */
1069
+ /** \name Functions for Generic Resource Naming*/
1070
+ /* ------------------------------------------------------------------------- */
1071
+
1072
+ /* ------------------------------------------------------------------------- */
1073
+ /** \cond SHOW_HIDDEN
1074
+ * \brief Resource typing helpers.
1075
+ *
1076
+ * Classes are used to make it easy to create a series of resource types
1077
+ * per API without collisions
1078
+ */
1079
+ #define NVTX_RESOURCE_MAKE_TYPE(CLASS, INDEX) ((((uint32_t)(NVTX_RESOURCE_CLASS_ ## CLASS))<<16)|((uint32_t)(INDEX)))
1080
+ #define NVTX_RESOURCE_CLASS_GENERIC 1
1081
+ /** \endcond */
1082
+
1083
+ /* ------------------------------------------------------------------------- */
1084
+ /** \brief Generic resource type for when a resource class is not available.
1085
+ *
1086
+ * \sa
1087
+ * ::nvtxDomainResourceCreate
1088
+ *
1089
+ * \version \NVTX_VERSION_2
1090
+ */
1091
+ typedef enum nvtxResourceGenericType_t
1092
+ {
1093
+ NVTX_RESOURCE_TYPE_UNKNOWN = 0,
1094
+ NVTX_RESOURCE_TYPE_GENERIC_POINTER = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 1), /**< Generic pointer assumed to have no collisions with other pointers. */
1095
+ NVTX_RESOURCE_TYPE_GENERIC_HANDLE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 2), /**< Generic handle assumed to have no collisions with other handles. */
1096
+ NVTX_RESOURCE_TYPE_GENERIC_THREAD_NATIVE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 3), /**< OS native thread identifier. */
1097
+ NVTX_RESOURCE_TYPE_GENERIC_THREAD_POSIX = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 4) /**< POSIX pthread identifier. */
1098
+ } nvtxResourceGenericType_t;
1099
+
1100
+
1101
+
1102
+ /** \brief Resource Attribute Structure.
1103
+ * \anchor RESOURCE_ATTRIBUTE_STRUCTURE
1104
+ *
1105
+ * This structure is used to describe the attributes of a resource. The layout of
1106
+ * the structure is defined by a specific version of the tools extension
1107
+ * library and can change between different versions of the Tools Extension
1108
+ * library.
1109
+ *
1110
+ * \par Initializing the Attributes
1111
+ *
1112
+ * The caller should always perform the following three tasks when using
1113
+ * attributes:
1114
+ * <ul>
1115
+ * <li>Zero the structure
1116
+ * <li>Set the version field
1117
+ * <li>Set the size field
1118
+ * </ul>
1119
+ *
1120
+ * Zeroing the structure sets all the resource attributes types and values
1121
+ * to the default value.
1122
+ *
1123
+ * The version and size field are used by the Tools Extension
1124
+ * implementation to handle multiple versions of the attributes structure.
1125
+ *
1126
+ * It is recommended that the caller use one of the following two methods
1127
+ * to initialize the event attributes structure:
1128
+ *
1129
+ * \par Method 1: Initializing nvtxEventAttributes for future compatibility
1130
+ * \code
1131
+ * nvtxResourceAttributes_t attribs = {0};
1132
+ * attribs.version = NVTX_VERSION;
1133
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1134
+ * \endcode
1135
+ *
1136
+ * \par Method 2: Initializing nvtxEventAttributes for a specific version
1137
+ * \code
1138
+ * nvtxResourceAttributes_v0 attribs = {0};
1139
+ * attribs.version = 2;
1140
+ * attribs.size = (uint16_t)(sizeof(nvtxResourceAttributes_v0));
1141
+ * \endcode
1142
+ *
1143
+ * If the caller uses Method 1 it is critical that the entire binary
1144
+ * layout of the structure be configured to 0 so that all fields
1145
+ * are initialized to the default value.
1146
+ *
1147
+ * The caller should either use both NVTX_VERSION and
1148
+ * NVTX_RESOURCE_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
1149
+ * and a versioned type (Method 2). Using a mix of the two methods
1150
+ * will likely cause either source level incompatibility or binary
1151
+ * incompatibility in the future.
1152
+ *
1153
+ * \par Settings Attribute Types and Values
1154
+ *
1155
+ *
1156
+ * \par Example:
1157
+ * \code
1158
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
1159
+ *
1160
+ * // Initialize
1161
+ * nvtxResourceAttributes_t attribs = {0};
1162
+ * attribs.version = NVTX_VERSION;
1163
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1164
+ *
1165
+ * // Configure the Attributes
1166
+ * attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
1167
+ * attribs.identifier.pValue = (const void*)pMutex;
1168
+ * attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
1169
+ * attribs.message.ascii = "Single thread access to database.";
1170
+ *
1171
+ * nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
1172
+ * \endcode
1173
+ *
1174
+ * \sa
1175
+ * ::nvtxDomainResourceCreate
1176
+ */
1177
+ typedef struct nvtxResourceAttributes_v0
1178
+ {
1179
+ /**
1180
+ * \brief Version flag of the structure.
1181
+ *
1182
+ * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
1183
+ * supported in this header file. This can optionally be overridden to
1184
+ * another version of the tools extension library.
1185
+ */
1186
+ uint16_t version;
1187
+
1188
+ /**
1189
+ * \brief Size of the structure.
1190
+ *
1191
+ * Needs to be set to the size in bytes of this attribute
1192
+ * structure.
1193
+ */
1194
+ uint16_t size;
1195
+
1196
+ /**
1197
+ * \brief Identifier type specifies how to interpret the identifier field
1198
+ *
1199
+ * Defines the identifier format of the attribute structure's \ref RESOURCE_IDENTIFIER_FIELD
1200
+ * "identifier" field.
1201
+ *
1202
+ * Default Value is NVTX_RESOURCE_TYPE_UNKNOWN
1203
+ */
1204
+ int32_t identifierType; /* values from enums following the pattern nvtxResource[name]Type_t */
1205
+
1206
+ /**
1207
+ * \brief Identifier for the resource.
1208
+ * \anchor RESOURCE_IDENTIFIER_FIELD
1209
+ *
1210
+ * An identifier may be a pointer or a handle to an OS or middleware API object.
1211
+ * The resource type will assist in avoiding collisions where handles values may collide.
1212
+ */
1213
+ union identifier_t
1214
+ {
1215
+ const void* pValue;
1216
+ uint64_t ullValue;
1217
+ } identifier;
1218
+
1219
+ /** \brief Message type specified in this attribute structure.
1220
+ *
1221
+ * Defines the message format of the attribute structure's \ref RESOURCE_MESSAGE_FIELD
1222
+ * "message" field.
1223
+ *
1224
+ * Default Value is NVTX_MESSAGE_UNKNOWN
1225
+ */
1226
+ int32_t messageType; /* nvtxMessageType_t */
1227
+
1228
+ /** \brief Message assigned to this attribute structure. \anchor RESOURCE_MESSAGE_FIELD
1229
+ *
1230
+ * The text message that is attached to a resource.
1231
+ */
1232
+ nvtxMessageValue_t message;
1233
+
1234
+ } nvtxResourceAttributes_v0;
1235
+
1236
+ typedef struct nvtxResourceAttributes_v0 nvtxResourceAttributes_t;
1237
+
1238
+ /* \cond SHOW_HIDDEN
1239
+ * \version \NVTX_VERSION_2
1240
+ */
1241
+ #define NVTX_RESOURCE_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxResourceAttributes_v0) ) )
1242
+ typedef struct nvtxResourceHandle* nvtxResourceHandle_t;
1243
+ /** \endcond */
1244
+
1245
+
1246
+
1247
+ /* ------------------------------------------------------------------------- */
1248
+ /** \brief Create a resource object to track and associate data with OS and middleware objects
1249
+ *
1250
+ * Allows users to associate an API handle or pointer with a user-provided name.
1251
+ *
1252
+ *
1253
+ * \param domain - Domain to own the resource object
1254
+ * \param attribs - Attributes to be associated with the resource
1255
+ *
1256
+ * \return A handle that represents the newly created resource object.
1257
+ *
1258
+ * \par Example:
1259
+ * \code
1260
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
1261
+ * nvtxResourceAttributes_t attribs = {0};
1262
+ * attribs.version = NVTX_VERSION;
1263
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1264
+ * attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
1265
+ * attribs.identifier.pValue = (const void*)pMutex;
1266
+ * attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
1267
+ * attribs.message.ascii = "Single thread access to database.";
1268
+ * nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
1269
+ * \endcode
1270
+ *
1271
+ * \sa
1272
+ * ::nvtxResourceAttributes_t
1273
+ * ::nvtxDomainResourceDestroy
1274
+ *
1275
+ * \version \NVTX_VERSION_2
1276
+ * @{ */
1277
+ NVTX_DECLSPEC nvtxResourceHandle_t NVTX_API nvtxDomainResourceCreate(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs);
1278
+ /** @} */
1279
+
1280
+ /* ------------------------------------------------------------------------- */
1281
+ /** \brief Destroy a resource object to track and associate data with OS and middleware objects
1282
+ *
1283
+ * Allows users to associate an API handle or pointer with a user-provided name.
1284
+ *
1285
+ * \param resource - Handle to the resource in which to operate.
1286
+ *
1287
+ * \par Example:
1288
+ * \code
1289
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
1290
+ * nvtxResourceAttributes_t attribs = {0};
1291
+ * attribs.version = NVTX_VERSION;
1292
+ * attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
1293
+ * attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
1294
+ * attribs.identifier.pValue = (const void*)pMutex;
1295
+ * attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
1296
+ * attribs.message.ascii = "Single thread access to database.";
1297
+ * nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, &attribs);
1298
+ * nvtxDomainResourceDestroy(handle);
1299
+ * \endcode
1300
+ *
1301
+ * \sa
1302
+ * ::nvtxDomainResourceCreate
1303
+ *
1304
+ * \version \NVTX_VERSION_2
1305
+ * @{ */
1306
+ NVTX_DECLSPEC void NVTX_API nvtxDomainResourceDestroy(nvtxResourceHandle_t resource);
1307
+ /** @} */
1308
+
1309
+
1310
+ /** \name Functions for NVTX Category Naming*/
1311
+
1312
+ /* ------------------------------------------------------------------------- */
1313
+ /**
1314
+ * \brief Annotate an NVTX category used within a domain.
1315
+ *
1316
+ * Categories are used to group sets of events. Each category is identified
1317
+ * through a unique ID and that ID is passed into any of the marker/range
1318
+ * events to assign that event to a specific category. The nvtxDomainNameCategory
1319
+ * function calls allow the user to assign a name to a category ID that is
1320
+ * specific to the domain.
1321
+ *
1322
+ * nvtxDomainNameCategory(NULL, category, name) is equivalent to calling
1323
+ * nvtxNameCategory(category, name).
1324
+ *
1325
+ * \param domain - The domain of scoping the category.
1326
+ * \param category - The category ID to name.
1327
+ * \param name - The name of the category.
1328
+ *
1329
+ * \remarks The category names are tracked per domain.
1330
+ *
1331
+ * \par Example:
1332
+ * \code
1333
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("example");
1334
+ * nvtxDomainNameCategoryA(domain, 1, "Memory Allocation");
1335
+ * nvtxDomainNameCategoryW(domain, 2, L"Memory Transfer");
1336
+ * \endcode
1337
+ *
1338
+ * \version \NVTX_VERSION_2
1339
+ * @{ */
1340
+ NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryA(nvtxDomainHandle_t domain, uint32_t category, const char* name);
1341
+ NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryW(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name);
1342
+ /** @} */
1343
+
1344
+ /** \brief Annotate an NVTX category.
1345
+ *
1346
+ * Categories are used to group sets of events. Each category is identified
1347
+ * through a unique ID and that ID is passed into any of the marker/range
1348
+ * events to assign that event to a specific category. The nvtxNameCategory
1349
+ * function calls allow the user to assign a name to a category ID.
1350
+ *
1351
+ * \param category - The category ID to name.
1352
+ * \param name - The name of the category.
1353
+ *
1354
+ * \remarks The category names are tracked per process.
1355
+ *
1356
+ * \par Example:
1357
+ * \code
1358
+ * nvtxNameCategory(1, "Memory Allocation");
1359
+ * nvtxNameCategory(2, "Memory Transfer");
1360
+ * nvtxNameCategory(3, "Memory Object Lifetime");
1361
+ * \endcode
1362
+ *
1363
+ * \version \NVTX_VERSION_1
1364
+ * @{ */
1365
+ NVTX_DECLSPEC void NVTX_API nvtxNameCategoryA(uint32_t category, const char* name);
1366
+ NVTX_DECLSPEC void NVTX_API nvtxNameCategoryW(uint32_t category, const wchar_t* name);
1367
+ /** @} */
1368
+
1369
+ /** \name Functions for OS Threads Naming*/
1370
+
1371
+ /* ------------------------------------------------------------------------- */
1372
+ /** \brief Annotate an OS thread.
1373
+ *
1374
+ * Allows the user to name an active thread of the current process. If an
1375
+ * invalid thread ID is provided or a thread ID from a different process is
1376
+ * used the behavior of the tool is implementation dependent.
1377
+ *
1378
+ * The thread name is associated to the default domain. To support domains
1379
+ * use resource objects via ::nvtxDomainResourceCreate.
1380
+ *
1381
+ * \param threadId - The ID of the thread to name.
1382
+ * \param name - The name of the thread.
1383
+ *
1384
+ * \par Example:
1385
+ * \code
1386
+ * nvtxNameOsThread(GetCurrentThreadId(), "MAIN_THREAD");
1387
+ * \endcode
1388
+ *
1389
+ * \version \NVTX_VERSION_1
1390
+ * @{ */
1391
+ NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadA(uint32_t threadId, const char* name);
1392
+ NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadW(uint32_t threadId, const wchar_t* name);
1393
+ /** @} */
1394
+
1395
+
1396
+ /** @} */ /*END defgroup*/
1397
+ /* ========================================================================= */
1398
+ /** \defgroup STRING_REGISTRATION String Registration
1399
+ *
1400
+ * Registered strings are intended to increase performance by lowering instrumentation
1401
+ * overhead. A string may be registered once and the handle may be passed in place of
1402
+ * a string where the APIs allow.
1403
+ *
1404
+ * See \ref STRING_REGISTRATION for more details
1405
+ *
1406
+ * @{
1407
+ */
1408
+
1409
+ /* ------------------------------------------------------------------------- */
1410
+ /** \brief Register a string.
1411
+
1412
+ * Registers an immutable string with NVTX. Once registered the pointer used
1413
+ * to register the string can be used in nvtxEventAttributes_t
1414
+ * \ref MESSAGE_FIELD. This allows NVTX implementation to skip copying the
1415
+ * contents of the message on each event invocation.
1416
+ *
1417
+ * String registration is an optimization. It is recommended to use string
1418
+ * registration if the string will be passed to an event many times.
1419
+ *
1420
+ * Strings are not unregistered, except by unregistering the entire domain.
1421
+ *
1422
+ * \param domain - Domain handle. If NULL then the global domain is used.
1423
+ * \param string - A unique pointer to a sequence of characters.
1424
+ *
1425
+ * \return A handle representing the registered string.
1426
+ *
1427
+ * \par Example:
1428
+ * \code
1429
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
1430
+ * nvtxStringHandle_t message = nvtxDomainRegisterStringA(domain, "registered string");
1431
+ * nvtxEventAttributes_t eventAttrib = {0};
1432
+ * eventAttrib.version = NVTX_VERSION;
1433
+ * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
1434
+ * eventAttrib.messageType = NVTX_MESSAGE_TYPE_REGISTERED;
1435
+ * eventAttrib.message.registered = message;
1436
+ * \endcode
1437
+ *
1438
+ * \version \NVTX_VERSION_2
1439
+ * @{ */
1440
+ NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringA(nvtxDomainHandle_t domain, const char* string);
1441
+ NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringW(nvtxDomainHandle_t domain, const wchar_t* string);
1442
+ /** @} */
1443
+
1444
+ /** @} */ /*END defgroup*/
1445
+ /* ========================================================================= */
1446
+ /** \defgroup DOMAINS Domains
1447
+ *
1448
+ * Domains are used to group events to a developer defined scope. Middleware
1449
+ * vendors may also scope their own events to avoid collisions with
1450
+ * the application developer's events, so that the application developer may
1451
+ * inspect both parts and easily differentiate or filter them. By default
1452
+ * all events are scoped to a global domain where NULL is provided or when
1453
+ * using APIs provided by versions of NVTX below v2.
1454
+ *
1455
+ * Domains are intended to be typically long lived objects with the intention
1456
+ * of logically separating events of large modules from each other such as
1457
+ * middleware libraries from each other and the main application.
1458
+ *
1459
+ * See \ref DOMAINS for more details
1460
+ *
1461
+ * @{
1462
+ */
1463
+
1464
+ /* ------------------------------------------------------------------------- */
1465
+ /** \brief Register a NVTX domain.
1466
+ *
1467
+ * Domains are used to scope annotations. All NVTX_VERSION_0 and NVTX_VERSION_1
1468
+ * annotations are scoped to the global domain. The function nvtxDomainCreate
1469
+ * creates a new named domain.
1470
+ *
1471
+ * Each domain maintains its own nvtxRangePush and nvtxRangePop stack.
1472
+ *
1473
+ * \param name - A unique string representing the domain.
1474
+ *
1475
+ * \return A handle representing the domain.
1476
+ *
1477
+ * \par Example:
1478
+ * \code
1479
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
1480
+ *
1481
+ * nvtxMarkA("nvtxMarkA to global domain");
1482
+ *
1483
+ * nvtxEventAttributes_t eventAttrib1 = {0};
1484
+ * eventAttrib1.version = NVTX_VERSION;
1485
+ * eventAttrib1.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
1486
+ * eventAttrib1.message.ascii = "nvtxDomainMarkEx to global domain";
1487
+ * nvtxDomainMarkEx(NULL, &eventAttrib1);
1488
+ *
1489
+ * nvtxEventAttributes_t eventAttrib2 = {0};
1490
+ * eventAttrib2.version = NVTX_VERSION;
1491
+ * eventAttrib2.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
1492
+ * eventAttrib2.message.ascii = "nvtxDomainMarkEx to com.nvidia.nvtx.example";
1493
+ * nvtxDomainMarkEx(domain, &eventAttrib2);
1494
+ * nvtxDomainDestroy(domain);
1495
+ * \endcode
1496
+ *
1497
+ * \sa
1498
+ * ::nvtxDomainDestroy
1499
+ *
1500
+ * \version \NVTX_VERSION_2
1501
+ * @{ */
1502
+ NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateA(const char* name);
1503
+ NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateW(const wchar_t* name);
1504
+ /** @} */
1505
+
1506
+ /* ------------------------------------------------------------------------- */
1507
+ /** \brief Unregister a NVTX domain.
1508
+ *
1509
+ * Unregisters the domain handle and frees all domain specific resources.
1510
+ *
1511
+ * \param domain - the domain handle
1512
+ *
1513
+ * \par Example:
1514
+ * \code
1515
+ * nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
1516
+ * nvtxDomainDestroy(domain);
1517
+ * \endcode
1518
+ *
1519
+ * \sa
1520
+ * ::nvtxDomainCreateA
1521
+ * ::nvtxDomainCreateW
1522
+ *
1523
+ * \version \NVTX_VERSION_2
1524
+ * @{ */
1525
+ NVTX_DECLSPEC void NVTX_API nvtxDomainDestroy(nvtxDomainHandle_t domain);
1526
+ /** @} */
1527
+
1528
+
1529
+ /** @} */ /*END defgroup*/
1530
+ /* ========================================================================= */
1531
+ /** \cond SHOW_HIDDEN */
1532
+
1533
+ #ifdef UNICODE
1534
+ #define nvtxMark nvtxMarkW
1535
+ #define nvtxRangeStart nvtxRangeStartW
1536
+ #define nvtxRangePush nvtxRangePushW
1537
+ #define nvtxNameCategory nvtxNameCategoryW
1538
+ #define nvtxNameOsThread nvtxNameOsThreadW
1539
+ /* NVTX_VERSION_2 */
1540
+ #define nvtxDomainCreate nvtxDomainCreateW
1541
+ #define nvtxDomainRegisterString nvtxDomainRegisterStringW
1542
+ #define nvtxDomainNameCategory nvtxDomainNameCategoryW
1543
+ #else
1544
+ #define nvtxMark nvtxMarkA
1545
+ #define nvtxRangeStart nvtxRangeStartA
1546
+ #define nvtxRangePush nvtxRangePushA
1547
+ #define nvtxNameCategory nvtxNameCategoryA
1548
+ #define nvtxNameOsThread nvtxNameOsThreadA
1549
+ /* NVTX_VERSION_2 */
1550
+ #define nvtxDomainCreate nvtxDomainCreateA
1551
+ #define nvtxDomainRegisterString nvtxDomainRegisterStringA
1552
+ #define nvtxDomainNameCategory nvtxDomainNameCategoryA
1553
+ #endif
1554
+
1555
+ /** \endcond */
1556
+
1557
+ #ifdef __cplusplus
1558
+ }
1559
+ #endif /* __cplusplus */
1560
+
1561
+ #endif /* NVTOOLSEXT_H_ */
llava_next/lib/python3.10/site-packages/nvidia/nvtx/include/nvToolsExtCuda.h ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2009-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO USER:
5
+ *
6
+ * This source code is subject to NVIDIA ownership rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * This software and the information contained herein is PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
11
+ * of a form of NVIDIA software license agreement.
12
+ *
13
+ * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
14
+ * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
15
+ * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
16
+ * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
17
+ * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
18
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
19
+ * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
20
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
21
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
22
+ * OR PERFORMANCE OF THIS SOURCE CODE.
23
+ *
24
+ * U.S. Government End Users. This source code is a "commercial item" as
25
+ * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
26
+ * "commercial computer software" and "commercial computer software
27
+ * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
28
+ * and is provided to the U.S. Government only as a commercial end item.
29
+ * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
30
+ * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
31
+ * source code with only those rights set forth herein.
32
+ *
33
+ * Any use of this source code in individual and commercial software must
34
+ * include, in the user documentation and internal comments to the code,
35
+ * the above Disclaimer and U.S. Government End Users Notice.
36
+ */
37
+
38
+ #ifndef NVTOOLSEXT_CUDA_H_
39
+ #define NVTOOLSEXT_CUDA_H_
40
+
41
+ #include "cuda.h"
42
+
43
+ #include "nvToolsExt.h"
44
+
45
+ #ifdef __cplusplus
46
+ extern "C" {
47
+ #endif /* __cplusplus */
48
+
49
+ /* ========================================================================= */
50
+ /** \name Functions for CUDA Resource Naming
51
+ */
52
+ /** \addtogroup RESOURCE_NAMING
53
+ * \section RESOURCE_NAMING_CUDA CUDA Resource Naming
54
+ *
55
+ * This section covers the API functions that allow to annotate CUDA resources
56
+ * with user-provided names.
57
+ *
58
+ * @{
59
+ */
60
+
61
+ /* ------------------------------------------------------------------------- */
62
+ /* \cond SHOW_HIDDEN
63
+ * \brief Used to build a non-colliding value for resource types separated class
64
+ * \version \NVTX_VERSION_2
65
+ */
66
+ #define NVTX_RESOURCE_CLASS_CUDA 4
67
+ /** \endcond */
68
+
69
+ /* ------------------------------------------------------------------------- */
70
+ /** \brief Resource types for CUDA
71
+ */
72
+ typedef enum nvtxResourceCUDAType_t
73
+ {
74
+ NVTX_RESOURCE_TYPE_CUDA_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDA, 1), /* CUdevice */
75
+ NVTX_RESOURCE_TYPE_CUDA_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 2), /* CUcontext */
76
+ NVTX_RESOURCE_TYPE_CUDA_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDA, 3), /* CUstream */
77
+ NVTX_RESOURCE_TYPE_CUDA_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 4) /* CUevent */
78
+ } nvtxResourceCUDAType_t;
79
+
80
+
81
+ /* ------------------------------------------------------------------------- */
82
+ /** \brief Annotates a CUDA device.
83
+ *
84
+ * Allows the user to associate a CUDA device with a user-provided name.
85
+ *
86
+ * \param device - The handle of the CUDA device to name.
87
+ * \param name - The name of the CUDA device.
88
+ *
89
+ * \version \NVTX_VERSION_1
90
+ * @{ */
91
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceA(CUdevice device, const char* name);
92
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceW(CUdevice device, const wchar_t* name);
93
+ /** @} */
94
+
95
+ /* ------------------------------------------------------------------------- */
96
+ /** \brief Annotates a CUDA context.
97
+ *
98
+ * Allows the user to associate a CUDA context with a user-provided name.
99
+ *
100
+ * \param context - The handle of the CUDA context to name.
101
+ * \param name - The name of the CUDA context.
102
+ *
103
+ * \par Example:
104
+ * \code
105
+ * CUresult status = cuCtxCreate( &cuContext, 0, cuDevice );
106
+ * if ( CUDA_SUCCESS != status )
107
+ * goto Error;
108
+ * nvtxNameCuContext(cuContext, "CTX_NAME");
109
+ * \endcode
110
+ *
111
+ * \version \NVTX_VERSION_1
112
+ * @{ */
113
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuContextA(CUcontext context, const char* name);
114
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuContextW(CUcontext context, const wchar_t* name);
115
+ /** @} */
116
+
117
+ /* ------------------------------------------------------------------------- */
118
+ /** \brief Annotates a CUDA stream.
119
+ *
120
+ * Allows the user to associate a CUDA stream with a user-provided name.
121
+ *
122
+ * \param stream - The handle of the CUDA stream to name.
123
+ * \param name - The name of the CUDA stream.
124
+ *
125
+ * \version \NVTX_VERSION_1
126
+ * @{ */
127
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamA(CUstream stream, const char* name);
128
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamW(CUstream stream, const wchar_t* name);
129
+ /** @} */
130
+
131
+ /* ------------------------------------------------------------------------- */
132
+ /** \brief Annotates a CUDA event.
133
+ *
134
+ * Allows the user to associate a CUDA event with a user-provided name.
135
+ *
136
+ * \param event - The handle of the CUDA event to name.
137
+ * \param name - The name of the CUDA event.
138
+ *
139
+ * \version \NVTX_VERSION_1
140
+ * @{ */
141
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuEventA(CUevent event, const char* name);
142
+ NVTX_DECLSPEC void NVTX_API nvtxNameCuEventW(CUevent event, const wchar_t* name);
143
+ /** @} */
144
+
145
+ /** @} */ /* END RESOURCE_NAMING */
146
+
147
+ /* ========================================================================= */
148
+ #ifdef UNICODE
149
+ #define nvtxNameCuDevice nvtxNameCuDeviceW
150
+ #define nvtxNameCuContext nvtxNameCuContextW
151
+ #define nvtxNameCuStream nvtxNameCuStreamW
152
+ #define nvtxNameCuEvent nvtxNameCuEventW
153
+ #else
154
+ #define nvtxNameCuDevice nvtxNameCuDeviceA
155
+ #define nvtxNameCuContext nvtxNameCuContextA
156
+ #define nvtxNameCuStream nvtxNameCuStreamA
157
+ #define nvtxNameCuEvent nvtxNameCuEventA
158
+ #endif
159
+
160
+ #ifdef __cplusplus
161
+ }
162
+ #endif /* __cplusplus */
163
+
164
+ #endif /* NVTOOLSEXT_CUDA_H_ */