diff --git a/.gitattributes b/.gitattributes index a080832efb74929ee09ce509b49f4f2c9176e8b2..3114ea516b8228b9c6b0f9dba1c8464f92442d89 100644 --- a/.gitattributes +++ b/.gitattributes @@ -4059,3 +4059,24 @@ tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile.so.0 tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12 filter=lfs diff=lfs merge=lfs -text tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.6 filter=lfs diff=lfs merge=lfs -text tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufft.so.11 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_precompiled.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_runtime_compiled.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_graph.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolver.so.11 filter=lfs diff=lfs merge=lfs -text +tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text diff --git a/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae134ba0285b3eabee2b7c65dcd4819049622f2b Binary files 
/dev/null and b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_decode.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_decode.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ac77cbc6d62eaf24b36ad47180464e5341d1372 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_decode.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_encode.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_encode.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e4910c2c713da52679f6173759c820d9a57aad3 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_encode.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_format.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_format.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3de81fa2e5915621f5c112506e13885195ebca46 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_format.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_parse.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_parse.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14154538a29443deb49367e8f25ad9c925240638 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_parse.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_url.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_url.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ba57606bdbee6d6b3d0c1745bbf495384371b42 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/mdurl/__pycache__/_url.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgpack-1.1.1.dist-info/licenses/COPYING b/tool_server/.venv/lib/python3.12/site-packages/msgpack-1.1.1.dist-info/licenses/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..f067af3aae15a5b494f0eeb15a7064c176e30fd5 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/msgpack-1.1.1.dist-info/licenses/COPYING @@ -0,0 +1,14 @@ +Copyright (C) 2008-2011 INADA Naoki + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a93b77583f3c015aebaf6aa49b2e631c7e9a18c2 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/exceptions.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49eb9bc204e0d654183817e2c0d5c4ea8f4e75b3 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/exceptions.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/ext.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/ext.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44bfb12f441f01441edd7d76f5c22b8e38b4254a Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/ext.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/fallback.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/fallback.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3375561f87fb1568809d5a8fe55edfd3b4517011 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgpack/__pycache__/fallback.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca8f1ebfb10dd36f9003a0465fb3d181dfb73382 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_json_schema.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_json_schema.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd08004c345c252b7ad738810115b4b63cbb1035 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_json_schema.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_utils.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c97d31ad01f6481c50257b2857a2afa56d41e7f4 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_utils.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_version.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_version.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45aa4ea9338f758a9614c88cc4d4a5cc6450564c Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/_version.cpython-312.pyc differ diff --git 
a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/inspect.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/inspect.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62f5b579f6a3982cc7c8194308a32328439dd190 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/inspect.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/json.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/json.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f342d8a7388d5511088e268570df124dbcc83091 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/json.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/msgpack.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/msgpack.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d0680be90cb1bc3c606454a3553b1e11f7c6b81 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/msgpack.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/structs.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/structs.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b72f3f6256f1375fb0d4b038cdca6e23bd48cf8d Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/structs.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/toml.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/toml.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b20cb43fb32068f3230cb9e8e55472cdb2bc8319 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/toml.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/yaml.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/yaml.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..524e8645a67be59bfb59863b002b9c62270ab28b Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/msgspec/__pycache__/yaml.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/multidict-6.6.4.dist-info/licenses/LICENSE b/tool_server/.venv/lib/python3.12/site-packages/multidict-6.6.4.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8727172ae058e56805bd8ed0f988b6788711dcfd --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/multidict-6.6.4.dist-info/licenses/LICENSE @@ -0,0 +1,13 @@ + Copyright 2016 Andrew Svetlov and aio-libs contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/tool_server/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/licenses/LICENSE.txt b/tool_server/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..0bf9a8f3f4922a0b70aac95ac7aab9574975342b --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/networkx-3.5.dist-info/licenses/LICENSE.txt @@ -0,0 +1,37 @@ +NetworkX is distributed with the 3-clause BSD license. + +:: + + Copyright (c) 2004-2025, NetworkX Developers + Aric Hagberg + Dan Schult + Pieter Swart + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NetworkX Developers nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tool_server/.venv/lib/python3.12/site-packages/ninja-1.13.0.dist-info/licenses/AUTHORS.rst b/tool_server/.venv/lib/python3.12/site-packages/ninja-1.13.0.dist-info/licenses/AUTHORS.rst new file mode 100644 index 0000000000000000000000000000000000000000..b1b06c180f71d5a97709335823145b230576f2c5 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/ninja-1.13.0.dist-info/licenses/AUTHORS.rst @@ -0,0 +1,5 @@ +======= +Credits +======= + +Please see the GitHub project page at https://github.com/scikit-build/ninja-python-distributions/graphs/contributors diff --git a/tool_server/.venv/lib/python3.12/site-packages/ninja-1.13.0.dist-info/licenses/LICENSE_Apache_20 b/tool_server/.venv/lib/python3.12/site-packages/ninja-1.13.0.dist-info/licenses/LICENSE_Apache_20 new file mode 100644 index 0000000000000000000000000000000000000000..37ec93a14fdcd0d6e525d97c0cfa6b314eaa98d8 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/ninja-1.13.0.dist-info/licenses/LICENSE_Apache_20 @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d336da73f2866e148eee0a2b2e82592efea4066 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae741ba96d069b55e7a14742d7e759097065a1ee Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas.h new file mode 100644 index 0000000000000000000000000000000000000000..96eadad8a8e8c3979b99910ceea41ceaf2c8b58e --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas.h @@ -0,0 +1,891 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * This is the public header file for the CUBLAS library, defining the API + * + * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines) + * on top of the CUDA runtime. + */ + +#if !defined(CUBLAS_H_) +#define CUBLAS_H_ + +#if defined(CUBLAS_V2_H_) +#error "It is an error to include both cublas.h and cublas_v2.h" +#endif + +#include <cuda_runtime.h> + +#ifndef CUBLASWINAPI +#ifdef _WIN32 +#define CUBLASWINAPI __stdcall +#else +#define CUBLASWINAPI +#endif +#endif + +#undef CUBLASAPI +#ifdef __CUDACC__ +#define CUBLASAPI __host__ +#else +#define CUBLASAPI +#endif + +#include "cublas_api.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* CUBLAS data types */ +#define cublasStatus cublasStatus_t + +cublasStatus CUBLASWINAPI cublasInit(void); +cublasStatus CUBLASWINAPI cublasShutdown(void); +cublasStatus CUBLASWINAPI cublasGetError(void); + +cublasStatus CUBLASWINAPI cublasGetVersion(int* version); +cublasStatus CUBLASWINAPI cublasAlloc(int n, int elemSize, void** devicePtr); + +cublasStatus CUBLASWINAPI cublasFree(void* devicePtr); + +cublasStatus CUBLASWINAPI cublasSetKernelStream(cudaStream_t stream); + +/* ---------------- CUBLAS BLAS1 functions ---------------- */ +/* NRM2 */ +float CUBLASWINAPI cublasSnrm2(int n, const float* x, int incx); +double CUBLASWINAPI cublasDnrm2(int n, const double* x, int incx); +float CUBLASWINAPI cublasScnrm2(int n, const cuComplex* x, int incx); +double CUBLASWINAPI cublasDznrm2(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* DOT */ +float CUBLASWINAPI cublasSdot(int n, const float* x, int incx, const float* y, int incy); +double CUBLASWINAPI cublasDdot(int n, const double* x, int incx, const double* y, int incy); +cuComplex CUBLASWINAPI cublasCdotu(int n, const cuComplex* x, int incx, const cuComplex* y, int incy); +cuComplex CUBLASWINAPI cublasCdotc(int n, const cuComplex* x, int incx, const cuComplex* y, int incy); +cuDoubleComplex CUBLASWINAPI cublasZdotu(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy); +cuDoubleComplex CUBLASWINAPI cublasZdotc(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* SCAL */ +void CUBLASWINAPI cublasSscal(int n, float alpha, float* x, int incx); +void CUBLASWINAPI cublasDscal(int n, double alpha, double* x, int incx); +void CUBLASWINAPI cublasCscal(int n, cuComplex alpha, cuComplex* x, int incx); +void CUBLASWINAPI cublasZscal(int n, cuDoubleComplex alpha, cuDoubleComplex* x, int incx); + +void CUBLASWINAPI cublasCsscal(int n, float alpha, cuComplex* x, int incx); +void CUBLASWINAPI cublasZdscal(int n, double alpha, cuDoubleComplex* x, int incx);
+/*------------------------------------------------------------------------*/ +/* AXPY */ +void CUBLASWINAPI cublasSaxpy(int n, float alpha, const float* x, int incx, float* y, int incy); +void CUBLASWINAPI cublasDaxpy(int n, double alpha, const double* x, int incx, double* y, int incy); +void CUBLASWINAPI cublasCaxpy(int n, cuComplex alpha, const cuComplex* x, int incx, cuComplex* y, int incy); +void CUBLASWINAPI +cublasZaxpy(int n, cuDoubleComplex alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* COPY */ +void CUBLASWINAPI cublasScopy(int n, const float* x, int incx, float* y, int incy); +void CUBLASWINAPI cublasDcopy(int n, const double* x, int incx, double* y, int incy); +void CUBLASWINAPI cublasCcopy(int n, const cuComplex* x, int incx, cuComplex* y, int incy); +void CUBLASWINAPI cublasZcopy(int n, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* SWAP */ +void CUBLASWINAPI cublasSswap(int n, float* x, int incx, float* y, int incy); +void CUBLASWINAPI cublasDswap(int n, double* x, int incx, double* y, int incy); +void CUBLASWINAPI cublasCswap(int n, cuComplex* x, int incx, cuComplex* y, int incy); +void CUBLASWINAPI cublasZswap(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); +/*------------------------------------------------------------------------*/ +/* AMAX */ +int CUBLASWINAPI cublasIsamax(int n, const float* x, int incx); +int CUBLASWINAPI cublasIdamax(int n, const double* x, int incx); +int CUBLASWINAPI cublasIcamax(int n, const cuComplex* x, int incx); +int CUBLASWINAPI cublasIzamax(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* AMIN */ +int CUBLASWINAPI cublasIsamin(int n, const float* x, int incx); +int CUBLASWINAPI cublasIdamin(int n, const double* x, int incx); + +int CUBLASWINAPI cublasIcamin(int n, const cuComplex* x, int incx); +int CUBLASWINAPI cublasIzamin(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* ASUM */ +float CUBLASWINAPI cublasSasum(int n, const float* x, int incx); +double CUBLASWINAPI cublasDasum(int n, const double* x, int incx); +float CUBLASWINAPI cublasScasum(int n, const cuComplex* x, int incx); +double CUBLASWINAPI cublasDzasum(int n, const cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* ROT */ +void CUBLASWINAPI cublasSrot(int n, float* x, int incx, float* y, int incy, float sc, float ss); +void CUBLASWINAPI cublasDrot(int n, double* x, int incx, double* y, int incy, double sc, double ss); +void CUBLASWINAPI cublasCrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, cuComplex s); +void CUBLASWINAPI +cublasZrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double sc, cuDoubleComplex cs); +void CUBLASWINAPI cublasCsrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, float s); +void CUBLASWINAPI cublasZdrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double c, double s); +/*------------------------------------------------------------------------*/ +/* ROTG */ +void CUBLASWINAPI cublasSrotg(float* sa, float* sb, float* sc, float* ss); +void CUBLASWINAPI cublasDrotg(double* sa, double* sb, double* sc, double* ss); +void CUBLASWINAPI 
cublasCrotg(cuComplex* ca, cuComplex cb, float* sc, cuComplex* cs); +void CUBLASWINAPI cublasZrotg(cuDoubleComplex* ca, cuDoubleComplex cb, double* sc, cuDoubleComplex* cs); +/*------------------------------------------------------------------------*/ +/* ROTM */ +void CUBLASWINAPI cublasSrotm(int n, float* x, int incx, float* y, int incy, const float* sparam); +void CUBLASWINAPI cublasDrotm(int n, double* x, int incx, double* y, int incy, const double* sparam); +/*------------------------------------------------------------------------*/ +/* ROTMG */ +void CUBLASWINAPI cublasSrotmg(float* sd1, float* sd2, float* sx1, const float* sy1, float* sparam); +void CUBLASWINAPI cublasDrotmg(double* sd1, double* sd2, double* sx1, const double* sy1, double* sparam); + +/* --------------- CUBLAS BLAS2 functions ---------------- */ +/* GEMV */ +void CUBLASWINAPI cublasSgemv(char trans, + int m, + int n, + float alpha, + const float* A, + int lda, + const float* x, + int incx, + float beta, + float* y, + int incy); +void CUBLASWINAPI cublasDgemv(char trans, + int m, + int n, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasCgemv(char trans, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZgemv(char trans, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* GBMV */ +void CUBLASWINAPI cublasSgbmv(char trans, + int m, + int n, + int kl, + int ku, + float alpha, + const float* A, + int lda, + const float* x, + int incx, + float beta, + float* y, + int incy); +void CUBLASWINAPI cublasDgbmv(char trans, + int m, + int n, + int kl, + int ku, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasCgbmv(char trans, + int m, + int n, + int kl, + int ku, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZgbmv(char trans, + int m, + int n, + int kl, + int ku, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* TRMV */ +void CUBLASWINAPI cublasStrmv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx); +void CUBLASWINAPI cublasDtrmv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx); +void CUBLASWINAPI +cublasCtrmv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx); +void CUBLASWINAPI +cublasZtrmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TBMV */ +void CUBLASWINAPI +cublasStbmv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx); +void CUBLASWINAPI +cublasDtbmv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx); +void CUBLASWINAPI +cublasCtbmv(char uplo, char trans, char 
diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx); +void CUBLASWINAPI cublasZtbmv( + char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TPMV */ +void CUBLASWINAPI cublasStpmv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx); + +void CUBLASWINAPI cublasDtpmv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx); + +void CUBLASWINAPI cublasCtpmv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx); + +void CUBLASWINAPI +cublasZtpmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TRSV */ +void CUBLASWINAPI cublasStrsv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx); + +void CUBLASWINAPI cublasDtrsv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx); + +void CUBLASWINAPI +cublasCtrsv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx); + +void CUBLASWINAPI +cublasZtrsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TPSV */ +void CUBLASWINAPI cublasStpsv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx); + +void CUBLASWINAPI cublasDtpsv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx); + +void CUBLASWINAPI cublasCtpsv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx); + +void CUBLASWINAPI +cublasZtpsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* TBSV */ +void CUBLASWINAPI +cublasStbsv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx); + +void CUBLASWINAPI +cublasDtbsv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx); +void CUBLASWINAPI +cublasCtbsv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx); + +void CUBLASWINAPI cublasZtbsv( + char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx); +/*------------------------------------------------------------------------*/ +/* SYMV/HEMV */ +void CUBLASWINAPI cublasSsymv( + char uplo, int n, float alpha, const float* A, int lda, const float* x, int incx, float beta, float* y, int incy); +void CUBLASWINAPI cublasDsymv(char uplo, + int n, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasChemv(char uplo, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZhemv(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* SBMV/HBMV */ +void CUBLASWINAPI cublasSsbmv(char uplo, + int n, + int k, + 
float alpha, + const float* A, + int lda, + const float* x, + int incx, + float beta, + float* y, + int incy); +void CUBLASWINAPI cublasDsbmv(char uplo, + int n, + int k, + double alpha, + const double* A, + int lda, + const double* x, + int incx, + double beta, + double* y, + int incy); +void CUBLASWINAPI cublasChbmv(char uplo, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZhbmv(char uplo, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); +/*------------------------------------------------------------------------*/ +/* SPMV/HPMV */ +void CUBLASWINAPI +cublasSspmv(char uplo, int n, float alpha, const float* AP, const float* x, int incx, float beta, float* y, int incy); +void CUBLASWINAPI cublasDspmv( + char uplo, int n, double alpha, const double* AP, const double* x, int incx, double beta, double* y, int incy); +void CUBLASWINAPI cublasChpmv(char uplo, + int n, + cuComplex alpha, + const cuComplex* AP, + const cuComplex* x, + int incx, + cuComplex beta, + cuComplex* y, + int incy); +void CUBLASWINAPI cublasZhpmv(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex beta, + cuDoubleComplex* y, + int incy); + +/*------------------------------------------------------------------------*/ +/* GER */ +void CUBLASWINAPI +cublasSger(int m, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda); +void CUBLASWINAPI +cublasDger(int m, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda); + +void CUBLASWINAPI cublasCgeru( + int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda); +void CUBLASWINAPI cublasCgerc( + int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda); +void CUBLASWINAPI cublasZgeru(int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); +void CUBLASWINAPI cublasZgerc(int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); +/*------------------------------------------------------------------------*/ +/* SYR/HER */ +void CUBLASWINAPI cublasSsyr(char uplo, int n, float alpha, const float* x, int incx, float* A, int lda); +void CUBLASWINAPI cublasDsyr(char uplo, int n, double alpha, const double* x, int incx, double* A, int lda); + +void CUBLASWINAPI cublasCher(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* A, int lda); +void CUBLASWINAPI +cublasZher(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* A, int lda); + +/*------------------------------------------------------------------------*/ +/* SPR/HPR */ +void CUBLASWINAPI cublasSspr(char uplo, int n, float alpha, const float* x, int incx, float* AP); +void CUBLASWINAPI cublasDspr(char uplo, int n, double alpha, const double* x, int incx, double* AP); +void CUBLASWINAPI cublasChpr(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* AP); +void CUBLASWINAPI cublasZhpr(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, 
cuDoubleComplex* AP); +/*------------------------------------------------------------------------*/ +/* SYR2/HER2 */ +void CUBLASWINAPI +cublasSsyr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda); +void CUBLASWINAPI +cublasDsyr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda); +void CUBLASWINAPI cublasCher2(char uplo, + int n, + cuComplex alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); +void CUBLASWINAPI cublasZher2(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +/*------------------------------------------------------------------------*/ +/* SPR2/HPR2 */ +void CUBLASWINAPI +cublasSspr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* AP); +void CUBLASWINAPI +cublasDspr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* AP); +void CUBLASWINAPI cublasChpr2( + char uplo, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* AP); +void CUBLASWINAPI cublasZhpr2(char uplo, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* AP); +/* ------------------------BLAS3 Functions ------------------------------- */ +/* GEMM */ +void CUBLASWINAPI cublasSgemm(char transa, + char transb, + int m, + int n, + int k, + float alpha, + const float* A, + int lda, + const float* B, + int ldb, + float beta, + float* C, + int ldc); +void CUBLASWINAPI cublasDgemm(char transa, + char transb, + int m, + int n, + int k, + double alpha, + const double* A, + int lda, + const double* B, + int ldb, + double beta, + double* C, + int ldc); +void CUBLASWINAPI cublasCgemm(char transa, + char transb, + int m, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); +void CUBLASWINAPI cublasZgemm(char transa, + char transb, + int m, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/* -------------------------------------------------------*/ +/* SYRK */ +void CUBLASWINAPI +cublasSsyrk(char uplo, char trans, int n, int k, float alpha, const float* A, int lda, float beta, float* C, int ldc); +void CUBLASWINAPI cublasDsyrk( + char uplo, char trans, int n, int k, double alpha, const double* A, int lda, double beta, double* C, int ldc); + +void CUBLASWINAPI cublasCsyrk(char uplo, + char trans, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + cuComplex beta, + cuComplex* C, + int ldc); +void CUBLASWINAPI cublasZsyrk(char uplo, + char trans, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/* ------------------------------------------------------- */ +/* HERK */ +void CUBLASWINAPI cublasCherk( + char uplo, char trans, int n, int k, float alpha, const cuComplex* A, int lda, float beta, cuComplex* C, int ldc); +void CUBLASWINAPI cublasZherk(char uplo, + char trans, + int n, + int k, + double alpha, + const cuDoubleComplex* A, + int lda, + double beta, + cuDoubleComplex* C, + int ldc); +/* 
------------------------------------------------------- */ +/* SYR2K */ +void CUBLASWINAPI cublasSsyr2k(char uplo, + char trans, + int n, + int k, + float alpha, + const float* A, + int lda, + const float* B, + int ldb, + float beta, + float* C, + int ldc); + +void CUBLASWINAPI cublasDsyr2k(char uplo, + char trans, + int n, + int k, + double alpha, + const double* A, + int lda, + const double* B, + int ldb, + double beta, + double* C, + int ldc); +void CUBLASWINAPI cublasCsyr2k(char uplo, + char trans, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); + +void CUBLASWINAPI cublasZsyr2k(char uplo, + char trans, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/* ------------------------------------------------------- */ +/* HER2K */ +void CUBLASWINAPI cublasCher2k(char uplo, + char trans, + int n, + int k, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + float beta, + cuComplex* C, + int ldc); + +void CUBLASWINAPI cublasZher2k(char uplo, + char trans, + int n, + int k, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + double beta, + cuDoubleComplex* C, + int ldc); + +/*------------------------------------------------------------------------*/ +/* SYMM*/ +void CUBLASWINAPI cublasSsymm(char side, + char uplo, + int m, + int n, + float alpha, + const float* A, + int lda, + const float* B, + int ldb, + float beta, + float* C, + int ldc); +void CUBLASWINAPI cublasDsymm(char side, + char uplo, + int m, + int n, + double alpha, + const double* A, + int lda, + const double* B, + int ldb, + double beta, + double* C, + int ldc); + +void CUBLASWINAPI cublasCsymm(char side, + char uplo, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); + +void CUBLASWINAPI cublasZsymm(char side, + char uplo, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); +/*------------------------------------------------------------------------*/ +/* HEMM*/ +void CUBLASWINAPI cublasChemm(char side, + char uplo, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + cuComplex beta, + cuComplex* C, + int ldc); +void CUBLASWINAPI cublasZhemm(char side, + char uplo, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex beta, + cuDoubleComplex* C, + int ldc); + +/*------------------------------------------------------------------------*/ +/* TRSM*/ +void CUBLASWINAPI cublasStrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + float alpha, + const float* A, + int lda, + float* B, + int ldb); + +void CUBLASWINAPI cublasDtrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + double alpha, + const double* A, + int lda, + double* B, + int ldb); + +void CUBLASWINAPI cublasCtrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + cuComplex* B, + int ldb); + +void CUBLASWINAPI cublasZtrsm(char side, + char uplo, + char transa, + char diag, + int m, + int n, 
+ cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* B, + int ldb); +/*------------------------------------------------------------------------*/ +/* TRMM*/ +void CUBLASWINAPI cublasStrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + float alpha, + const float* A, + int lda, + float* B, + int ldb); +void CUBLASWINAPI cublasDtrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + double alpha, + const double* A, + int lda, + double* B, + int ldb); +void CUBLASWINAPI cublasCtrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + cuComplex alpha, + const cuComplex* A, + int lda, + cuComplex* B, + int ldb); +void CUBLASWINAPI cublasZtrmm(char side, + char uplo, + char transa, + char diag, + int m, + int n, + cuDoubleComplex alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* B, + int ldb); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* !defined(CUBLAS_H_) */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublasLt.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublasLt.h new file mode 100644 index 0000000000000000000000000000000000000000..6f4db7e199750fef6f13ea3f91e040960b3fbbbe --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublasLt.h @@ -0,0 +1,2452 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. 
Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ +#pragma once + +#ifndef CUBLASAPI +#ifdef __CUDACC__ +#define CUBLASAPI __host__ __device__ +#else +#define CUBLASAPI +#endif +#endif + +#include <cublas_api.h> + +#include <stdint.h> +#include <stddef.h> +#include <stdio.h> + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +/** Opaque structure holding CUBLASLT context + */ +typedef struct cublasLtContext* cublasLtHandle_t; + +cublasStatus_t CUBLASWINAPI cublasLtCreate(cublasLtHandle_t* lightHandle); + +cublasStatus_t CUBLASWINAPI cublasLtDestroy(cublasLtHandle_t lightHandle); + +const char* CUBLASWINAPI cublasLtGetStatusName(cublasStatus_t status); + +const char* CUBLASWINAPI cublasLtGetStatusString(cublasStatus_t status); + +size_t CUBLASWINAPI cublasLtGetVersion(void); + +size_t CUBLASWINAPI cublasLtGetCudartVersion(void); + +cublasStatus_t CUBLASWINAPI cublasLtGetProperty(libraryPropertyType type, int* value); + +cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheGetCapacity(size_t* capacity); +cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheSetCapacity(size_t capacity); + +/** Restricts usage of CPU instructions (ISA) specified by the flags in the mask. + * + * Flags can be combined with bitwise OR(|) operator. Supported flags: + * - 0x1 -- x86-64 AVX512 ISA + * + * Default mask: 0 (any applicable ISA is allowed). + * + * The function returns the previous value of the mask. + * The function takes precedence over the environment variable CUBLASLT_DISABLE_CPU_INSTRUCTIONS_MASK. + */ +unsigned CUBLASWINAPI cublasLtDisableCpuInstructionsSetMask(unsigned mask); + +/** Semi-opaque descriptor for matrix memory layout + */ +typedef struct { + uint64_t data[8]; +} cublasLtMatrixLayoutOpaque_t; + +/** Opaque descriptor for matrix memory layout + */ +typedef cublasLtMatrixLayoutOpaque_t* cublasLtMatrixLayout_t; + +/** Semi-opaque algorithm descriptor (to avoid complicated alloc/free schemes) + * + * This structure can be trivially serialized and later restored for use with the same version of cuBLAS library to save + * on selecting the right configuration again.
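+ *
+ * A round trip of that serialization might look like this (an illustrative sketch, not
+ * part of the API; assumes the algo was previously obtained from the library and that
+ * FILE* f is open for binary I/O):
+ *
+ *   cublasLtMatmulAlgo_t algo;            // plain data: an array of 8 uint64_t
+ *   fwrite(&algo, sizeof(algo), 1, f);    // save the selected configuration
+ *   fread(&algo, sizeof(algo), 1, f);     // restore it under the same cuBLAS version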
+ */ +typedef struct { + uint64_t data[8]; +} cublasLtMatmulAlgo_t; + +/** Semi-opaque descriptor for cublasLtMatmul() operation details + */ +typedef struct { + uint64_t data[32]; +} cublasLtMatmulDescOpaque_t; + +/** Opaque descriptor for cublasLtMatmul() operation details + */ +typedef cublasLtMatmulDescOpaque_t* cublasLtMatmulDesc_t; + +/** Semi-opaque descriptor for cublasLtMatrixTransform() operation details + */ +typedef struct { + uint64_t data[8]; +} cublasLtMatrixTransformDescOpaque_t; + +/** Opaque descriptor for cublasLtMatrixTransform() operation details + */ +typedef cublasLtMatrixTransformDescOpaque_t* cublasLtMatrixTransformDesc_t; + +/** Semi-opaque descriptor for cublasLtMatmulPreference() operation details + */ +typedef struct { + uint64_t data[8]; +} cublasLtMatmulPreferenceOpaque_t; + +/** Opaque descriptor for cublasLtMatmulAlgoGetHeuristic() configuration + */ +typedef cublasLtMatmulPreferenceOpaque_t* cublasLtMatmulPreference_t; + +/** Tile size (in C/D matrix Rows x Cols) + * + * General order of tile IDs is sorted by size first and by first dimension second. + */ +typedef enum { + CUBLASLT_MATMUL_TILE_UNDEFINED = 0, + CUBLASLT_MATMUL_TILE_8x8 = 1, + CUBLASLT_MATMUL_TILE_8x16 = 2, + CUBLASLT_MATMUL_TILE_16x8 = 3, + CUBLASLT_MATMUL_TILE_8x32 = 4, + CUBLASLT_MATMUL_TILE_16x16 = 5, + CUBLASLT_MATMUL_TILE_32x8 = 6, + CUBLASLT_MATMUL_TILE_8x64 = 7, + CUBLASLT_MATMUL_TILE_16x32 = 8, + CUBLASLT_MATMUL_TILE_32x16 = 9, + CUBLASLT_MATMUL_TILE_64x8 = 10, + CUBLASLT_MATMUL_TILE_32x32 = 11, + CUBLASLT_MATMUL_TILE_32x64 = 12, + CUBLASLT_MATMUL_TILE_64x32 = 13, + CUBLASLT_MATMUL_TILE_32x128 = 14, + CUBLASLT_MATMUL_TILE_64x64 = 15, + CUBLASLT_MATMUL_TILE_128x32 = 16, + CUBLASLT_MATMUL_TILE_64x128 = 17, + CUBLASLT_MATMUL_TILE_128x64 = 18, + CUBLASLT_MATMUL_TILE_64x256 = 19, + CUBLASLT_MATMUL_TILE_128x128 = 20, + CUBLASLT_MATMUL_TILE_256x64 = 21, + CUBLASLT_MATMUL_TILE_64x512 = 22, + CUBLASLT_MATMUL_TILE_128x256 = 23, + CUBLASLT_MATMUL_TILE_256x128 = 24, + CUBLASLT_MATMUL_TILE_512x64 = 25, + CUBLASLT_MATMUL_TILE_64x96 = 26, + CUBLASLT_MATMUL_TILE_96x64 = 27, + CUBLASLT_MATMUL_TILE_96x128 = 28, + CUBLASLT_MATMUL_TILE_128x160 = 29, + CUBLASLT_MATMUL_TILE_160x128 = 30, + CUBLASLT_MATMUL_TILE_192x128 = 31, + CUBLASLT_MATMUL_TILE_128x192 = 32, + CUBLASLT_MATMUL_TILE_128x96 = 33, + CUBLASLT_MATMUL_TILE_32x256 = 34, + CUBLASLT_MATMUL_TILE_256x32 = 35, + CUBLASLT_MATMUL_TILE_8x128 = 36, + CUBLASLT_MATMUL_TILE_8x192 = 37, + CUBLASLT_MATMUL_TILE_8x256 = 38, + CUBLASLT_MATMUL_TILE_8x320 = 39, + CUBLASLT_MATMUL_TILE_8x384 = 40, + CUBLASLT_MATMUL_TILE_8x448 = 41, + CUBLASLT_MATMUL_TILE_8x512 = 42, + CUBLASLT_MATMUL_TILE_8x576 = 43, + CUBLASLT_MATMUL_TILE_8x640 = 44, + CUBLASLT_MATMUL_TILE_8x704 = 45, + CUBLASLT_MATMUL_TILE_8x768 = 46, + CUBLASLT_MATMUL_TILE_16x64 = 47, + CUBLASLT_MATMUL_TILE_16x128 = 48, + CUBLASLT_MATMUL_TILE_16x192 = 49, + CUBLASLT_MATMUL_TILE_16x256 = 50, + CUBLASLT_MATMUL_TILE_16x320 = 51, + CUBLASLT_MATMUL_TILE_16x384 = 52, + CUBLASLT_MATMUL_TILE_16x448 = 53, + CUBLASLT_MATMUL_TILE_16x512 = 54, + CUBLASLT_MATMUL_TILE_16x576 = 55, + CUBLASLT_MATMUL_TILE_16x640 = 56, + CUBLASLT_MATMUL_TILE_16x704 = 57, + CUBLASLT_MATMUL_TILE_16x768 = 58, + CUBLASLT_MATMUL_TILE_24x64 = 59, + CUBLASLT_MATMUL_TILE_24x128 = 60, + CUBLASLT_MATMUL_TILE_24x192 = 61, + CUBLASLT_MATMUL_TILE_24x256 = 62, + CUBLASLT_MATMUL_TILE_24x320 = 63, + CUBLASLT_MATMUL_TILE_24x384 = 64, + CUBLASLT_MATMUL_TILE_24x448 = 65, + CUBLASLT_MATMUL_TILE_24x512 = 66, + CUBLASLT_MATMUL_TILE_24x576 = 67, + 
CUBLASLT_MATMUL_TILE_24x640 = 68, + CUBLASLT_MATMUL_TILE_24x704 = 69, + CUBLASLT_MATMUL_TILE_24x768 = 70, + CUBLASLT_MATMUL_TILE_32x192 = 71, + CUBLASLT_MATMUL_TILE_32x320 = 72, + CUBLASLT_MATMUL_TILE_32x384 = 73, + CUBLASLT_MATMUL_TILE_32x448 = 74, + CUBLASLT_MATMUL_TILE_32x512 = 75, + CUBLASLT_MATMUL_TILE_32x576 = 76, + CUBLASLT_MATMUL_TILE_32x640 = 77, + CUBLASLT_MATMUL_TILE_32x704 = 78, + CUBLASLT_MATMUL_TILE_32x768 = 79, + CUBLASLT_MATMUL_TILE_40x64 = 80, + CUBLASLT_MATMUL_TILE_40x128 = 81, + CUBLASLT_MATMUL_TILE_40x192 = 82, + CUBLASLT_MATMUL_TILE_40x256 = 83, + CUBLASLT_MATMUL_TILE_40x320 = 84, + CUBLASLT_MATMUL_TILE_40x384 = 85, + CUBLASLT_MATMUL_TILE_40x448 = 86, + CUBLASLT_MATMUL_TILE_40x512 = 87, + CUBLASLT_MATMUL_TILE_40x576 = 88, + CUBLASLT_MATMUL_TILE_40x640 = 89, + CUBLASLT_MATMUL_TILE_40x704 = 90, + CUBLASLT_MATMUL_TILE_40x768 = 91, + CUBLASLT_MATMUL_TILE_48x64 = 92, + CUBLASLT_MATMUL_TILE_48x128 = 93, + CUBLASLT_MATMUL_TILE_48x192 = 94, + CUBLASLT_MATMUL_TILE_48x256 = 95, + CUBLASLT_MATMUL_TILE_48x320 = 96, + CUBLASLT_MATMUL_TILE_48x384 = 97, + CUBLASLT_MATMUL_TILE_48x448 = 98, + CUBLASLT_MATMUL_TILE_48x512 = 99, + CUBLASLT_MATMUL_TILE_48x576 = 100, + CUBLASLT_MATMUL_TILE_48x640 = 101, + CUBLASLT_MATMUL_TILE_48x704 = 102, + CUBLASLT_MATMUL_TILE_48x768 = 103, + CUBLASLT_MATMUL_TILE_56x64 = 104, + CUBLASLT_MATMUL_TILE_56x128 = 105, + CUBLASLT_MATMUL_TILE_56x192 = 106, + CUBLASLT_MATMUL_TILE_56x256 = 107, + CUBLASLT_MATMUL_TILE_56x320 = 108, + CUBLASLT_MATMUL_TILE_56x384 = 109, + CUBLASLT_MATMUL_TILE_56x448 = 110, + CUBLASLT_MATMUL_TILE_56x512 = 111, + CUBLASLT_MATMUL_TILE_56x576 = 112, + CUBLASLT_MATMUL_TILE_56x640 = 113, + CUBLASLT_MATMUL_TILE_56x704 = 114, + CUBLASLT_MATMUL_TILE_56x768 = 115, + CUBLASLT_MATMUL_TILE_64x192 = 116, + CUBLASLT_MATMUL_TILE_64x320 = 117, + CUBLASLT_MATMUL_TILE_64x384 = 118, + CUBLASLT_MATMUL_TILE_64x448 = 119, + CUBLASLT_MATMUL_TILE_64x576 = 120, + CUBLASLT_MATMUL_TILE_64x640 = 121, + CUBLASLT_MATMUL_TILE_64x704 = 122, + CUBLASLT_MATMUL_TILE_64x768 = 123, + CUBLASLT_MATMUL_TILE_72x64 = 124, + CUBLASLT_MATMUL_TILE_72x128 = 125, + CUBLASLT_MATMUL_TILE_72x192 = 126, + CUBLASLT_MATMUL_TILE_72x256 = 127, + CUBLASLT_MATMUL_TILE_72x320 = 128, + CUBLASLT_MATMUL_TILE_72x384 = 129, + CUBLASLT_MATMUL_TILE_72x448 = 130, + CUBLASLT_MATMUL_TILE_72x512 = 131, + CUBLASLT_MATMUL_TILE_72x576 = 132, + CUBLASLT_MATMUL_TILE_72x640 = 133, + CUBLASLT_MATMUL_TILE_80x64 = 134, + CUBLASLT_MATMUL_TILE_80x128 = 135, + CUBLASLT_MATMUL_TILE_80x192 = 136, + CUBLASLT_MATMUL_TILE_80x256 = 137, + CUBLASLT_MATMUL_TILE_80x320 = 138, + CUBLASLT_MATMUL_TILE_80x384 = 139, + CUBLASLT_MATMUL_TILE_80x448 = 140, + CUBLASLT_MATMUL_TILE_80x512 = 141, + CUBLASLT_MATMUL_TILE_80x576 = 142, + CUBLASLT_MATMUL_TILE_88x64 = 143, + CUBLASLT_MATMUL_TILE_88x128 = 144, + CUBLASLT_MATMUL_TILE_88x192 = 145, + CUBLASLT_MATMUL_TILE_88x256 = 146, + CUBLASLT_MATMUL_TILE_88x320 = 147, + CUBLASLT_MATMUL_TILE_88x384 = 148, + CUBLASLT_MATMUL_TILE_88x448 = 149, + CUBLASLT_MATMUL_TILE_88x512 = 150, + CUBLASLT_MATMUL_TILE_96x192 = 151, + CUBLASLT_MATMUL_TILE_96x256 = 152, + CUBLASLT_MATMUL_TILE_96x320 = 153, + CUBLASLT_MATMUL_TILE_96x384 = 154, + CUBLASLT_MATMUL_TILE_96x448 = 155, + CUBLASLT_MATMUL_TILE_96x512 = 156, + CUBLASLT_MATMUL_TILE_104x64 = 157, + CUBLASLT_MATMUL_TILE_104x128 = 158, + CUBLASLT_MATMUL_TILE_104x192 = 159, + CUBLASLT_MATMUL_TILE_104x256 = 160, + CUBLASLT_MATMUL_TILE_104x320 = 161, + CUBLASLT_MATMUL_TILE_104x384 = 162, + CUBLASLT_MATMUL_TILE_104x448 = 163, + CUBLASLT_MATMUL_TILE_112x64 = 164, 
+ CUBLASLT_MATMUL_TILE_112x128 = 165, + CUBLASLT_MATMUL_TILE_112x192 = 166, + CUBLASLT_MATMUL_TILE_112x256 = 167, + CUBLASLT_MATMUL_TILE_112x320 = 168, + CUBLASLT_MATMUL_TILE_112x384 = 169, + CUBLASLT_MATMUL_TILE_120x64 = 170, + CUBLASLT_MATMUL_TILE_120x128 = 171, + CUBLASLT_MATMUL_TILE_120x192 = 172, + CUBLASLT_MATMUL_TILE_120x256 = 173, + CUBLASLT_MATMUL_TILE_120x320 = 174, + CUBLASLT_MATMUL_TILE_120x384 = 175, + CUBLASLT_MATMUL_TILE_128x320 = 176, + CUBLASLT_MATMUL_TILE_128x384 = 177, + CUBLASLT_MATMUL_TILE_136x64 = 178, + CUBLASLT_MATMUL_TILE_136x128 = 179, + CUBLASLT_MATMUL_TILE_136x192 = 180, + CUBLASLT_MATMUL_TILE_136x256 = 181, + CUBLASLT_MATMUL_TILE_136x320 = 182, + CUBLASLT_MATMUL_TILE_144x64 = 183, + CUBLASLT_MATMUL_TILE_144x128 = 184, + CUBLASLT_MATMUL_TILE_144x192 = 185, + CUBLASLT_MATMUL_TILE_144x256 = 186, + CUBLASLT_MATMUL_TILE_144x320 = 187, + CUBLASLT_MATMUL_TILE_152x64 = 188, + CUBLASLT_MATMUL_TILE_152x128 = 189, + CUBLASLT_MATMUL_TILE_152x192 = 190, + CUBLASLT_MATMUL_TILE_152x256 = 191, + CUBLASLT_MATMUL_TILE_152x320 = 192, + CUBLASLT_MATMUL_TILE_160x64 = 193, + CUBLASLT_MATMUL_TILE_160x192 = 194, + CUBLASLT_MATMUL_TILE_160x256 = 195, + CUBLASLT_MATMUL_TILE_168x64 = 196, + CUBLASLT_MATMUL_TILE_168x128 = 197, + CUBLASLT_MATMUL_TILE_168x192 = 198, + CUBLASLT_MATMUL_TILE_168x256 = 199, + CUBLASLT_MATMUL_TILE_176x64 = 200, + CUBLASLT_MATMUL_TILE_176x128 = 201, + CUBLASLT_MATMUL_TILE_176x192 = 202, + CUBLASLT_MATMUL_TILE_176x256 = 203, + CUBLASLT_MATMUL_TILE_184x64 = 204, + CUBLASLT_MATMUL_TILE_184x128 = 205, + CUBLASLT_MATMUL_TILE_184x192 = 206, + CUBLASLT_MATMUL_TILE_184x256 = 207, + CUBLASLT_MATMUL_TILE_192x64 = 208, + CUBLASLT_MATMUL_TILE_192x192 = 209, + CUBLASLT_MATMUL_TILE_192x256 = 210, + CUBLASLT_MATMUL_TILE_200x64 = 211, + CUBLASLT_MATMUL_TILE_200x128 = 212, + CUBLASLT_MATMUL_TILE_200x192 = 213, + CUBLASLT_MATMUL_TILE_208x64 = 214, + CUBLASLT_MATMUL_TILE_208x128 = 215, + CUBLASLT_MATMUL_TILE_208x192 = 216, + CUBLASLT_MATMUL_TILE_216x64 = 217, + CUBLASLT_MATMUL_TILE_216x128 = 218, + CUBLASLT_MATMUL_TILE_216x192 = 219, + CUBLASLT_MATMUL_TILE_224x64 = 220, + CUBLASLT_MATMUL_TILE_224x128 = 221, + CUBLASLT_MATMUL_TILE_224x192 = 222, + CUBLASLT_MATMUL_TILE_232x64 = 223, + CUBLASLT_MATMUL_TILE_232x128 = 224, + CUBLASLT_MATMUL_TILE_232x192 = 225, + CUBLASLT_MATMUL_TILE_240x64 = 226, + CUBLASLT_MATMUL_TILE_240x128 = 227, + CUBLASLT_MATMUL_TILE_240x192 = 228, + CUBLASLT_MATMUL_TILE_248x64 = 229, + CUBLASLT_MATMUL_TILE_248x128 = 230, + CUBLASLT_MATMUL_TILE_248x192 = 231, + CUBLASLT_MATMUL_TILE_256x192 = 232, + CUBLASLT_MATMUL_TILE_264x64 = 233, + CUBLASLT_MATMUL_TILE_264x128 = 234, + CUBLASLT_MATMUL_TILE_272x64 = 235, + CUBLASLT_MATMUL_TILE_272x128 = 236, + CUBLASLT_MATMUL_TILE_280x64 = 237, + CUBLASLT_MATMUL_TILE_280x128 = 238, + CUBLASLT_MATMUL_TILE_288x64 = 239, + CUBLASLT_MATMUL_TILE_288x128 = 240, + CUBLASLT_MATMUL_TILE_296x64 = 241, + CUBLASLT_MATMUL_TILE_296x128 = 242, + CUBLASLT_MATMUL_TILE_304x64 = 243, + CUBLASLT_MATMUL_TILE_304x128 = 244, + CUBLASLT_MATMUL_TILE_312x64 = 245, + CUBLASLT_MATMUL_TILE_312x128 = 246, + CUBLASLT_MATMUL_TILE_320x64 = 247, + CUBLASLT_MATMUL_TILE_320x128 = 248, + CUBLASLT_MATMUL_TILE_328x64 = 249, + CUBLASLT_MATMUL_TILE_328x128 = 250, + CUBLASLT_MATMUL_TILE_336x64 = 251, + CUBLASLT_MATMUL_TILE_336x128 = 252, + CUBLASLT_MATMUL_TILE_344x64 = 253, + CUBLASLT_MATMUL_TILE_344x128 = 254, + CUBLASLT_MATMUL_TILE_352x64 = 255, + CUBLASLT_MATMUL_TILE_352x128 = 256, + CUBLASLT_MATMUL_TILE_360x64 = 257, + CUBLASLT_MATMUL_TILE_360x128 = 258, + 
CUBLASLT_MATMUL_TILE_368x64 = 259, + CUBLASLT_MATMUL_TILE_368x128 = 260, + CUBLASLT_MATMUL_TILE_376x64 = 261, + CUBLASLT_MATMUL_TILE_376x128 = 262, + CUBLASLT_MATMUL_TILE_384x64 = 263, + CUBLASLT_MATMUL_TILE_384x128 = 264, + CUBLASLT_MATMUL_TILE_392x64 = 265, + CUBLASLT_MATMUL_TILE_400x64 = 266, + CUBLASLT_MATMUL_TILE_408x64 = 267, + CUBLASLT_MATMUL_TILE_416x64 = 268, + CUBLASLT_MATMUL_TILE_424x64 = 269, + CUBLASLT_MATMUL_TILE_432x64 = 270, + CUBLASLT_MATMUL_TILE_440x64 = 271, + CUBLASLT_MATMUL_TILE_448x64 = 272, + CUBLASLT_MATMUL_TILE_456x64 = 273, + CUBLASLT_MATMUL_TILE_464x64 = 274, + CUBLASLT_MATMUL_TILE_472x64 = 275, + CUBLASLT_MATMUL_TILE_480x64 = 276, + CUBLASLT_MATMUL_TILE_488x64 = 277, + CUBLASLT_MATMUL_TILE_496x64 = 278, + CUBLASLT_MATMUL_TILE_504x64 = 279, + CUBLASLT_MATMUL_TILE_520x64 = 280, + CUBLASLT_MATMUL_TILE_528x64 = 281, + CUBLASLT_MATMUL_TILE_536x64 = 282, + CUBLASLT_MATMUL_TILE_544x64 = 283, + CUBLASLT_MATMUL_TILE_552x64 = 284, + CUBLASLT_MATMUL_TILE_560x64 = 285, + CUBLASLT_MATMUL_TILE_568x64 = 286, + CUBLASLT_MATMUL_TILE_576x64 = 287, + CUBLASLT_MATMUL_TILE_584x64 = 288, + CUBLASLT_MATMUL_TILE_592x64 = 289, + CUBLASLT_MATMUL_TILE_600x64 = 290, + CUBLASLT_MATMUL_TILE_608x64 = 291, + CUBLASLT_MATMUL_TILE_616x64 = 292, + CUBLASLT_MATMUL_TILE_624x64 = 293, + CUBLASLT_MATMUL_TILE_632x64 = 294, + CUBLASLT_MATMUL_TILE_640x64 = 295, + CUBLASLT_MATMUL_TILE_648x64 = 296, + CUBLASLT_MATMUL_TILE_656x64 = 297, + CUBLASLT_MATMUL_TILE_664x64 = 298, + CUBLASLT_MATMUL_TILE_672x64 = 299, + CUBLASLT_MATMUL_TILE_680x64 = 300, + CUBLASLT_MATMUL_TILE_688x64 = 301, + CUBLASLT_MATMUL_TILE_696x64 = 302, + CUBLASLT_MATMUL_TILE_704x64 = 303, + CUBLASLT_MATMUL_TILE_712x64 = 304, + CUBLASLT_MATMUL_TILE_720x64 = 305, + CUBLASLT_MATMUL_TILE_728x64 = 306, + CUBLASLT_MATMUL_TILE_736x64 = 307, + CUBLASLT_MATMUL_TILE_744x64 = 308, + CUBLASLT_MATMUL_TILE_752x64 = 309, + CUBLASLT_MATMUL_TILE_760x64 = 310, + CUBLASLT_MATMUL_TILE_768x64 = 311, + CUBLASLT_MATMUL_TILE_64x16 = 312, + CUBLASLT_MATMUL_TILE_64x24 = 313, + CUBLASLT_MATMUL_TILE_64x40 = 314, + CUBLASLT_MATMUL_TILE_64x48 = 315, + CUBLASLT_MATMUL_TILE_64x56 = 316, + CUBLASLT_MATMUL_TILE_64x72 = 317, + CUBLASLT_MATMUL_TILE_64x80 = 318, + CUBLASLT_MATMUL_TILE_64x88 = 319, + CUBLASLT_MATMUL_TILE_64x104 = 320, + CUBLASLT_MATMUL_TILE_64x112 = 321, + CUBLASLT_MATMUL_TILE_64x120 = 322, + CUBLASLT_MATMUL_TILE_64x136 = 323, + CUBLASLT_MATMUL_TILE_64x144 = 324, + CUBLASLT_MATMUL_TILE_64x152 = 325, + CUBLASLT_MATMUL_TILE_64x160 = 326, + CUBLASLT_MATMUL_TILE_64x168 = 327, + CUBLASLT_MATMUL_TILE_64x176 = 328, + CUBLASLT_MATMUL_TILE_64x184 = 329, + CUBLASLT_MATMUL_TILE_64x200 = 330, + CUBLASLT_MATMUL_TILE_64x208 = 331, + CUBLASLT_MATMUL_TILE_64x216 = 332, + CUBLASLT_MATMUL_TILE_64x224 = 333, + CUBLASLT_MATMUL_TILE_64x232 = 334, + CUBLASLT_MATMUL_TILE_64x240 = 335, + CUBLASLT_MATMUL_TILE_64x248 = 336, + CUBLASLT_MATMUL_TILE_64x264 = 337, + CUBLASLT_MATMUL_TILE_64x272 = 338, + CUBLASLT_MATMUL_TILE_64x280 = 339, + CUBLASLT_MATMUL_TILE_64x288 = 340, + CUBLASLT_MATMUL_TILE_64x296 = 341, + CUBLASLT_MATMUL_TILE_64x304 = 342, + CUBLASLT_MATMUL_TILE_64x312 = 343, + CUBLASLT_MATMUL_TILE_64x328 = 344, + CUBLASLT_MATMUL_TILE_64x336 = 345, + CUBLASLT_MATMUL_TILE_64x344 = 346, + CUBLASLT_MATMUL_TILE_64x352 = 347, + CUBLASLT_MATMUL_TILE_64x360 = 348, + CUBLASLT_MATMUL_TILE_64x368 = 349, + CUBLASLT_MATMUL_TILE_64x376 = 350, + CUBLASLT_MATMUL_TILE_64x392 = 351, + CUBLASLT_MATMUL_TILE_64x400 = 352, + CUBLASLT_MATMUL_TILE_64x408 = 353, + CUBLASLT_MATMUL_TILE_64x416 = 354, + 
CUBLASLT_MATMUL_TILE_64x424 = 355, + CUBLASLT_MATMUL_TILE_64x432 = 356, + CUBLASLT_MATMUL_TILE_64x440 = 357, + CUBLASLT_MATMUL_TILE_64x456 = 358, + CUBLASLT_MATMUL_TILE_64x464 = 359, + CUBLASLT_MATMUL_TILE_64x472 = 360, + CUBLASLT_MATMUL_TILE_64x480 = 361, + CUBLASLT_MATMUL_TILE_64x488 = 362, + CUBLASLT_MATMUL_TILE_64x496 = 363, + CUBLASLT_MATMUL_TILE_64x504 = 364, + CUBLASLT_MATMUL_TILE_64x520 = 365, + CUBLASLT_MATMUL_TILE_64x528 = 366, + CUBLASLT_MATMUL_TILE_64x536 = 367, + CUBLASLT_MATMUL_TILE_64x544 = 368, + CUBLASLT_MATMUL_TILE_64x552 = 369, + CUBLASLT_MATMUL_TILE_64x560 = 370, + CUBLASLT_MATMUL_TILE_64x568 = 371, + CUBLASLT_MATMUL_TILE_64x584 = 372, + CUBLASLT_MATMUL_TILE_64x592 = 373, + CUBLASLT_MATMUL_TILE_64x600 = 374, + CUBLASLT_MATMUL_TILE_64x608 = 375, + CUBLASLT_MATMUL_TILE_64x616 = 376, + CUBLASLT_MATMUL_TILE_64x624 = 377, + CUBLASLT_MATMUL_TILE_64x632 = 378, + CUBLASLT_MATMUL_TILE_64x648 = 379, + CUBLASLT_MATMUL_TILE_64x656 = 380, + CUBLASLT_MATMUL_TILE_64x664 = 381, + CUBLASLT_MATMUL_TILE_64x672 = 382, + CUBLASLT_MATMUL_TILE_64x680 = 383, + CUBLASLT_MATMUL_TILE_64x688 = 384, + CUBLASLT_MATMUL_TILE_64x696 = 385, + CUBLASLT_MATMUL_TILE_64x712 = 386, + CUBLASLT_MATMUL_TILE_64x720 = 387, + CUBLASLT_MATMUL_TILE_64x728 = 388, + CUBLASLT_MATMUL_TILE_64x736 = 389, + CUBLASLT_MATMUL_TILE_64x744 = 390, + CUBLASLT_MATMUL_TILE_64x752 = 391, + CUBLASLT_MATMUL_TILE_64x760 = 392, + CUBLASLT_MATMUL_TILE_128x8 = 393, + CUBLASLT_MATMUL_TILE_128x16 = 394, + CUBLASLT_MATMUL_TILE_128x24 = 395, + CUBLASLT_MATMUL_TILE_128x40 = 396, + CUBLASLT_MATMUL_TILE_128x48 = 397, + CUBLASLT_MATMUL_TILE_128x56 = 398, + CUBLASLT_MATMUL_TILE_128x72 = 399, + CUBLASLT_MATMUL_TILE_128x80 = 400, + CUBLASLT_MATMUL_TILE_128x88 = 401, + CUBLASLT_MATMUL_TILE_128x104 = 402, + CUBLASLT_MATMUL_TILE_128x112 = 403, + CUBLASLT_MATMUL_TILE_128x120 = 404, + CUBLASLT_MATMUL_TILE_128x136 = 405, + CUBLASLT_MATMUL_TILE_128x144 = 406, + CUBLASLT_MATMUL_TILE_128x152 = 407, + CUBLASLT_MATMUL_TILE_128x168 = 408, + CUBLASLT_MATMUL_TILE_128x176 = 409, + CUBLASLT_MATMUL_TILE_128x184 = 410, + CUBLASLT_MATMUL_TILE_128x200 = 411, + CUBLASLT_MATMUL_TILE_128x208 = 412, + CUBLASLT_MATMUL_TILE_128x216 = 413, + CUBLASLT_MATMUL_TILE_128x224 = 414, + CUBLASLT_MATMUL_TILE_128x232 = 415, + CUBLASLT_MATMUL_TILE_128x240 = 416, + CUBLASLT_MATMUL_TILE_128x248 = 417, + CUBLASLT_MATMUL_TILE_128x264 = 418, + CUBLASLT_MATMUL_TILE_128x272 = 419, + CUBLASLT_MATMUL_TILE_128x280 = 420, + CUBLASLT_MATMUL_TILE_128x288 = 421, + CUBLASLT_MATMUL_TILE_128x296 = 422, + CUBLASLT_MATMUL_TILE_128x304 = 423, + CUBLASLT_MATMUL_TILE_128x312 = 424, + CUBLASLT_MATMUL_TILE_128x328 = 425, + CUBLASLT_MATMUL_TILE_128x336 = 426, + CUBLASLT_MATMUL_TILE_128x344 = 427, + CUBLASLT_MATMUL_TILE_128x352 = 428, + CUBLASLT_MATMUL_TILE_128x360 = 429, + CUBLASLT_MATMUL_TILE_128x368 = 430, + CUBLASLT_MATMUL_TILE_128x376 = 431, + CUBLASLT_MATMUL_TILE_128x392 = 432, + CUBLASLT_MATMUL_TILE_128x400 = 433, + CUBLASLT_MATMUL_TILE_128x408 = 434, + CUBLASLT_MATMUL_TILE_128x416 = 435, + CUBLASLT_MATMUL_TILE_128x424 = 436, + CUBLASLT_MATMUL_TILE_128x432 = 437, + CUBLASLT_MATMUL_TILE_128x440 = 438, + CUBLASLT_MATMUL_TILE_128x448 = 439, + CUBLASLT_MATMUL_TILE_128x456 = 440, + CUBLASLT_MATMUL_TILE_128x464 = 441, + CUBLASLT_MATMUL_TILE_128x472 = 442, + CUBLASLT_MATMUL_TILE_128x480 = 443, + CUBLASLT_MATMUL_TILE_128x488 = 444, + CUBLASLT_MATMUL_TILE_128x496 = 445, + CUBLASLT_MATMUL_TILE_128x504 = 446, + CUBLASLT_MATMUL_TILE_128x512 = 447, + CUBLASLT_MATMUL_TILE_192x8 = 448, + CUBLASLT_MATMUL_TILE_192x16 = 
449, + CUBLASLT_MATMUL_TILE_192x24 = 450, + CUBLASLT_MATMUL_TILE_192x32 = 451, + CUBLASLT_MATMUL_TILE_192x40 = 452, + CUBLASLT_MATMUL_TILE_192x48 = 453, + CUBLASLT_MATMUL_TILE_192x56 = 454, + CUBLASLT_MATMUL_TILE_192x72 = 455, + CUBLASLT_MATMUL_TILE_192x80 = 456, + CUBLASLT_MATMUL_TILE_192x88 = 457, + CUBLASLT_MATMUL_TILE_192x96 = 458, + CUBLASLT_MATMUL_TILE_192x104 = 459, + CUBLASLT_MATMUL_TILE_192x112 = 460, + CUBLASLT_MATMUL_TILE_192x120 = 461, + CUBLASLT_MATMUL_TILE_192x136 = 462, + CUBLASLT_MATMUL_TILE_192x144 = 463, + CUBLASLT_MATMUL_TILE_192x152 = 464, + CUBLASLT_MATMUL_TILE_192x160 = 465, + CUBLASLT_MATMUL_TILE_192x168 = 466, + CUBLASLT_MATMUL_TILE_192x176 = 467, + CUBLASLT_MATMUL_TILE_192x184 = 468, + CUBLASLT_MATMUL_TILE_192x200 = 469, + CUBLASLT_MATMUL_TILE_192x208 = 470, + CUBLASLT_MATMUL_TILE_192x216 = 471, + CUBLASLT_MATMUL_TILE_192x224 = 472, + CUBLASLT_MATMUL_TILE_192x232 = 473, + CUBLASLT_MATMUL_TILE_192x240 = 474, + CUBLASLT_MATMUL_TILE_192x248 = 475, + CUBLASLT_MATMUL_TILE_192x264 = 476, + CUBLASLT_MATMUL_TILE_192x272 = 477, + CUBLASLT_MATMUL_TILE_192x280 = 478, + CUBLASLT_MATMUL_TILE_192x288 = 479, + CUBLASLT_MATMUL_TILE_192x296 = 480, + CUBLASLT_MATMUL_TILE_192x304 = 481, + CUBLASLT_MATMUL_TILE_192x312 = 482, + CUBLASLT_MATMUL_TILE_192x320 = 483, + CUBLASLT_MATMUL_TILE_192x328 = 484, + CUBLASLT_MATMUL_TILE_192x336 = 485, + CUBLASLT_MATMUL_TILE_256x8 = 486, + CUBLASLT_MATMUL_TILE_256x16 = 487, + CUBLASLT_MATMUL_TILE_256x24 = 488, + CUBLASLT_MATMUL_TILE_256x40 = 489, + CUBLASLT_MATMUL_TILE_256x48 = 490, + CUBLASLT_MATMUL_TILE_256x56 = 491, + CUBLASLT_MATMUL_TILE_256x72 = 492, + CUBLASLT_MATMUL_TILE_256x80 = 493, + CUBLASLT_MATMUL_TILE_256x88 = 494, + CUBLASLT_MATMUL_TILE_256x96 = 495, + CUBLASLT_MATMUL_TILE_256x104 = 496, + CUBLASLT_MATMUL_TILE_256x112 = 497, + CUBLASLT_MATMUL_TILE_256x120 = 498, + CUBLASLT_MATMUL_TILE_256x136 = 499, + CUBLASLT_MATMUL_TILE_256x144 = 500, + CUBLASLT_MATMUL_TILE_256x152 = 501, + CUBLASLT_MATMUL_TILE_256x160 = 502, + CUBLASLT_MATMUL_TILE_256x168 = 503, + CUBLASLT_MATMUL_TILE_256x176 = 504, + CUBLASLT_MATMUL_TILE_256x184 = 505, + CUBLASLT_MATMUL_TILE_256x200 = 506, + CUBLASLT_MATMUL_TILE_256x208 = 507, + CUBLASLT_MATMUL_TILE_256x216 = 508, + CUBLASLT_MATMUL_TILE_256x224 = 509, + CUBLASLT_MATMUL_TILE_256x232 = 510, + CUBLASLT_MATMUL_TILE_256x240 = 511, + CUBLASLT_MATMUL_TILE_256x248 = 512, + CUBLASLT_MATMUL_TILE_256x256 = 513, + CUBLASLT_MATMUL_TILE_320x8 = 514, + CUBLASLT_MATMUL_TILE_320x16 = 515, + CUBLASLT_MATMUL_TILE_320x24 = 516, + CUBLASLT_MATMUL_TILE_320x32 = 517, + CUBLASLT_MATMUL_TILE_320x40 = 518, + CUBLASLT_MATMUL_TILE_320x48 = 519, + CUBLASLT_MATMUL_TILE_320x56 = 520, + CUBLASLT_MATMUL_TILE_320x72 = 521, + CUBLASLT_MATMUL_TILE_320x80 = 522, + CUBLASLT_MATMUL_TILE_320x88 = 523, + CUBLASLT_MATMUL_TILE_320x96 = 524, + CUBLASLT_MATMUL_TILE_320x104 = 525, + CUBLASLT_MATMUL_TILE_320x112 = 526, + CUBLASLT_MATMUL_TILE_320x120 = 527, + CUBLASLT_MATMUL_TILE_320x136 = 528, + CUBLASLT_MATMUL_TILE_320x144 = 529, + CUBLASLT_MATMUL_TILE_320x152 = 530, + CUBLASLT_MATMUL_TILE_320x160 = 531, + CUBLASLT_MATMUL_TILE_320x168 = 532, + CUBLASLT_MATMUL_TILE_320x176 = 533, + CUBLASLT_MATMUL_TILE_320x184 = 534, + CUBLASLT_MATMUL_TILE_320x192 = 535, + CUBLASLT_MATMUL_TILE_320x200 = 536, + CUBLASLT_MATMUL_TILE_384x8 = 537, + CUBLASLT_MATMUL_TILE_384x16 = 538, + CUBLASLT_MATMUL_TILE_384x24 = 539, + CUBLASLT_MATMUL_TILE_384x32 = 540, + CUBLASLT_MATMUL_TILE_384x40 = 541, + CUBLASLT_MATMUL_TILE_384x48 = 542, + CUBLASLT_MATMUL_TILE_384x56 = 543, + 
CUBLASLT_MATMUL_TILE_384x72 = 544, + CUBLASLT_MATMUL_TILE_384x80 = 545, + CUBLASLT_MATMUL_TILE_384x88 = 546, + CUBLASLT_MATMUL_TILE_384x96 = 547, + CUBLASLT_MATMUL_TILE_384x104 = 548, + CUBLASLT_MATMUL_TILE_384x112 = 549, + CUBLASLT_MATMUL_TILE_384x120 = 550, + CUBLASLT_MATMUL_TILE_384x136 = 551, + CUBLASLT_MATMUL_TILE_384x144 = 552, + CUBLASLT_MATMUL_TILE_384x152 = 553, + CUBLASLT_MATMUL_TILE_384x160 = 554, + CUBLASLT_MATMUL_TILE_384x168 = 555, + CUBLASLT_MATMUL_TILE_448x8 = 556, + CUBLASLT_MATMUL_TILE_448x16 = 557, + CUBLASLT_MATMUL_TILE_448x24 = 558, + CUBLASLT_MATMUL_TILE_448x32 = 559, + CUBLASLT_MATMUL_TILE_448x40 = 560, + CUBLASLT_MATMUL_TILE_448x48 = 561, + CUBLASLT_MATMUL_TILE_448x56 = 562, + CUBLASLT_MATMUL_TILE_448x72 = 563, + CUBLASLT_MATMUL_TILE_448x80 = 564, + CUBLASLT_MATMUL_TILE_448x88 = 565, + CUBLASLT_MATMUL_TILE_448x96 = 566, + CUBLASLT_MATMUL_TILE_448x104 = 567, + CUBLASLT_MATMUL_TILE_448x112 = 568, + CUBLASLT_MATMUL_TILE_448x120 = 569, + CUBLASLT_MATMUL_TILE_448x128 = 570, + CUBLASLT_MATMUL_TILE_448x136 = 571, + CUBLASLT_MATMUL_TILE_448x144 = 572, + CUBLASLT_MATMUL_TILE_512x8 = 573, + CUBLASLT_MATMUL_TILE_512x16 = 574, + CUBLASLT_MATMUL_TILE_512x24 = 575, + CUBLASLT_MATMUL_TILE_512x32 = 576, + CUBLASLT_MATMUL_TILE_512x40 = 577, + CUBLASLT_MATMUL_TILE_512x48 = 578, + CUBLASLT_MATMUL_TILE_512x56 = 579, + CUBLASLT_MATMUL_TILE_512x72 = 580, + CUBLASLT_MATMUL_TILE_512x80 = 581, + CUBLASLT_MATMUL_TILE_512x88 = 582, + CUBLASLT_MATMUL_TILE_512x96 = 583, + CUBLASLT_MATMUL_TILE_512x104 = 584, + CUBLASLT_MATMUL_TILE_512x112 = 585, + CUBLASLT_MATMUL_TILE_512x120 = 586, + CUBLASLT_MATMUL_TILE_512x128 = 587, + CUBLASLT_MATMUL_TILE_576x8 = 588, + CUBLASLT_MATMUL_TILE_576x16 = 589, + CUBLASLT_MATMUL_TILE_576x24 = 590, + CUBLASLT_MATMUL_TILE_576x32 = 591, + CUBLASLT_MATMUL_TILE_576x40 = 592, + CUBLASLT_MATMUL_TILE_576x48 = 593, + CUBLASLT_MATMUL_TILE_576x56 = 594, + CUBLASLT_MATMUL_TILE_576x72 = 595, + CUBLASLT_MATMUL_TILE_576x80 = 596, + CUBLASLT_MATMUL_TILE_576x88 = 597, + CUBLASLT_MATMUL_TILE_576x96 = 598, + CUBLASLT_MATMUL_TILE_576x104 = 599, + CUBLASLT_MATMUL_TILE_576x112 = 600, + CUBLASLT_MATMUL_TILE_640x8 = 601, + CUBLASLT_MATMUL_TILE_640x16 = 602, + CUBLASLT_MATMUL_TILE_640x24 = 603, + CUBLASLT_MATMUL_TILE_640x32 = 604, + CUBLASLT_MATMUL_TILE_640x40 = 605, + CUBLASLT_MATMUL_TILE_640x48 = 606, + CUBLASLT_MATMUL_TILE_640x56 = 607, + CUBLASLT_MATMUL_TILE_640x72 = 608, + CUBLASLT_MATMUL_TILE_640x80 = 609, + CUBLASLT_MATMUL_TILE_640x88 = 610, + CUBLASLT_MATMUL_TILE_640x96 = 611, + CUBLASLT_MATMUL_TILE_704x8 = 612, + CUBLASLT_MATMUL_TILE_704x16 = 613, + CUBLASLT_MATMUL_TILE_704x24 = 614, + CUBLASLT_MATMUL_TILE_704x32 = 615, + CUBLASLT_MATMUL_TILE_704x40 = 616, + CUBLASLT_MATMUL_TILE_704x48 = 617, + CUBLASLT_MATMUL_TILE_704x56 = 618, + CUBLASLT_MATMUL_TILE_704x72 = 619, + CUBLASLT_MATMUL_TILE_704x80 = 620, + CUBLASLT_MATMUL_TILE_704x88 = 621, + CUBLASLT_MATMUL_TILE_768x8 = 622, + CUBLASLT_MATMUL_TILE_768x16 = 623, + CUBLASLT_MATMUL_TILE_768x24 = 624, + CUBLASLT_MATMUL_TILE_768x32 = 625, + CUBLASLT_MATMUL_TILE_768x40 = 626, + CUBLASLT_MATMUL_TILE_768x48 = 627, + CUBLASLT_MATMUL_TILE_768x56 = 628, + CUBLASLT_MATMUL_TILE_768x72 = 629, + CUBLASLT_MATMUL_TILE_768x80 = 630, + CUBLASLT_MATMUL_TILE_END +} cublasLtMatmulTile_t; + +/** Size and number of stages in which elements are read into shared memory + * + * General order of stages IDs is sorted by stage size first and by number of stages second. 
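+ *
+ * For example (an illustrative reading of the naming scheme): CUBLASLT_MATMUL_STAGES_64x3
+ * denotes a stage size of 64 with 3 stages, and the *xAUTO variants leave the stage count
+ * to the library.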
+ */ +typedef enum { + CUBLASLT_MATMUL_STAGES_UNDEFINED = 0, + CUBLASLT_MATMUL_STAGES_16x1 = 1, + CUBLASLT_MATMUL_STAGES_16x2 = 2, + CUBLASLT_MATMUL_STAGES_16x3 = 3, + CUBLASLT_MATMUL_STAGES_16x4 = 4, + CUBLASLT_MATMUL_STAGES_16x5 = 5, + CUBLASLT_MATMUL_STAGES_16x6 = 6, + CUBLASLT_MATMUL_STAGES_32x1 = 7, + CUBLASLT_MATMUL_STAGES_32x2 = 8, + CUBLASLT_MATMUL_STAGES_32x3 = 9, + CUBLASLT_MATMUL_STAGES_32x4 = 10, + CUBLASLT_MATMUL_STAGES_32x5 = 11, + CUBLASLT_MATMUL_STAGES_32x6 = 12, + CUBLASLT_MATMUL_STAGES_64x1 = 13, + CUBLASLT_MATMUL_STAGES_64x2 = 14, + CUBLASLT_MATMUL_STAGES_64x3 = 15, + CUBLASLT_MATMUL_STAGES_64x4 = 16, + CUBLASLT_MATMUL_STAGES_64x5 = 17, + CUBLASLT_MATMUL_STAGES_64x6 = 18, + CUBLASLT_MATMUL_STAGES_128x1 = 19, + CUBLASLT_MATMUL_STAGES_128x2 = 20, + CUBLASLT_MATMUL_STAGES_128x3 = 21, + CUBLASLT_MATMUL_STAGES_128x4 = 22, + CUBLASLT_MATMUL_STAGES_128x5 = 23, + CUBLASLT_MATMUL_STAGES_128x6 = 24, + CUBLASLT_MATMUL_STAGES_32x10 = 25, + CUBLASLT_MATMUL_STAGES_8x4 = 26, + CUBLASLT_MATMUL_STAGES_16x10 = 27, + CUBLASLT_MATMUL_STAGES_8x5 = 28, + CUBLASLT_MATMUL_STAGES_8x3 = 31, + CUBLASLT_MATMUL_STAGES_8xAUTO = 32, + CUBLASLT_MATMUL_STAGES_16xAUTO = 33, + CUBLASLT_MATMUL_STAGES_32xAUTO = 34, + CUBLASLT_MATMUL_STAGES_64xAUTO = 35, + CUBLASLT_MATMUL_STAGES_128xAUTO = 36, + CUBLASLT_MATMUL_STAGES_END +} cublasLtMatmulStages_t; + +/** Thread Block Cluster size + * + * Typically dimensioned similar to cublasLtMatmulTile_t, with the third coordinate unused at this time. + */ +typedef enum { + /** Let library pick cluster shape automatically */ + CUBLASLT_CLUSTER_SHAPE_AUTO = 0, + CUBLASLT_CLUSTER_SHAPE_1x1x1 = 2, + CUBLASLT_CLUSTER_SHAPE_2x1x1 = 3, + CUBLASLT_CLUSTER_SHAPE_4x1x1 = 4, + CUBLASLT_CLUSTER_SHAPE_1x2x1 = 5, + CUBLASLT_CLUSTER_SHAPE_2x2x1 = 6, + CUBLASLT_CLUSTER_SHAPE_4x2x1 = 7, + CUBLASLT_CLUSTER_SHAPE_1x4x1 = 8, + CUBLASLT_CLUSTER_SHAPE_2x4x1 = 9, + CUBLASLT_CLUSTER_SHAPE_4x4x1 = 10, + CUBLASLT_CLUSTER_SHAPE_8x1x1 = 11, + CUBLASLT_CLUSTER_SHAPE_1x8x1 = 12, + CUBLASLT_CLUSTER_SHAPE_8x2x1 = 13, + CUBLASLT_CLUSTER_SHAPE_2x8x1 = 14, + CUBLASLT_CLUSTER_SHAPE_16x1x1 = 15, + CUBLASLT_CLUSTER_SHAPE_1x16x1 = 16, + CUBLASLT_CLUSTER_SHAPE_3x1x1 = 17, + CUBLASLT_CLUSTER_SHAPE_5x1x1 = 18, + CUBLASLT_CLUSTER_SHAPE_6x1x1 = 19, + CUBLASLT_CLUSTER_SHAPE_7x1x1 = 20, + CUBLASLT_CLUSTER_SHAPE_9x1x1 = 21, + CUBLASLT_CLUSTER_SHAPE_10x1x1 = 22, + CUBLASLT_CLUSTER_SHAPE_11x1x1 = 23, + CUBLASLT_CLUSTER_SHAPE_12x1x1 = 24, + CUBLASLT_CLUSTER_SHAPE_13x1x1 = 25, + CUBLASLT_CLUSTER_SHAPE_14x1x1 = 26, + CUBLASLT_CLUSTER_SHAPE_15x1x1 = 27, + CUBLASLT_CLUSTER_SHAPE_3x2x1 = 28, + CUBLASLT_CLUSTER_SHAPE_5x2x1 = 29, + CUBLASLT_CLUSTER_SHAPE_6x2x1 = 30, + CUBLASLT_CLUSTER_SHAPE_7x2x1 = 31, + CUBLASLT_CLUSTER_SHAPE_1x3x1 = 32, + CUBLASLT_CLUSTER_SHAPE_2x3x1 = 33, + CUBLASLT_CLUSTER_SHAPE_3x3x1 = 34, + CUBLASLT_CLUSTER_SHAPE_4x3x1 = 35, + CUBLASLT_CLUSTER_SHAPE_5x3x1 = 36, + CUBLASLT_CLUSTER_SHAPE_3x4x1 = 37, + CUBLASLT_CLUSTER_SHAPE_1x5x1 = 38, + CUBLASLT_CLUSTER_SHAPE_2x5x1 = 39, + CUBLASLT_CLUSTER_SHAPE_3x5x1 = 40, + CUBLASLT_CLUSTER_SHAPE_1x6x1 = 41, + CUBLASLT_CLUSTER_SHAPE_2x6x1 = 42, + CUBLASLT_CLUSTER_SHAPE_1x7x1 = 43, + CUBLASLT_CLUSTER_SHAPE_2x7x1 = 44, + CUBLASLT_CLUSTER_SHAPE_1x9x1 = 45, + CUBLASLT_CLUSTER_SHAPE_1x10x1 = 46, + CUBLASLT_CLUSTER_SHAPE_1x11x1 = 47, + CUBLASLT_CLUSTER_SHAPE_1x12x1 = 48, + CUBLASLT_CLUSTER_SHAPE_1x13x1 = 49, + CUBLASLT_CLUSTER_SHAPE_1x14x1 = 50, + CUBLASLT_CLUSTER_SHAPE_1x15x1 = 51, + CUBLASLT_CLUSTER_SHAPE_END +} cublasLtClusterShape_t; + +/** Inner size of the kernel + * + 
* Represents various aspects of internal kernel design, that don't impact CUDA grid size but may have other more subtle + * effects. + * + */ +typedef enum { + CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED = 0, + CUBLASLT_MATMUL_INNER_SHAPE_MMA884 = 1, + CUBLASLT_MATMUL_INNER_SHAPE_MMA1684 = 2, + CUBLASLT_MATMUL_INNER_SHAPE_MMA1688 = 3, + CUBLASLT_MATMUL_INNER_SHAPE_MMA16816 = 4, + CUBLASLT_MATMUL_INNER_SHAPE_END +} cublasLtMatmulInnerShape_t; + +/** Pointer mode to use for alpha/beta */ +typedef enum { + /** matches CUBLAS_POINTER_MODE_HOST, pointer targets a single value host memory */ + CUBLASLT_POINTER_MODE_HOST = CUBLAS_POINTER_MODE_HOST, + /** matches CUBLAS_POINTER_MODE_DEVICE, pointer targets a single value device memory */ + CUBLASLT_POINTER_MODE_DEVICE = CUBLAS_POINTER_MODE_DEVICE, + /** pointer targets an array in device memory */ + CUBLASLT_POINTER_MODE_DEVICE_VECTOR = 2, + /** alpha pointer targets an array in device memory, beta is zero. Note: + CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE is not supported, must be 0. */ + CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO = 3, + /** alpha pointer targets an array in device memory, beta is a single value in host memory. */ + CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST = 4, +} cublasLtPointerMode_t; + +/** Mask to define pointer mode capability */ +typedef enum { + /** see CUBLASLT_POINTER_MODE_HOST */ + CUBLASLT_POINTER_MODE_MASK_HOST = 1, + /** see CUBLASLT_POINTER_MODE_DEVICE */ + CUBLASLT_POINTER_MODE_MASK_DEVICE = 2, + /** see CUBLASLT_POINTER_MODE_DEVICE_VECTOR */ + CUBLASLT_POINTER_MODE_MASK_DEVICE_VECTOR = 4, + /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO */ + CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_ZERO = 8, + /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST */ + CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_HOST = 16, +} cublasLtPointerModeMask_t; + +/** Implementation details that may affect numerical behavior of algorithms. */ +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_FMA (0x01ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_HMMA (0x02ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_IMMA (0x04ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_DMMA (0x08ull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_TENSOR_OP_MASK (0xfeull << 0) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_TYPE_MASK (0xffull << 0) + +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_16F (0x01ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32F (0x02ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_64F (0x04ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32I (0x08ull << 8) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_TYPE_MASK (0xffull << 8) + +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16F (0x01ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16BF (0x02ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_TF32 (0x04ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_32F (0x08ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_64F (0x10ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8I (0x20ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E4M3 (0x40ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E5M2 (0x80ull << 16) +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_INPUT_TYPE_MASK (0xffull << 16) + +#define CUBLASLT_NUMERICAL_IMPL_FLAGS_GAUSSIAN (0x01ull << 32) +typedef uint64_t cublasLtNumericalImplFlags_t; + +/** Execute matrix multiplication (D = alpha * op(A) * op(B) + beta * C). 
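+ *
+ * A minimal single-precision call sequence (an illustrative sketch only; error checking
+ * elided, and int m, n, k, the device buffers d_A, d_B, d_D, the workspace allocation and
+ * the CUDA stream are assumed to be set up by the caller):
+ *
+ *   cublasLtHandle_t lt; cublasLtCreate(&lt);
+ *   cublasLtMatmulDesc_t op; cublasLtMatmulDescCreate(&op, CUBLAS_COMPUTE_32F, CUDA_R_32F);
+ *   cublasLtMatrixLayout_t a, b, d;
+ *   cublasLtMatrixLayoutCreate(&a, CUDA_R_32F, m, k, m);  // column-major, ld = rows
+ *   cublasLtMatrixLayoutCreate(&b, CUDA_R_32F, k, n, k);
+ *   cublasLtMatrixLayoutCreate(&d, CUDA_R_32F, m, n, m);
+ *   float alpha = 1.0f, beta = 0.0f;
+ *   cublasLtMatmul(lt, op, &alpha, d_A, a, d_B, b, &beta, d_D, d, d_D, d,
+ *                  NULL, workspace, workspaceSize, stream);  // NULL algo: library chooses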
+ * + * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized + * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g. + * when workspaceSizeInBytes is less than workspace required by configured + * algo + * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured + * operation + * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device + * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device + * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmul(cublasLtHandle_t lightHandle, + cublasLtMatmulDesc_t computeDesc, + const void* alpha, /* host or device pointer */ + const void* A, + cublasLtMatrixLayout_t Adesc, + const void* B, + cublasLtMatrixLayout_t Bdesc, + const void* beta, /* host or device pointer */ + const void* C, + cublasLtMatrixLayout_t Cdesc, + void* D, + cublasLtMatrixLayout_t Ddesc, + const cublasLtMatmulAlgo_t* algo, + void* workspace, + size_t workspaceSizeInBytes, + cudaStream_t stream); + +/** Matrix layout conversion helper (C = alpha * op(A) + beta * op(B)) + * + * Can be used to change memory order of data or to scale and shift the values. + * + * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized + * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g. + * when A is not NULL, but Adesc is NULL + * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured + * operation + * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device + * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device + * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixTransform(cublasLtHandle_t lightHandle, + cublasLtMatrixTransformDesc_t transformDesc, + const void* alpha, /* host or device pointer */ + const void* A, + cublasLtMatrixLayout_t Adesc, + const void* beta, /* host or device pointer */ + const void* B, + cublasLtMatrixLayout_t Bdesc, + void* C, + cublasLtMatrixLayout_t Cdesc, + cudaStream_t stream); + +/* ---------------------------------------------------------------------------------------*/ +/* Helper functions for cublasLtMatrixLayout_t */ +/* ---------------------------------------------------------------------------------------*/ + +/** Enum for data ordering */ +typedef enum { + /** Column-major + * + * Leading dimension is the stride (in elements) to the beginning of next column in memory. + */ + CUBLASLT_ORDER_COL = 0, + /** Row major + * + * Leading dimension is the stride (in elements) to the beginning of next row in memory. + */ + CUBLASLT_ORDER_ROW = 1, + /** Column-major ordered tiles of 32 columns. + * + * Leading dimension is the stride (in elements) to the beginning of next group of 32-columns. E.g. if matrix has 33 + * columns and 2 rows, ld must be at least (32) * 2 = 64. + */ + CUBLASLT_ORDER_COL32 = 2, + /** Column-major ordered tiles of composite tiles with total 32 columns and 8 rows, tile composed of interleaved + * inner tiles of 4 columns within 4 even or odd rows in an alternating pattern. 
+ * + * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 8 row tile for the next + * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32 * 8) * 1 = 256. + */ + CUBLASLT_ORDER_COL4_4R2_8C = 3, + /** Column-major ordered tiles of composite tiles with total 32 columns and 32 rows. + * Element offset within the tile is calculated as (((row%8)/2*4+row/8)*2+row%2)*32+col. + * + * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 32 row tile for the next + * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32*32)*1 = 1024. + */ + CUBLASLT_ORDER_COL32_2R_4R4 = 4, + +} cublasLtOrder_t; + +/** Attributes of memory layout */ +typedef enum { + /** Data type, see cudaDataType. + * + * uint32_t + */ + CUBLASLT_MATRIX_LAYOUT_TYPE = 0, + + /** Memory order of the data, see cublasLtOrder_t. + * + * int32_t, default: CUBLASLT_ORDER_COL + */ + CUBLASLT_MATRIX_LAYOUT_ORDER = 1, + + /** Number of rows. + * + * Usually only values that can be expressed as int32_t are supported. + * + * uint64_t + */ + CUBLASLT_MATRIX_LAYOUT_ROWS = 2, + + /** Number of columns. + * + * Usually only values that can be expressed as int32_t are supported. + * + * uint64_t + */ + CUBLASLT_MATRIX_LAYOUT_COLS = 3, + + /** Matrix leading dimension. + * + * For CUBLASLT_ORDER_COL this is stride (in elements) of matrix column, for more details and documentation for + * other memory orders see documentation for cublasLtOrder_t values. + * + * Currently only non-negative values are supported, must be large enough so that matrix memory locations are not + * overlapping (e.g. greater or equal to CUBLASLT_MATRIX_LAYOUT_ROWS in case of CUBLASLT_ORDER_COL). + * + * int64_t; + */ + CUBLASLT_MATRIX_LAYOUT_LD = 4, + + /** Number of matmul operations to perform in the batch. + * + * See also CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT + * + * int32_t, default: 1 + */ + CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT = 5, + + /** Stride (in elements) to the next matrix for strided batch operation. + * + * When matrix type is planar-complex (CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET != 0), batch stride + * is interpreted by cublasLtMatmul() in number of real valued sub-elements. E.g. for data of type CUDA_C_16F, + * offset of 1024B is encoded as a stride of value 512 (since each element of the real and imaginary matrices + * is a 2B (16bit) floating point type). + * + * NOTE: A bug in cublasLtMatrixTransform() causes it to interpret the batch stride for a planar-complex matrix + * as if it was specified in number of complex elements. Therefore an offset of 1024B must be encoded as stride + * value 256 when calling cublasLtMatrixTransform() (each complex element is 4B with real and imaginary values 2B + * each). This behavior is expected to be corrected in the next major cuBLAS version. + * + * int64_t, default: 0 + */ + CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET = 6, + + /** Stride (in bytes) to the imaginary plane for planar complex layout. + * + * int64_t, default: 0 - 0 means that layout is regular (real and imaginary parts of complex numbers are interleaved + * in memory in each element) + */ + CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET = 7, +} cublasLtMatrixLayoutAttribute_t; + +/** Internal. Do not use directly.
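+ *
+ * (The size argument presumably lets the inline cublasLt*Init() wrappers below check that
+ * the caller's pre-allocated opaque structure matches the size this library version expects.)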
+ */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutInit_internal( // + cublasLtMatrixLayout_t matLayout, + size_t size, + cudaDataType type, + uint64_t rows, + uint64_t cols, + int64_t ld); + +/** Initialize matrix layout descriptor in pre-allocated space. + * + * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient + * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully + */ +static inline cublasStatus_t cublasLtMatrixLayoutInit( + cublasLtMatrixLayout_t matLayout, cudaDataType type, uint64_t rows, uint64_t cols, int64_t ld) { + return cublasLtMatrixLayoutInit_internal(matLayout, sizeof(*matLayout), type, rows, cols, ld); +} + +/** Create new matrix layout descriptor. + * + * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated + * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutCreate( // + cublasLtMatrixLayout_t* matLayout, + cudaDataType type, + uint64_t rows, + uint64_t cols, + int64_t ld); + +/** Destroy matrix layout descriptor. + * + * \retval CUBLAS_STATUS_SUCCESS if operation was successful + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutDestroy(cublasLtMatrixLayout_t matLayout); + +/** Set matrix layout descriptor attribute. + * + * \param[in] matLayout The descriptor + * \param[in] attr The attribute + * \param[in] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * + * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutSetAttribute( // + cublasLtMatrixLayout_t matLayout, + cublasLtMatrixLayoutAttribute_t attr, + const void* buf, + size_t sizeInBytes); + +/** Get matrix layout descriptor attribute. + * + * \param[in] matLayout The descriptor + * \param[in] attr The attribute + * \param[out] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of + * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents + * + * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero + * and buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutGetAttribute( // + cublasLtMatrixLayout_t matLayout, + cublasLtMatrixLayoutAttribute_t attr, + void* buf, + size_t sizeInBytes, + size_t* sizeWritten); + +/* ---------------------------------------------------------------------------------------*/ +/* Helper functions for cublasLtMatmulDesc_t */ +/* ---------------------------------------------------------------------------------------*/ + +/** Matmul descriptor attributes to define details of the operation. */ +typedef enum { + /** Compute type, see cudaDataType. Defines data type used for multiply and accumulate operations and the + * accumulator during matrix multiplication. + * + * int32_t + */ + CUBLASLT_MATMUL_DESC_COMPUTE_TYPE = 0, + + /** Scale type, see cudaDataType. Defines data type of alpha and beta.
Accumulator and value from matrix C are + * typically converted to scale type before final scaling. Value is then converted from scale type to type of matrix + * D before being stored in memory. + * + * int32_t, default: same as CUBLASLT_MATMUL_DESC_COMPUTE_TYPE + */ + CUBLASLT_MATMUL_DESC_SCALE_TYPE = 1, + + /** Pointer mode of alpha and beta, see cublasLtPointerMode_t. When CUBLASLT_POINTER_MODE_DEVICE_VECTOR is in use, + * alpha/beta vector lengths must match number of output matrix rows. + * + * int32_t, default: CUBLASLT_POINTER_MODE_HOST + */ + CUBLASLT_MATMUL_DESC_POINTER_MODE = 2, + + /** Transform of matrix A, see cublasOperation_t. + * + * int32_t, default: CUBLAS_OP_N + */ + CUBLASLT_MATMUL_DESC_TRANSA = 3, + + /** Transform of matrix B, see cublasOperation_t. + * + * int32_t, default: CUBLAS_OP_N + */ + CUBLASLT_MATMUL_DESC_TRANSB = 4, + + /** Transform of matrix C, see cublasOperation_t. + * + * Currently only CUBLAS_OP_N is supported. + * + * int32_t, default: CUBLAS_OP_N + */ + CUBLASLT_MATMUL_DESC_TRANSC = 5, + + /** Matrix fill mode, see cublasFillMode_t. + * + * int32_t, default: CUBLAS_FILL_MODE_FULL + */ + CUBLASLT_MATMUL_DESC_FILL_MODE = 6, + + /** Epilogue function, see cublasLtEpilogue_t. + * + * uint32_t, default: CUBLASLT_EPILOGUE_DEFAULT + */ + CUBLASLT_MATMUL_DESC_EPILOGUE = 7, + + /** Bias or bias gradient vector pointer in the device memory. + * + * Bias case. See CUBLASLT_EPILOGUE_BIAS. + * For bias data type see CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE. + * + * Bias vector length must match matrix D rows count. + * + * Bias gradient case. See CUBLASLT_EPILOGUE_DRELU_BGRAD and CUBLASLT_EPILOGUE_DGELU_BGRAD. + * Bias gradient vector elements are the same type as the output elements + * (Ctype) with the exception of IMMA kernels (see above). + * + * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic() + * depend on its value to determine expected pointer alignment. + * + * Bias case: const void *, default: NULL + * Bias gradient case: void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_BIAS_POINTER = 8, + + /** Batch stride for bias or bias gradient vector. + * + * Used together with CUBLASLT_MATMUL_DESC_BIAS_POINTER when matrix D's CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1. + * + * int64_t, default: 0 + */ + CUBLASLT_MATMUL_DESC_BIAS_BATCH_STRIDE = 10, + + /** Pointer for epilogue auxiliary buffer. + * + * - Output vector for ReLu bit-mask in forward pass when CUBLASLT_EPILOGUE_RELU_AUX + * or CUBLASLT_EPILOGUE_RELU_AUX_BIAS epilogue is used. + * - Input vector for ReLu bit-mask in backward pass when + * CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is used. + * + * - Output of GELU input matrix in forward pass when + * CUBLASLT_EPILOGUE_GELU_AUX_BIAS epilogue is used. + * - Input of GELU input matrix for backward pass when + * CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used. + * + * For aux data type see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE. + * + * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic() + * depend on its value to determine expected pointer alignment. + * + * Requires setting CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD attribute. + * + * Forward pass: void *, default: NULL + * Backward pass: const void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER = 11, + + /** Leading dimension for epilogue auxiliary buffer. + * + * - ReLu bit-mask matrix leading dimension in elements (i.e.
bits) + * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is + * used. Must be divisible by 128 and be no less than the number of rows in the output matrix. + * + * - GELU input matrix leading dimension in elements + * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used. + * Must be divisible by 8 and be no less than the number of rows in the output matrix. + * + * int64_t, default: 0 + */ + CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD = 12, + + /** Batch stride for epilogue auxiliary buffer. + * + * - ReLu bit-mask matrix batch stride in elements (i.e. bits) + * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is + * used. Must be divisible by 128. + * + * - GELU input matrix batch stride in elements + * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used. + * Must be divisible by 8. + * + * int64_t, default: 0 + */ + CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_BATCH_STRIDE = 13, + + /** Batch stride for alpha vector. + * + * Used together with CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST when matrix D's + * CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1. If CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO is set then + * CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE must be set to 0 as this mode doesn't support batched alpha vectors. + * + * int64_t, default: 0 + */ + CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE = 14, + + /** Number of SMs to target for parallel execution. Optimizes heuristics for execution on a different number of SMs + * when user expects a concurrent stream to be using some of the device resources. + * + * int32_t, default: 0 - use the number reported by the device. + */ + CUBLASLT_MATMUL_DESC_SM_COUNT_TARGET = 15, + + /** Device pointer to the scale factor value that converts data in matrix A to the compute data type range. + * + * The scaling factor value must have the same type as the compute type. + * + * If not specified, or set to NULL, the scaling factor is assumed to be 1. + * + * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul() + * will return CUBLAS_INVALID_VALUE. + * + * const void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_A_SCALE_POINTER = 17, + + /** Device pointer to the scale factor value to convert data in matrix B to compute data type range. + * + * The scaling factor value must have the same type as the compute type. + * + * If not specified, or set to NULL, the scaling factor is assumed to be 1. + * + * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul() + * will return CUBLAS_INVALID_VALUE. + * + * const void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_B_SCALE_POINTER = 18, + + /** Device pointer to the scale factor value to convert data in matrix C to compute data type range. + * + * The scaling factor value must have the same type as the compute type. + * + * If not specified, or set to NULL, the scaling factor is assumed to be 1. + * + * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul() + * will return CUBLAS_INVALID_VALUE. + * + * const void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_C_SCALE_POINTER = 19, + + /** Device pointer to the scale factor value to convert data in matrix D to compute data type range. + * + * The scaling factor value must have the same type as the compute type.
+ * + * If not specified, or set to NULL, the scaling factor is assumed to be 1. + * + * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul() + * will return CUBLAS_INVALID_VALUE. + * + * const void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_D_SCALE_POINTER = 20, + + /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the + * output matrix. + * + * The computed value has the same type as the compute type. + * + * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix + * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_INVALID_VALUE. + * + * void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_AMAX_D_POINTER = 21, + + /** Type of the data to be stored to the memory pointed to by CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + * + * If unset, the data type defaults to the type of elements of the output matrix with some exceptions, see details + * below. + * + * ReLu uses a bit-mask. + * + * GELU input matrix elements type is the same as the type of elements of + * the output matrix with some exceptions, see details below. + * + * For fp8 kernels with output type CUDA_R_8F_E4M3 the aux data type can be CUDA_R_8F_E4M3 or CUDA_R_16F with some + * restrictions. See https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmulDescAttributes_t for more details. + * + * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul() + * will return CUBLAS_INVALID_VALUE. + * + * int32_t based on cudaDataType, default: -1 + */ + CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE = 22, + + /** Device pointer to the scaling factor value to convert results from compute type data range to storage + * data range in the auxiliary matrix that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + * + * The scaling factor value must have the same type as the compute type. + * + * If not specified, or set to NULL, the scaling factor is assumed to be 1. If set for an unsupported matrix data, + * scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_INVALID_VALUE. + * + * void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_SCALE_POINTER = 23, + + /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the + * buffer that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + * + * The computed value has the same type as the compute type. + * + * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix + * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_INVALID_VALUE. + * + * void *, default: NULL + */ + CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_AMAX_POINTER = 24, + + /** Flag for managing fp8 fast accumulation mode. + * When enabled, problem execution might be faster but at the cost of lower accuracy because intermediate results + * will not periodically be promoted to a higher precision. + * + * int8_t, default: 0 - fast accumulation mode is disabled. + */ + CUBLASLT_MATMUL_DESC_FAST_ACCUM = 25, + + /** Type of bias or bias gradient vector in the device memory. + * + * Bias case: see CUBLASLT_EPILOGUE_BIAS. 
+ * + * Bias vector elements are the same type as the elements of output matrix (Dtype) with the following exceptions: + * - IMMA kernels with computeType=CUDA_R_32I and Ctype=CUDA_R_8I where the bias vector elements + * are the same type as alpha, beta (CUBLASLT_MATMUL_DESC_SCALE_TYPE=CUDA_R_32F) + * - fp8 kernels with an output type of CUDA_R_32F, CUDA_R_8F_E4M3 or CUDA_R_8F_E5M2, See + * https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmul for details. + * + * int32_t based on cudaDataType, default: -1 + */ + CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE = 26, + + /** EXPERIMENTAL: Number of atomic synchronization chunks in the row dimension of the output matrix D. + * + * int32_t, default 0 (atomic synchronization disabled) + */ + CUBLASLT_MATMUL_DESC_ATOMIC_SYNC_NUM_CHUNKS_D_ROWS = 27, + + /** EXPERIMENTAL: Number of atomic synchronization chunks in the column dimension of the output matrix D. + * + * int32_t, default 0 (atomic synchronization disabled) + */ + CUBLASLT_MATMUL_DESC_ATOMIC_SYNC_NUM_CHUNKS_D_COLS = 28, + + /** EXPERIMENTAL: Pointer to a device array of input atomic counters consumed by a matmul. + * + * int32_t *, default: NULL + * */ + CUBLASLT_MATMUL_DESC_ATOMIC_SYNC_IN_COUNTERS_POINTER = 29, + + /** EXPERIMENTAL: Pointer to a device array of output atomic counters produced by a matmul. + * + * int32_t *, default: NULL + * */ + CUBLASLT_MATMUL_DESC_ATOMIC_SYNC_OUT_COUNTERS_POINTER = 30, +} cublasLtMatmulDescAttributes_t; + +/** Internal. Do not use directly. + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmulDescInit_internal( // + cublasLtMatmulDesc_t matmulDesc, + size_t size, + cublasComputeType_t computeType, + cudaDataType_t scaleType); + +/** Initialize matmul operation descriptor in pre-allocated space. + * + * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient + * \retval CUBLAS_STATUS_SUCCESS if descriptor was initialized successfully + */ +static inline cublasStatus_t cublasLtMatmulDescInit( // + cublasLtMatmulDesc_t matmulDesc, + cublasComputeType_t computeType, + cudaDataType_t scaleType) { + return cublasLtMatmulDescInit_internal(matmulDesc, sizeof(*matmulDesc), computeType, scaleType); +} + +/** Create new matmul operation descriptor. + * + * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated + * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmulDescCreate(cublasLtMatmulDesc_t* matmulDesc, + cublasComputeType_t computeType, + cudaDataType_t scaleType); + +/** Destroy matmul operation descriptor. + * + * \retval CUBLAS_STATUS_SUCCESS if operation was successful + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmulDescDestroy(cublasLtMatmulDesc_t matmulDesc); + +/** Set matmul operation descriptor attribute. + * + * \param[in] matmulDesc The descriptor + * \param[in] attr The attribute + * \param[in] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * + * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatmulDescSetAttribute( // + cublasLtMatmulDesc_t matmulDesc, + cublasLtMatmulDescAttributes_t attr, + const void* buf, + size_t sizeInBytes); + +/** Get matmul operation descriptor attribute.
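+ *
+ * For example, reading back the configured transform of A (an illustrative sketch):
+ *
+ *   int32_t transa = CUBLAS_OP_N;
+ *   size_t written = 0;
+ *   cublasLtMatmulDescGetAttribute(matmulDesc, CUBLASLT_MATMUL_DESC_TRANSA,
+ *                                  &transa, sizeof(transa), &written);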
+ *
+ * \param[in] matmulDesc The descriptor
+ * \param[in] attr The attribute
+ * \param[out] buf memory address containing the new value
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ * selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulDescGetAttribute(  //
+    cublasLtMatmulDesc_t matmulDesc,
+    cublasLtMatmulDescAttributes_t attr,
+    void* buf,
+    size_t sizeInBytes,
+    size_t* sizeWritten);
+
+/* ---------------------------------------------------------------------------------------*/
+/* Helper functions for cublasLtMatrixTransformDesc_t */
+/* ---------------------------------------------------------------------------------------*/
+
+/** Matrix transform descriptor attributes to define details of the operation.
+ */
+typedef enum {
+ /** Scale type, see cudaDataType. Inputs are converted to scale type for scaling and summation and results are then
+ * converted to output type to store in memory.
+ *
+ * int32_t
+ */
+ CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE,
+
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t.
+ *
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
+ */
+ CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE,
+
+ /** Transform of matrix A, see cublasOperation_t.
+ *
+ * int32_t, default: CUBLAS_OP_N
+ */
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA,
+
+ /** Transform of matrix B, see cublasOperation_t.
+ *
+ * int32_t, default: CUBLAS_OP_N
+ */
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSB,
+} cublasLtMatrixTransformDescAttributes_t;
+
+/** Internal. Do not use directly.
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescInit_internal(cublasLtMatrixTransformDesc_t transformDesc,
+                                                                     size_t size,
+                                                                     cudaDataType scaleType);
+
+/** Initialize matrix transform operation descriptor in pre-allocated space.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was initialized successfully
+ */
+static inline cublasStatus_t cublasLtMatrixTransformDescInit(cublasLtMatrixTransformDesc_t transformDesc,
+                                                             cudaDataType scaleType) {
+  return cublasLtMatrixTransformDescInit_internal(transformDesc, sizeof(*transformDesc), scaleType);
+}
+
+/** Create new matrix transform operation descriptor.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescCreate(cublasLtMatrixTransformDesc_t* transformDesc,
+                                                              cudaDataType scaleType);
+
+/** Destroy matrix transform operation descriptor.
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescDestroy(cublasLtMatrixTransformDesc_t transformDesc);
+
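+/* Example (illustrative sketch, not upstream documentation): creating a transform
+ * descriptor that transposes matrix A. Error handling is omitted; the descriptor is
+ * then consumed by cublasLtMatrixTransform(), declared elsewhere in this header.
+ *
+ *   cublasLtMatrixTransformDesc_t tdesc;
+ *   cublasOperation_t transa = CUBLAS_OP_T;
+ *   cublasLtMatrixTransformDescCreate(&tdesc, CUDA_R_32F);
+ *   cublasLtMatrixTransformDescSetAttribute(tdesc, CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA, &transa, sizeof(transa));
+ *   // ... use with cublasLtMatrixTransform(), then:
+ *   cublasLtMatrixTransformDescDestroy(tdesc);
+ */
+
+/** Set matrix transform operation descriptor attribute.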
+ * + * \param[in] transformDesc The descriptor + * \param[in] attr The attribute + * \param[in] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * + * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescSetAttribute( // + cublasLtMatrixTransformDesc_t transformDesc, + cublasLtMatrixTransformDescAttributes_t attr, + const void* buf, + size_t sizeInBytes); + +/** Get matrix transform operation descriptor attribute. + * + * \param[in] transformDesc The descriptor + * \param[in] attr The attribute + * \param[out] buf memory address containing the new value + * \param[in] sizeInBytes size of buf buffer for verification (in bytes) + * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number + * of bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents + * + * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero + * and buf is NULL or sizeInBytes doesn't match size of internal storage for + * selected attribute + * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory + */ +cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescGetAttribute( // + cublasLtMatrixTransformDesc_t transformDesc, + cublasLtMatrixTransformDescAttributes_t attr, + void* buf, + size_t sizeInBytes, + size_t* sizeWritten); + +/** Reduction scheme for portions of the dot-product calculated in parallel (a. k. a. "split - K"). + */ +typedef enum { + /** No reduction scheme, dot-product shall be performed in one sequence. + */ + CUBLASLT_REDUCTION_SCHEME_NONE = 0, + + /** Reduction is performed "in place" - using the output buffer (and output data type) and counters (in workspace) to + * guarantee the sequentiality. + */ + CUBLASLT_REDUCTION_SCHEME_INPLACE = 1, + + /** Intermediate results are stored in compute type in the workspace and reduced in a separate step. + */ + CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE = 2, + + /** Intermediate results are stored in output type in the workspace and reduced in a separate step. + */ + CUBLASLT_REDUCTION_SCHEME_OUTPUT_TYPE = 4, + + CUBLASLT_REDUCTION_SCHEME_MASK = 0x7, +} cublasLtReductionScheme_t; + +/** Postprocessing options for the epilogue + */ +typedef enum { + /** No special postprocessing, just scale and quantize results if necessary. + */ + CUBLASLT_EPILOGUE_DEFAULT = 1, + + /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)). + */ + CUBLASLT_EPILOGUE_RELU = 2, + + /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)). + * + * This epilogue mode produces an extra output, a ReLu bit-mask matrix, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_RELU_AUX = (CUBLASLT_EPILOGUE_RELU | 128), + + /** Bias, apply (broadcasted) Bias from bias vector. Bias vector length must match matrix D rows, it must be packed + * (stride between vector elements is 1). Bias vector is broadcasted to all columns and added before applying final + * postprocessing. 
+ */ + CUBLASLT_EPILOGUE_BIAS = 4, + + /** ReLu and Bias, apply Bias and then ReLu transform + */ + CUBLASLT_EPILOGUE_RELU_BIAS = (CUBLASLT_EPILOGUE_RELU | CUBLASLT_EPILOGUE_BIAS), + + /** ReLu and Bias, apply Bias and then ReLu transform + * + * This epilogue mode produces an extra output, a ReLu bit-mask matrix, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_RELU_AUX_BIAS = (CUBLASLT_EPILOGUE_RELU_AUX | CUBLASLT_EPILOGUE_BIAS), + + /* ReLu gradient. Apply ReLu gradient to matmul output. Store ReLu gradient in the output matrix. + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DRELU = 8 | 128, + + /* ReLu and Bias gradients. Apply independently ReLu and Bias gradient to + * matmul output. Store ReLu gradient in the output matrix, and Bias gradient + * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DRELU_BGRAD = CUBLASLT_EPILOGUE_DRELU | 16, + + /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)). + */ + CUBLASLT_EPILOGUE_GELU = 32, + + /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)). + * + * This epilogue mode outputs GELU input as a separate matrix (useful for training). + * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_GELU_AUX = (CUBLASLT_EPILOGUE_GELU | 128), + + /** GELU and Bias, apply Bias and then GELU transform + */ + CUBLASLT_EPILOGUE_GELU_BIAS = (CUBLASLT_EPILOGUE_GELU | CUBLASLT_EPILOGUE_BIAS), + + /** GELU and Bias, apply Bias and then GELU transform + * + * This epilogue mode outputs GELU input as a separate matrix (useful for training). + * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_GELU_AUX_BIAS = (CUBLASLT_EPILOGUE_GELU_AUX | CUBLASLT_EPILOGUE_BIAS), + + /* GELU gradient. Apply GELU gradient to matmul output. Store GELU gradient in the output matrix. + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DGELU = 64 | 128, + + /* GELU and Bias gradients. Apply independently GELU and Bias gradient to + * matmul output. Store GELU gradient in the output matrix, and Bias gradient + * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). + * + * This epilogue mode requires an extra input, + * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER. + */ + CUBLASLT_EPILOGUE_DGELU_BGRAD = CUBLASLT_EPILOGUE_DGELU | 16, + + /** Bias gradient based on the input matrix A. + * + * The bias size corresponds to the number of rows of the matrix D. + * The reduction happens over the GEMM's "k" dimension. + * + * Stores Bias gradient in the auxiliary output + * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). + */ + CUBLASLT_EPILOGUE_BGRADA = 256, + + /** Bias gradient based on the input matrix B. + * + * The bias size corresponds to the number of columns of the matrix D. + * The reduction happens over the GEMM's "k" dimension. + * + * Stores Bias gradient in the auxiliary output + * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER). 
+ */ + CUBLASLT_EPILOGUE_BGRADB = 512, +} cublasLtEpilogue_t; + +/** Matmul heuristic search mode + */ +typedef enum { + /** ask heuristics for best algo for given usecase + */ + CUBLASLT_SEARCH_BEST_FIT = 0, + /** only try to find best config for preconfigured algo id + */ + CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID = 1, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_02 = 2, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_03 = 3, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_04 = 4, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_05 = 5, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_06 = 6, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_07 = 7, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_08 = 8, + /** reserved for future use + */ + CUBLASLT_SEARCH_RESERVED_09 = 9, +} cublasLtMatmulSearch_t; + +/** Algo search preference to fine tune the heuristic function. */ +typedef enum { + /** Search mode, see cublasLtMatmulSearch_t. + * + * uint32_t, default: CUBLASLT_SEARCH_BEST_FIT + */ + CUBLASLT_MATMUL_PREF_SEARCH_MODE = 0, + + /** Maximum allowed workspace size in bytes. + * + * uint64_t, default: 0 - no workspace allowed + */ + CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES = 1, + + /** Reduction scheme mask, see cublasLtReductionScheme_t. Filters heuristic result to only include algo configs that + * use one of the required modes. + * + * E.g. mask value of 0x03 will allow only INPLACE and COMPUTE_TYPE reduction schemes. + * + * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_MASK (allows all reduction schemes) + */ + CUBLASLT_MATMUL_PREF_REDUCTION_SCHEME_MASK = 3, + + /** Minimum buffer alignment for matrix A (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix A that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES = 5, + + /** Minimum buffer alignment for matrix B (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix B that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_B_BYTES = 6, + + /** Minimum buffer alignment for matrix C (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix C that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_C_BYTES = 7, + + /** Minimum buffer alignment for matrix D (in bytes). + * + * Selecting a smaller value will exclude algorithms that can not work with matrix D that is not as strictly aligned + * as they need. + * + * uint32_t, default: 256 + */ + CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_D_BYTES = 8, + + /** Maximum wave count. + * + * See cublasLtMatmulHeuristicResult_t::wavesCount. + * + * Selecting a non-zero value will exclude algorithms that report device utilization higher than specified. + * + * float, default: 0.0f + */ + CUBLASLT_MATMUL_PREF_MAX_WAVES_COUNT = 9, + + /** Numerical implementation details mask, see cublasLtNumericalImplFlags_t. Filters heuristic result to only include + * algorithms that use the allowed implementations. + * + * uint64_t, default: uint64_t(-1) (allow everything) + */ + CUBLASLT_MATMUL_PREF_IMPL_MASK = 12, +} cublasLtMatmulPreferenceAttributes_t; + +/** Internal. Do not use directly. 
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceInit_internal(cublasLtMatmulPreference_t pref, size_t size);
+
+/** Initialize matmul heuristic search preference descriptor in pre-allocated space.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was initialized successfully
+ */
+static inline cublasStatus_t cublasLtMatmulPreferenceInit(cublasLtMatmulPreference_t pref) {
+  return cublasLtMatmulPreferenceInit_internal(pref, sizeof(*pref));
+}
+
+/** Create new matmul heuristic search preference descriptor.
+ *
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceCreate(cublasLtMatmulPreference_t* pref);
+
+/** Destroy matmul heuristic search preference descriptor.
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceDestroy(cublasLtMatmulPreference_t pref);
+
+/** Set matmul heuristic search preference descriptor attribute.
+ *
+ * \param[in] pref The descriptor
+ * \param[in] attr The attribute
+ * \param[in] buf memory address containing the new value
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
+ * selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceSetAttribute(  //
+    cublasLtMatmulPreference_t pref,
+    cublasLtMatmulPreferenceAttributes_t attr,
+    const void* buf,
+    size_t sizeInBytes);
+
+/** Get matmul heuristic search preference descriptor attribute.
+ *
+ * \param[in] pref The descriptor
+ * \param[in] attr The attribute
+ * \param[out] buf memory address containing the new value
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ * selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceGetAttribute(  //
+    cublasLtMatmulPreference_t pref,
+    cublasLtMatmulPreferenceAttributes_t attr,
+    void* buf,
+    size_t sizeInBytes,
+    size_t* sizeWritten);
+
+/** Results structure used by cublasLtMatmulAlgoGetHeuristic
+ *
+ * Holds returned configured algo descriptor and its runtime properties.
+ */
+typedef struct {
+  /** Matmul algorithm descriptor.
+   *
+   * Must be initialized with cublasLtMatmulAlgoInit() if preferences' CUBLASLT_MATMUL_PREF_SEARCH_MODE is set to
+   * CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID
+   */
+  cublasLtMatmulAlgo_t algo;
+
+  /** Actual size of workspace memory required.
+   */
+  size_t workspaceSize;
+
+  /** Result status. Other fields are only valid if, after the call to cublasLtMatmulAlgoGetHeuristic(), this member is
+   * set to CUBLAS_STATUS_SUCCESS.
+   */
+  cublasStatus_t state;
+
+  /** Waves count - a device utilization metric.
+ *
+ * wavesCount value of 1.0f suggests that when kernel is launched it will fully occupy the GPU.
+ */
+  float wavesCount;
+
+  int reserved[4];
+} cublasLtMatmulHeuristicResult_t;
+
+/** Query cublasLt heuristic for algorithm appropriate for given use case.
+ *
+ * \param[in] lightHandle Pointer to the allocated cuBLASLt handle for the cuBLASLt
+ * context. See cublasLtHandle_t.
+ * \param[in] operationDesc Handle to the matrix multiplication descriptor.
+ * \param[in] Adesc Handle to the layout descriptors for matrix A.
+ * \param[in] Bdesc Handle to the layout descriptors for matrix B.
+ * \param[in] Cdesc Handle to the layout descriptors for matrix C.
+ * \param[in] Ddesc Handle to the layout descriptors for matrix D.
+ * \param[in] preference Pointer to the structure holding the heuristic search
+ * preferences descriptor. See cublasLtMatmulPreference_t.
+ * \param[in] requestedAlgoCount Size of heuristicResultsArray (in elements) and requested
+ * maximum number of algorithms to return.
+ * \param[in, out] heuristicResultsArray Output algorithms and associated runtime characteristics,
+ * ordered in increasing estimated compute time.
+ * \param[out] returnAlgoCount The number of heuristicResultsArray elements written.
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less than or equal to zero
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if no heuristic function available for current configuration
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect
+ * heuristicResultsArray[0 to (returnAlgoCount - 1)].state
+ * for the detailed status of the results
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetHeuristic(cublasLtHandle_t lightHandle,
+                                                           cublasLtMatmulDesc_t operationDesc,
+                                                           cublasLtMatrixLayout_t Adesc,
+                                                           cublasLtMatrixLayout_t Bdesc,
+                                                           cublasLtMatrixLayout_t Cdesc,
+                                                           cublasLtMatrixLayout_t Ddesc,
+                                                           cublasLtMatmulPreference_t preference,
+                                                           int requestedAlgoCount,
+                                                           cublasLtMatmulHeuristicResult_t heuristicResultsArray[],
+                                                           int* returnAlgoCount);
+
+/* ---------------------------------------------------------------------------------------*/
+/* Lower level API to be able to implement own Heuristic and Find routines */
+/* ---------------------------------------------------------------------------------------*/
+
+/** Routine to get all algo IDs that can potentially run
+ *
+ * \param[in] requestedAlgoCount requested number of algos (must be less than or equal to size of algoIdsArray
+ * (in elements))
+ * \param[out] algoIdsArray array to write algo IDs to
+ * \param[out] returnAlgoCount number of algo IDs actually written
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less than or equal to zero
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect returnAlgoCount to get actual number of IDs
+ * available
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetIds(cublasLtHandle_t lightHandle,
+                                                     cublasComputeType_t computeType,
+                                                     cudaDataType_t scaleType,
+                                                     cudaDataType_t Atype,
+                                                     cudaDataType_t Btype,
+                                                     cudaDataType_t Ctype,
+                                                     cudaDataType_t Dtype,
+                                                     int requestedAlgoCount,
+                                                     int algoIdsArray[],
+                                                     int* returnAlgoCount);
+
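+/* Example (illustrative sketch, not upstream documentation): querying the heuristic for
+ * the fastest algorithm. The handle and the operation/layout descriptors (lightHandle,
+ * operationDesc, Adesc, Bdesc, Cdesc, Ddesc) are assumed to have been created as shown
+ * above; error handling is omitted.
+ *
+ *   cublasLtMatmulPreference_t pref;
+ *   cublasLtMatmulHeuristicResult_t result;
+ *   size_t wsSize = 4 * 1024 * 1024;  // allow a 4 MiB workspace
+ *   int returned = 0;
+ *   cublasLtMatmulPreferenceCreate(&pref);
+ *   cublasLtMatmulPreferenceSetAttribute(pref, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &wsSize, sizeof(wsSize));
+ *   cublasLtMatmulAlgoGetHeuristic(lightHandle, operationDesc, Adesc, Bdesc, Cdesc, Ddesc,
+ *                                  pref, 1, &result, &returned);
+ *   if (returned > 0 && result.state == CUBLAS_STATUS_SUCCESS) {
+ *     // result.algo can now be passed to cublasLtMatmul() with a workspace of
+ *     // at least result.workspaceSize bytes
+ *   }
+ *   cublasLtMatmulPreferenceDestroy(pref);
+ */
+
+/** Initialize algo structure
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if algo is NULL or algoId is outside of recognized range
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algoId is not supported for given combination of data types
+ * \retval CUBLAS_STATUS_SUCCESS if the structure was successfully initialized
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoInit(cublasLtHandle_t lightHandle,
+                                                   cublasComputeType_t computeType,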
+                                                   cudaDataType_t scaleType,
+                                                   cudaDataType_t Atype,
+                                                   cudaDataType_t Btype,
+                                                   cudaDataType_t Ctype,
+                                                   cudaDataType_t Dtype,
+                                                   int algoId,
+                                                   cublasLtMatmulAlgo_t* algo);
+
+/** Check configured algo descriptor for correctness and support on current device.
+ *
+ * Result includes required workspace size and calculated wave count.
+ *
+ * CUBLAS_STATUS_SUCCESS doesn't fully guarantee the algo will run (it will fail if e.g. buffers are not correctly
+ * aligned); but if cublasLtMatmulAlgoCheck fails, the algo will not run.
+ *
+ * \param[in] algo algo configuration to check
+ * \param[out] result result structure to report algo runtime characteristics; algo field is never updated
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if matrix layout descriptors or operation descriptor don't match algo
+ * descriptor
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algo configuration or data type combination is not currently supported on
+ * given device
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if algo configuration cannot be run using the selected device
+ * \retval CUBLAS_STATUS_SUCCESS if check was successful
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCheck(  //
+    cublasLtHandle_t lightHandle,
+    cublasLtMatmulDesc_t operationDesc,
+    cublasLtMatrixLayout_t Adesc,
+    cublasLtMatrixLayout_t Bdesc,
+    cublasLtMatrixLayout_t Cdesc,
+    cublasLtMatrixLayout_t Ddesc,
+    const cublasLtMatmulAlgo_t* algo,  ///< may point to result->algo
+    cublasLtMatmulHeuristicResult_t* result);
+
+/** Capabilities Attributes that can be retrieved from an initialized Algo structure
+ */
+typedef enum {
+ /** support for split K, see CUBLASLT_ALGO_CONFIG_SPLITK_NUM
+ *
+ * int32_t, 0 means no support, supported otherwise
+ */
+ CUBLASLT_ALGO_CAP_SPLITK_SUPPORT = 0,
+
+ /** reduction scheme mask, see cublasLtReductionScheme_t; shows supported reduction schemes, if reduction scheme is
+ * not masked out it is supported.
+ *
+ * e.g. int isReductionSchemeComputeTypeSupported = (reductionSchemeMask & CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE) ==
+ * CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE ? 1 : 0;
+ *
+ * uint32_t
+ */
+ CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK = 1,
+
+ /** support for cta swizzling, see CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING
+ *
+ * uint32_t, 0 means no support, 1 means supported value of 1, other values are reserved
+ */
+ CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT = 2,
+
+ /** support strided batch
+ *
+ * int32_t, 0 means no support, supported otherwise
+ */
+ CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT = 3,
+
+ /** support results out of place (D != C in D = alpha.A.B + beta.C)
+ *
+ * int32_t, 0 means no support, supported otherwise
+ */
+ CUBLASLT_ALGO_CAP_OUT_OF_PLACE_RESULT_SUPPORT = 4,
+
+ /** syrk/herk support (on top of regular gemm)
+ *
+ * int32_t, 0 means no support, supported otherwise
+ */
+ CUBLASLT_ALGO_CAP_UPLO_SUPPORT = 5,
+
+ /** tile ids possible to use, see cublasLtMatmulTile_t; if no tile ids are supported use
+ * CUBLASLT_MATMUL_TILE_UNDEFINED
+ *
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
+ *
+ * array of uint32_t
+ */
+ CUBLASLT_ALGO_CAP_TILE_IDS = 6,
+
+ /** custom option range is from 0 to CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX (inclusive), see
+ * CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION
+ *
+ * int32_t
+ */
+ CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX = 7,
+
+ /** whether algorithm supports custom (not COL or ROW) memory order, see cublasLtOrder_t
+ *
+ * int32_t, 0 means only COL and ROW memory order is allowed, non-zero means that algo might have different
+ * requirements
+ */
+ CUBLASLT_ALGO_CAP_CUSTOM_MEMORY_ORDER = 10,
+
+ /** bitmask enumerating pointer modes algorithm supports
+ *
+ * uint32_t, see cublasLtPointerModeMask_t
+ */
+ CUBLASLT_ALGO_CAP_POINTER_MODE_MASK = 11,
+
+ /** bitmask enumerating kinds of postprocessing algorithm supports in the epilogue
+ *
+ * uint32_t, see cublasLtEpilogue_t
+ */
+ CUBLASLT_ALGO_CAP_EPILOGUE_MASK = 12,
+
+ /** stages ids possible to use, see cublasLtMatmulStages_t; if no stages ids are supported use
+ * CUBLASLT_MATMUL_STAGES_UNDEFINED
+ *
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
+ *
+ * array of uint32_t
+ */
+ CUBLASLT_ALGO_CAP_STAGES_IDS = 13,
+
+ /** support for negative ld for all of the matrices
+ *
+ * int32_t, 0 means no support, supported otherwise
+ */
+ CUBLASLT_ALGO_CAP_LD_NEGATIVE = 14,
+
+ /** details about algorithm's implementation that affect its numerical behavior
+ *
+ * uint64_t, see cublasLtNumericalImplFlags_t
+ */
+ CUBLASLT_ALGO_CAP_NUMERICAL_IMPL_FLAGS = 15,
+
+ /** minimum alignment required for A matrix in bytes
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+ *
+ * uint32_t
+ */
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_A_BYTES = 16,
+
+ /** minimum alignment required for B matrix in bytes
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+ *
+ * uint32_t
+ */
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_B_BYTES = 17,
+
+ /** minimum alignment required for C matrix in bytes
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+ *
+ * uint32_t
+ */
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_C_BYTES = 18,
+
+ /** minimum alignment required for D matrix in bytes
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
+ *
+ * uint32_t
+ */
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_D_BYTES = 19,
+
+ /** EXPERIMENTAL: support for synchronization via atomic counters
+ *
+ * int32_t
+ */
+
+ CUBLASLT_ALGO_CAP_ATOMIC_SYNC = 20,
+} cublasLtMatmulAlgoCapAttributes_t;
+
+/** Get algo capability attribute.
+ *
+ * E.g. to get the list of supported Tile IDs:
+ *   cublasLtMatmulTile_t tiles[CUBLASLT_MATMUL_TILE_END];
+ *   size_t num_tiles, size_written;
+ *   if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS, tiles, sizeof(tiles), &size_written) ==
+ *       CUBLAS_STATUS_SUCCESS) {
+ *     num_tiles = size_written / sizeof(tiles[0]);
+ *   }
+ *
+ * \param[in] algo The algo descriptor
+ * \param[in] attr The attribute
+ * \param[out] buf memory address containing the new value
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ * selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCapGetAttribute(const cublasLtMatmulAlgo_t* algo,
+                                                              cublasLtMatmulAlgoCapAttributes_t attr,
+                                                              void* buf,
+                                                              size_t sizeInBytes,
+                                                              size_t* sizeWritten);
+
+/** Algo Configuration Attributes that can be set according to the Algo capabilities
+ */
+typedef enum {
+ /** algorithm index, see cublasLtMatmulAlgoGetIds()
+ *
+ * readonly, set by cublasLtMatmulAlgoInit()
+ * int32_t
+ */
+ CUBLASLT_ALGO_CONFIG_ID = 0,
+ /** tile id, see cublasLtMatmulTile_t
+ *
+ * uint32_t, default: CUBLASLT_MATMUL_TILE_UNDEFINED
+ */
+ CUBLASLT_ALGO_CONFIG_TILE_ID = 1,
+ /** Number of K splits. If the number of K splits is greater than one, SPLITK_NUM parts
+ * of matrix multiplication will be computed in parallel. The results will be accumulated
+ * according to CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME
+ *
+ * int32_t, default: 1
+ */
+ CUBLASLT_ALGO_CONFIG_SPLITK_NUM = 2,
+ /** reduction scheme, see cublasLtReductionScheme_t
+ *
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_NONE
+ */
+ CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME = 3,
+ /** cta swizzling, change mapping from CUDA grid coordinates to parts of the matrices
+ *
+ * possible values: 0, 1, other values reserved
+ *
+ * uint32_t, default: 0
+ */
+ CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING = 4,
+ /** custom option, each algorithm can support some custom options that don't fit description of the other config
+ * attributes, see CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX to get accepted range for any specific case
+ *
+ * uint32_t, default: 0
+ */
+ CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION = 5,
+ /** stages id, see cublasLtMatmulStages_t
+ *
+ * uint32_t, default: CUBLASLT_MATMUL_STAGES_UNDEFINED
+ */
+ CUBLASLT_ALGO_CONFIG_STAGES_ID = 6,
+ /** inner shape id, see cublasLtMatmulInnerShape_t
+ *
+ * uint16_t, default: 0 (CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED)
+ */
+ CUBLASLT_ALGO_CONFIG_INNER_SHAPE_ID = 7,
+ /** Thread Block Cluster shape id, see cublasLtClusterShape_t. Defines cluster size to use.
+ *
+ * uint16_t, default: 0 (CUBLASLT_CLUSTER_SHAPE_AUTO)
+ */
+ CUBLASLT_ALGO_CONFIG_CLUSTER_SHAPE_ID = 8,
+} cublasLtMatmulAlgoConfigAttributes_t;
+
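+/* Example (illustrative sketch, not upstream documentation): enabling split-K on an
+ * algo "algo" previously obtained via cublasLtMatmulAlgoInit() or from a heuristic
+ * result. The chosen values should be validated against the algo capabilities and with
+ * cublasLtMatmulAlgoCheck(); error handling is omitted.
+ *
+ *   int32_t splitK = 4;
+ *   uint32_t reduction = CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE;
+ *   cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &splitK, sizeof(splitK));
+ *   cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &reduction,
+ *                                        sizeof(reduction));
+ */
+
+/** Set algo configuration attribute.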
+ *
+ * \param[in] algo The algo descriptor
+ * \param[in] attr The attribute
+ * \param[in] buf memory address containing the new value
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
+ * selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigSetAttribute(cublasLtMatmulAlgo_t* algo,
+                                                                 cublasLtMatmulAlgoConfigAttributes_t attr,
+                                                                 const void* buf,
+                                                                 size_t sizeInBytes);
+
+/** Get algo configuration attribute.
+ *
+ * \param[in] algo The algo descriptor
+ * \param[in] attr The attribute
+ * \param[out] buf memory address containing the new value
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
+ * selected attribute
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
+ */
+cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigGetAttribute(const cublasLtMatmulAlgo_t* algo,
+                                                                 cublasLtMatmulAlgoConfigAttributes_t attr,
+                                                                 void* buf,
+                                                                 size_t sizeInBytes,
+                                                                 size_t* sizeWritten);
+
+/** Experimental: Logger callback type.
+ */
+typedef void (*cublasLtLoggerCallback_t)(int logLevel, const char* functionName, const char* message);
+
+/** Experimental: Logger callback setter.
+ *
+ * \param[in] callback a user-defined callback function to be called by the logger
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if callback was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtLoggerSetCallback(cublasLtLoggerCallback_t callback);
+
+/** Experimental: Log file setter.
+ *
+ * \param[in] file an open file with write permissions
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if log file was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtLoggerSetFile(FILE* file);
+
+/** Experimental: Open log file.
+ *
+ * \param[in] logFile log file path. If the log file does not exist, it will be created
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if log file was created successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtLoggerOpenFile(const char* logFile);
+
+/** Experimental: Log level setter.
+ *
+ * \param[in] level log level, should be one of the following:
+ * 0. Off
+ * 1. Errors
+ * 2. Performance Trace
+ * 3. Performance Hints
+ * 4. Heuristics Trace
+ * 5. API Trace
+ *
+ * \retval CUBLAS_STATUS_INVALID_VALUE if log level is not one of the above levels
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if log level was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtLoggerSetLevel(int level);
+
+/** Experimental: Log mask setter.
+ *
+ * \param[in] mask log mask, should be a combination of the following masks:
+ * 0. Off
+ * 1. Errors
+ * 2. Performance Trace
+ * 4. Performance Hints
+ * 8. Heuristics Trace
+ * 16. API Trace
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if log mask was set successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtLoggerSetMask(int mask);
+
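+/* Example (illustrative sketch, not upstream documentation): routing cuBLASLt log
+ * messages to a user-defined callback and enabling errors plus performance hints via
+ * the mask values documented above. fprintf requires <stdio.h>.
+ *
+ *   static void myLogger(int logLevel, const char* functionName, const char* message) {
+ *     fprintf(stderr, "[cublasLt:%d] %s: %s\n", logLevel, functionName, message);
+ *   }
+ *
+ *   // later, during initialization:
+ *   cublasLtLoggerSetCallback(myLogger);
+ *   cublasLtLoggerSetMask(1 | 4);  // Errors | Performance Hints
+ */
+
+/** Experimental: Disable logging for the entire session.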
+ *
+ * \retval CUBLAS_STATUS_SUCCESS if logging was disabled successfully
+ */
+cublasStatus_t CUBLASWINAPI cublasLtLoggerForceDisable();
+
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublasXt.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublasXt.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe0e6f99b952514874c45208e751f5330e71570c
--- /dev/null
+++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublasXt.h
@@ -0,0 +1,693 @@
+/*
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/* cublasXt : Host API, Out of Core and Multi-GPU BLAS Library
+
+*/
+
+#if !defined(CUBLAS_XT_H_)
+#define CUBLAS_XT_H_
+
+#include "driver_types.h"
+#include "cuComplex.h" /* import complex data type */
+
+#include "cublas_v2.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* __cplusplus */
+
+struct cublasXtContext;
+typedef struct cublasXtContext* cublasXtHandle_t;
+
+cublasStatus_t CUBLASWINAPI cublasXtCreate(cublasXtHandle_t* handle);
+cublasStatus_t CUBLASWINAPI cublasXtDestroy(cublasXtHandle_t handle);
+cublasStatus_t CUBLASWINAPI cublasXtGetNumBoards(int nbDevices, int deviceId[], int* nbBoards);
+cublasStatus_t CUBLASWINAPI cublasXtMaxBoards(int* nbGpuBoards);
+/* This routine selects the GPUs that the user wants to use for CUBLAS-XT */
+cublasStatus_t CUBLASWINAPI cublasXtDeviceSelect(cublasXtHandle_t handle, int nbDevices, int deviceId[]);
+
+/* This routine allows changing the dimension of the tiles (blockDim x blockDim) */
+cublasStatus_t CUBLASWINAPI cublasXtSetBlockDim(cublasXtHandle_t handle, int blockDim);
+cublasStatus_t CUBLASWINAPI cublasXtGetBlockDim(cublasXtHandle_t handle, int* blockDim);
+
+typedef enum { CUBLASXT_PINNING_DISABLED = 0, CUBLASXT_PINNING_ENABLED = 1 } cublasXtPinnedMemMode_t;
+/* This routine allows CUBLAS-XT to pin the host memory if it finds out that some of the matrices passed
+   are not pinned. Pinning/unpinning the host memory is still a costly operation;
+   it is better if the user controls the memory on their own (by pinning/unpinning only when necessary).
+*/
+cublasStatus_t CUBLASWINAPI cublasXtGetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t* mode);
+cublasStatus_t CUBLASWINAPI cublasXtSetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t mode);
+
+/* These routines provide CPU BLAS routines, used for sizes that are too small or for hybrid computation */
+typedef enum {
+  CUBLASXT_FLOAT = 0,
+  CUBLASXT_DOUBLE = 1,
+  CUBLASXT_COMPLEX = 2,
+  CUBLASXT_DOUBLECOMPLEX = 3,
+} cublasXtOpType_t;
+
+typedef enum {
+  CUBLASXT_GEMM = 0,
+  CUBLASXT_SYRK = 1,
+  CUBLASXT_HERK = 2,
+  CUBLASXT_SYMM = 3,
+  CUBLASXT_HEMM = 4,
+  CUBLASXT_TRSM = 5,
+  CUBLASXT_SYR2K = 6,
+  CUBLASXT_HER2K = 7,
+
+  CUBLASXT_SPMM = 8,
+  CUBLASXT_SYRKX = 9,
+  CUBLASXT_HERKX = 10,
+  CUBLASXT_TRMM = 11,
+  CUBLASXT_ROUTINE_MAX = 12,
+} cublasXtBlasOp_t;
+
+/* Currently only 32-bit integer BLAS routines are supported */
+cublasStatus_t CUBLASWINAPI cublasXtSetCpuRoutine(cublasXtHandle_t handle,
+                                                  cublasXtBlasOp_t blasOp,
+                                                  cublasXtOpType_t type,
+                                                  void* blasFunctor);
+
+/* Specifies the percentage of work that should be done by the CPU; default is 0 (no work) */
+cublasStatus_t CUBLASWINAPI cublasXtSetCpuRatio(cublasXtHandle_t handle,
+                                                cublasXtBlasOp_t blasOp,
+                                                cublasXtOpType_t type,
+                                                float ratio);
+
+/* GEMM */
+cublasStatus_t CUBLASWINAPI cublasXtSgemm(cublasXtHandle_t handle,
+                                          cublasOperation_t transa,
+                                          cublasOperation_t transb,
+                                          size_t m,
+                                          size_t n,
+                                          size_t k,
+                                          const float* alpha,
+                                          const float* A,
+                                          size_t lda,
+                                          const float* B,
+                                          size_t ldb,
+                                          const float* beta,
+                                          float* C,
+                                          size_t ldc);
+
+cublasStatus_t CUBLASWINAPI cublasXtDgemm(cublasXtHandle_t handle,
+                                          cublasOperation_t transa,
+                                          cublasOperation_t transb,
+                                          size_t m,
+                                          size_t n,
+                                          size_t k,
+                                          const double* alpha,
+                                          const double* A,
+                                          size_t lda,
+                                          const double* B,
+                                          size_t ldb,
+                                          const double* beta,
+                                          double* C,
+                                          size_t ldc);
+
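+/* Example (illustrative sketch, not upstream documentation): a minimal multi-GPU SGEMM
+ * with cublasXt. The host buffers hA (m x k), hB (k x n) and hC (m x n) are assumed to
+ * be allocated and filled by the caller; error handling is omitted.
+ *
+ *   cublasXtHandle_t xt;
+ *   int devices[2] = {0, 1};
+ *   size_t m = 1024, n = 1024, k = 1024;
+ *   float alpha = 1.0f, beta = 0.0f;
+ *   cublasXtCreate(&xt);
+ *   cublasXtDeviceSelect(xt, 2, devices);
+ *   cublasXtSgemm(xt, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, hA, m, hB, k, &beta, hC, m);
+ *   cublasXtDestroy(xt);
+ */
+
+cublasStatus_t CUBLASWINAPI cublasXtCgemm(cublasXtHandle_t handle,
+                                          cublasOperation_t transa,
+                                          cublasOperation_t transb,
+                                          size_t m,
+                                          size_t n,
+                                          size_t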
k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZgemm(cublasXtHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + size_t m, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* ------------------------------------------------------- */ +/* SYRK */ +cublasStatus_t CUBLASWINAPI cublasXtSsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const float* A, + size_t lda, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const double* A, + size_t lda, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsyrk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* HERK */ +cublasStatus_t CUBLASWINAPI cublasXtCherk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const cuComplex* A, + size_t lda, + const float* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZherk(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const cuDoubleComplex* A, + size_t lda, + const double* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* SYR2K */ +cublasStatus_t CUBLASWINAPI cublasXtSsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsyr2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* 
-------------------------------------------------------------------- */ +/* HERKX : variant extension of HERK */ +cublasStatus_t CUBLASWINAPI cublasXtCherkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const float* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZherkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const double* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* TRSM */ +cublasStatus_t CUBLASWINAPI cublasXtStrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const float* alpha, + const float* A, + size_t lda, + float* B, + size_t ldb); + +cublasStatus_t CUBLASWINAPI cublasXtDtrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const double* alpha, + const double* A, + size_t lda, + double* B, + size_t ldb); + +cublasStatus_t CUBLASWINAPI cublasXtCtrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + cuComplex* B, + size_t ldb); + +cublasStatus_t CUBLASWINAPI cublasXtZtrsm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + cuDoubleComplex* B, + size_t ldb); +/* -------------------------------------------------------------------- */ +/* SYMM : Symmetric Multiply Matrix*/ +cublasStatus_t CUBLASWINAPI cublasXtSsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsymm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* HEMM : Hermitian Matrix Multiply */ +cublasStatus_t CUBLASWINAPI cublasXtChemm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuComplex* alpha, 
+ const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZhemm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* SYRKX : variant extension of SYRK */ +cublasStatus_t CUBLASWINAPI cublasXtSsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZsyrkx(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); +/* -------------------------------------------------------------------- */ +/* HER2K : variant extension of HERK */ +cublasStatus_t CUBLASWINAPI cublasXtCher2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + const float* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZher2k(cublasXtHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + size_t n, + size_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + const double* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* SPMM : Symmetric Packed Multiply Matrix*/ +cublasStatus_t CUBLASWINAPI cublasXtSspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const float* alpha, + const float* AP, + const float* B, + size_t ldb, + const float* beta, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const double* alpha, + const double* AP, + const double* B, + size_t ldb, + const double* beta, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* AP, + const cuComplex* B, + size_t ldb, + const cuComplex* beta, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZspmm(cublasXtHandle_t handle, + cublasSideMode_t side, + 
cublasFillMode_t uplo, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* B, + size_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + size_t ldc); + +/* -------------------------------------------------------------------- */ +/* TRMM */ +cublasStatus_t CUBLASWINAPI cublasXtStrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const float* alpha, + const float* A, + size_t lda, + const float* B, + size_t ldb, + float* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtDtrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const double* alpha, + const double* A, + size_t lda, + const double* B, + size_t ldb, + double* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtCtrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuComplex* alpha, + const cuComplex* A, + size_t lda, + const cuComplex* B, + size_t ldb, + cuComplex* C, + size_t ldc); + +cublasStatus_t CUBLASWINAPI cublasXtZtrmm(cublasXtHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + size_t m, + size_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + size_t lda, + const cuDoubleComplex* B, + size_t ldb, + cuDoubleComplex* C, + size_t ldc); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* !defined(CUBLAS_XT_H_) */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas_api.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas_api.h new file mode 100644 index 0000000000000000000000000000000000000000..d9b2a534763887ee3cd5a73ce2bc3dae6ecc8a82 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas_api.h @@ -0,0 +1,5835 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/*
+ * This is the public header file for the CUBLAS library, defining the API
+ *
+ * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines)
+ * on top of the CUDA runtime.
+ */
+
+#if !defined(CUBLAS_API_H_)
+#define CUBLAS_API_H_
+
+#ifndef CUBLASWINAPI
+#ifdef _WIN32
+#define CUBLASWINAPI __stdcall
+#else
+#define CUBLASWINAPI
+#endif
+#endif
+
+#ifndef CUBLASAPI
+#error "This file should not be included without defining CUBLASAPI"
+#endif
+
+#include <cuda_runtime_api.h>
+
+#include "driver_types.h"
+#include "cuComplex.h" /* import complex data type */
+
+#include <cuda_fp16.h>
+#include <cuda_bf16.h>
+
+#include <library_types.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* __cplusplus */
+
+#define CUBLAS_VER_MAJOR 12
+#define CUBLAS_VER_MINOR 6
+#define CUBLAS_VER_PATCH 4
+#define CUBLAS_VER_BUILD 1
+#define CUBLAS_VERSION (CUBLAS_VER_MAJOR * 10000 + CUBLAS_VER_MINOR * 100 + CUBLAS_VER_PATCH)
+
+/* CUBLAS status type returns */
+typedef enum {
+  CUBLAS_STATUS_SUCCESS = 0,
+  CUBLAS_STATUS_NOT_INITIALIZED = 1,
+  CUBLAS_STATUS_ALLOC_FAILED = 3,
+  CUBLAS_STATUS_INVALID_VALUE = 7,
+  CUBLAS_STATUS_ARCH_MISMATCH = 8,
+  CUBLAS_STATUS_MAPPING_ERROR = 11,
+  CUBLAS_STATUS_EXECUTION_FAILED = 13,
+  CUBLAS_STATUS_INTERNAL_ERROR = 14,
+  CUBLAS_STATUS_NOT_SUPPORTED = 15,
+  CUBLAS_STATUS_LICENSE_ERROR = 16
+} cublasStatus_t;
+
+typedef enum { CUBLAS_FILL_MODE_LOWER = 0, CUBLAS_FILL_MODE_UPPER = 1, CUBLAS_FILL_MODE_FULL = 2 } cublasFillMode_t;
+
+typedef enum { CUBLAS_DIAG_NON_UNIT = 0, CUBLAS_DIAG_UNIT = 1 } cublasDiagType_t;
+
+typedef enum { CUBLAS_SIDE_LEFT = 0, CUBLAS_SIDE_RIGHT = 1 } cublasSideMode_t;
+
+typedef enum {
+  CUBLAS_OP_N = 0,
+  CUBLAS_OP_T = 1,
+  CUBLAS_OP_C = 2,
+  CUBLAS_OP_HERMITAN = 2, /* synonym of CUBLAS_OP_C */
+  CUBLAS_OP_CONJG = 3     /* conjugate, placeholder - not supported in the current release */
+} cublasOperation_t;
+
+typedef enum { CUBLAS_POINTER_MODE_HOST = 0, CUBLAS_POINTER_MODE_DEVICE = 1 } cublasPointerMode_t;
+
+typedef enum { CUBLAS_ATOMICS_NOT_ALLOWED = 0, CUBLAS_ATOMICS_ALLOWED = 1 } cublasAtomicsMode_t;
+
+/* For different GEMM algorithms */
+typedef enum {
+  CUBLAS_GEMM_DFALT = -1,
+  CUBLAS_GEMM_DEFAULT = -1,
+  CUBLAS_GEMM_ALGO0 = 0,
+  CUBLAS_GEMM_ALGO1 = 1,
+  CUBLAS_GEMM_ALGO2 = 2,
+  CUBLAS_GEMM_ALGO3 = 3,
+  CUBLAS_GEMM_ALGO4 = 4,
+  CUBLAS_GEMM_ALGO5 = 5,
CUBLAS_GEMM_ALGO6 = 6, + CUBLAS_GEMM_ALGO7 = 7, + CUBLAS_GEMM_ALGO8 = 8, + CUBLAS_GEMM_ALGO9 = 9, + CUBLAS_GEMM_ALGO10 = 10, + CUBLAS_GEMM_ALGO11 = 11, + CUBLAS_GEMM_ALGO12 = 12, + CUBLAS_GEMM_ALGO13 = 13, + CUBLAS_GEMM_ALGO14 = 14, + CUBLAS_GEMM_ALGO15 = 15, + CUBLAS_GEMM_ALGO16 = 16, + CUBLAS_GEMM_ALGO17 = 17, + CUBLAS_GEMM_ALGO18 = 18, // sliced 32x32 + CUBLAS_GEMM_ALGO19 = 19, // sliced 64x32 + CUBLAS_GEMM_ALGO20 = 20, // sliced 128x32 + CUBLAS_GEMM_ALGO21 = 21, // sliced 32x32 -splitK + CUBLAS_GEMM_ALGO22 = 22, // sliced 64x32 -splitK + CUBLAS_GEMM_ALGO23 = 23, // sliced 128x32 -splitK + CUBLAS_GEMM_DEFAULT_TENSOR_OP = 99, + CUBLAS_GEMM_DFALT_TENSOR_OP = 99, + CUBLAS_GEMM_ALGO0_TENSOR_OP = 100, + CUBLAS_GEMM_ALGO1_TENSOR_OP = 101, + CUBLAS_GEMM_ALGO2_TENSOR_OP = 102, + CUBLAS_GEMM_ALGO3_TENSOR_OP = 103, + CUBLAS_GEMM_ALGO4_TENSOR_OP = 104, + CUBLAS_GEMM_ALGO5_TENSOR_OP = 105, + CUBLAS_GEMM_ALGO6_TENSOR_OP = 106, + CUBLAS_GEMM_ALGO7_TENSOR_OP = 107, + CUBLAS_GEMM_ALGO8_TENSOR_OP = 108, + CUBLAS_GEMM_ALGO9_TENSOR_OP = 109, + CUBLAS_GEMM_ALGO10_TENSOR_OP = 110, + CUBLAS_GEMM_ALGO11_TENSOR_OP = 111, + CUBLAS_GEMM_ALGO12_TENSOR_OP = 112, + CUBLAS_GEMM_ALGO13_TENSOR_OP = 113, + CUBLAS_GEMM_ALGO14_TENSOR_OP = 114, + CUBLAS_GEMM_ALGO15_TENSOR_OP = 115 +} cublasGemmAlgo_t; + +/* Enum for default math mode/tensor operation */ +typedef enum { + CUBLAS_DEFAULT_MATH = 0, + + /* deprecated, same effect as using CUBLAS_COMPUTE_32F_FAST_16F, will be removed in a future release */ + CUBLAS_TENSOR_OP_MATH = 1, + + /* same as using matching _PEDANTIC compute type when using cublas<T>routine calls or cublasEx() calls with + cudaDataType as compute type */ + CUBLAS_PEDANTIC_MATH = 2, + + /* allow accelerating single precision routines using TF32 tensor cores */ + CUBLAS_TF32_TENSOR_OP_MATH = 3, + + /* flag to force any reductions to use the accumulator type and not output type in case of mixed precision routines + with lower size output type */ + CUBLAS_MATH_DISALLOW_REDUCED_PRECISION_REDUCTION = 16, +} cublasMath_t; + +/* For backward compatibility purposes */ +typedef cudaDataType cublasDataType_t; + +/* Enum for compute type + * + * - default types provide best available performance using all available hardware features + * and guarantee internal storage precision with at least the same precision and range; + * - _PEDANTIC types ensure standard arithmetic and exact specified internal storage format; + * - _FAST types allow for some loss of precision to enable higher throughput arithmetic. 
+ */ +typedef enum { + CUBLAS_COMPUTE_16F = 64, /* half - default */ + CUBLAS_COMPUTE_16F_PEDANTIC = 65, /* half - pedantic */ + CUBLAS_COMPUTE_32F = 68, /* float - default */ + CUBLAS_COMPUTE_32F_PEDANTIC = 69, /* float - pedantic */ + CUBLAS_COMPUTE_32F_FAST_16F = 74, /* float - fast, allows down-converting inputs to half or TF32 */ + CUBLAS_COMPUTE_32F_FAST_16BF = 75, /* float - fast, allows down-converting inputs to bfloat16 or TF32 */ + CUBLAS_COMPUTE_32F_FAST_TF32 = 77, /* float - fast, allows down-converting inputs to TF32 */ + CUBLAS_COMPUTE_64F = 70, /* double - default */ + CUBLAS_COMPUTE_64F_PEDANTIC = 71, /* double - pedantic */ + CUBLAS_COMPUTE_32I = 72, /* signed 32-bit int - default */ + CUBLAS_COMPUTE_32I_PEDANTIC = 73, /* signed 32-bit int - pedantic */ +} cublasComputeType_t; + +/* Opaque structure holding CUBLAS library context */ +struct cublasContext; +typedef struct cublasContext* cublasHandle_t; + +/* Cublas logging */ +typedef void (*cublasLogCallback)(const char* msg); + +/* cuBLAS Exported API {{{ */ + +/* --------------- CUBLAS Helper Functions ---------------- */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCreate_v2(cublasHandle_t* handle); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDestroy_v2(cublasHandle_t handle); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetVersion_v2(cublasHandle_t handle, int* version); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetProperty(libraryPropertyType type, int* value); + +CUBLASAPI size_t CUBLASWINAPI cublasGetCudartVersion(void); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetWorkspace_v2(cublasHandle_t handle, + void* workspace, + size_t workspaceSizeInBytes); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetStream_v2(cublasHandle_t handle, cudaStream_t streamId); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetStream_v2(cublasHandle_t handle, cudaStream_t* streamId); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetPointerMode_v2(cublasHandle_t handle, cublasPointerMode_t* mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetPointerMode_v2(cublasHandle_t handle, cublasPointerMode_t mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetAtomicsMode(cublasHandle_t handle, cublasAtomicsMode_t* mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetAtomicsMode(cublasHandle_t handle, cublasAtomicsMode_t mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetMathMode(cublasHandle_t handle, cublasMath_t* mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetMathMode(cublasHandle_t handle, cublasMath_t mode); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetSmCountTarget(cublasHandle_t handle, int* smCountTarget); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetSmCountTarget(cublasHandle_t handle, int smCountTarget); + +CUBLASAPI const char* CUBLASWINAPI cublasGetStatusName(cublasStatus_t status); + +CUBLASAPI const char* CUBLASWINAPI cublasGetStatusString(cublasStatus_t status); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasLoggerConfigure(int logIsOn, + int logToStdOut, + int logToStdErr, + const char* logFileName); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSetLoggerCallback(cublasLogCallback userCallback); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGetLoggerCallback(cublasLogCallback* userCallback); + +cublasStatus_t CUBLASWINAPI cublasSetVector(int n, int elemSize, const void* x, int incx, void* devicePtr, int incy); + +cublasStatus_t CUBLASWINAPI +cublasSetVector_64(int64_t n, int64_t elemSize, const void* x, int64_t incx, void* devicePtr, int64_t incy); + +cublasStatus_t 
CUBLASWINAPI cublasGetVector(int n, int elemSize, const void* x, int incx, void* y, int incy); + +cublasStatus_t CUBLASWINAPI +cublasGetVector_64(int64_t n, int64_t elemSize, const void* x, int64_t incx, void* y, int64_t incy); + +cublasStatus_t CUBLASWINAPI cublasSetMatrix(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb); + +cublasStatus_t CUBLASWINAPI +cublasSetMatrix_64(int64_t rows, int64_t cols, int64_t elemSize, const void* A, int64_t lda, void* B, int64_t ldb); + +cublasStatus_t CUBLASWINAPI cublasGetMatrix(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb); + +cublasStatus_t CUBLASWINAPI +cublasGetMatrix_64(int64_t rows, int64_t cols, int64_t elemSize, const void* A, int64_t lda, void* B, int64_t ldb); + +cublasStatus_t CUBLASWINAPI cublasSetVectorAsync( + int n, int elemSize, const void* hostPtr, int incx, void* devicePtr, int incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasSetVectorAsync_64( + int64_t n, int64_t elemSize, const void* hostPtr, int64_t incx, void* devicePtr, int64_t incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasGetVectorAsync( + int n, int elemSize, const void* devicePtr, int incx, void* hostPtr, int incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasGetVectorAsync_64( + int64_t n, int64_t elemSize, const void* devicePtr, int64_t incx, void* hostPtr, int64_t incy, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI +cublasSetMatrixAsync(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasSetMatrixAsync_64(int64_t rows, + int64_t cols, + int64_t elemSize, + const void* A, + int64_t lda, + void* B, + int64_t ldb, + cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI +cublasGetMatrixAsync(int rows, int cols, int elemSize, const void* A, int lda, void* B, int ldb, cudaStream_t stream); + +cublasStatus_t CUBLASWINAPI cublasGetMatrixAsync_64(int64_t rows, + int64_t cols, + int64_t elemSize, + const void* A, + int64_t lda, + void* B, + int64_t ldb, + cudaStream_t stream); + +CUBLASAPI void CUBLASWINAPI cublasXerbla(const char* srName, int info); + +/* --------------- CUBLAS BLAS1 Functions ---------------- */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasNrm2Ex(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasNrm2Ex_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSnrm2_v2(cublasHandle_t handle, int n, const float* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSnrm2_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDnrm2_v2(cublasHandle_t handle, int n, const double* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDnrm2_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScnrm2_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScnrm2_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI 
+cublasDznrm2_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDznrm2_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotEx(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + const void* y, + cudaDataType yType, + int incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + const void* y, + cudaDataType yType, + int64_t incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotcEx(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + const void* y, + cudaDataType yType, + int incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDotcEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + const void* y, + cudaDataType yType, + int64_t incy, + void* result, + cudaDataType resultType, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSdot_v2(cublasHandle_t handle, int n, const float* x, int incx, const float* y, int incy, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSdot_v2_64( + cublasHandle_t handle, int64_t n, const float* x, int64_t incx, const float* y, int64_t incy, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDdot_v2(cublasHandle_t handle, int n, const double* x, int incx, const double* y, int incy, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDdot_v2_64( + cublasHandle_t handle, int64_t n, const double* x, int64_t incx, const double* y, int64_t incy, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotu_v2( + cublasHandle_t handle, int n, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotu_v2_64(cublasHandle_t handle, + int64_t n, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotc_v2( + cublasHandle_t handle, int n, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdotc_v2_64(cublasHandle_t handle, + int64_t n, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotu_v2(cublasHandle_t handle, + int n, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotu_v2_64(cublasHandle_t handle, + int64_t n, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotc_v2(cublasHandle_t handle, + int n, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdotc_v2_64(cublasHandle_t handle, + int64_t n, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* result); + +CUBLASAPI cublasStatus_t 
CUBLASWINAPI cublasScalEx(cublasHandle_t handle, + int n, + const void* alpha, + cudaDataType alphaType, + void* x, + cudaDataType xType, + int incx, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasScalEx_64(cublasHandle_t handle, + int64_t n, + const void* alpha, + cudaDataType alphaType, + void* x, + cudaDataType xType, + int64_t incx, + cudaDataType executionType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSscal_v2(cublasHandle_t handle, int n, const float* alpha, float* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSscal_v2_64(cublasHandle_t handle, int64_t n, const float* alpha, float* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDscal_v2(cublasHandle_t handle, int n, const double* alpha, double* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDscal_v2_64(cublasHandle_t handle, int64_t n, const double* alpha, double* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCscal_v2(cublasHandle_t handle, int n, const cuComplex* alpha, cuComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCscal_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* alpha, cuComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCsscal_v2(cublasHandle_t handle, int n, const float* alpha, cuComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCsscal_v2_64(cublasHandle_t handle, int64_t n, const float* alpha, cuComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZscal_v2(cublasHandle_t handle, int n, const cuDoubleComplex* alpha, cuDoubleComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZscal_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* alpha, cuDoubleComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZdscal_v2(cublasHandle_t handle, int n, const double* alpha, cuDoubleComplex* x, int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZdscal_v2_64(cublasHandle_t handle, int64_t n, const double* alpha, cuDoubleComplex* x, int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAxpyEx(cublasHandle_t handle, + int n, + const void* alpha, + cudaDataType alphaType, + const void* x, + cudaDataType xType, + int incx, + void* y, + cudaDataType yType, + int incy, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAxpyEx_64(cublasHandle_t handle, + int64_t n, + const void* alpha, + cudaDataType alphaType, + const void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSaxpy_v2(cublasHandle_t handle, int n, const float* alpha, const float* x, int incx, float* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSaxpy_v2_64( + cublasHandle_t handle, int64_t n, const float* alpha, const float* x, int64_t incx, float* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDaxpy_v2(cublasHandle_t handle, int n, const double* alpha, const double* x, int incx, double* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDaxpy_v2_64( + cublasHandle_t handle, int64_t n, const double* alpha, const double* x, int64_t incx, double* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCaxpy_v2( + cublasHandle_t handle, int n, const cuComplex* alpha, const cuComplex* x, int incx, cuComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCaxpy_v2_64(cublasHandle_t handle, + int64_t n, + 
const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZaxpy_v2(cublasHandle_t handle, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZaxpy_v2_64(cublasHandle_t handle, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCopyEx( + cublasHandle_t handle, int n, const void* x, cudaDataType xType, int incx, void* y, cudaDataType yType, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCopyEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScopy_v2(cublasHandle_t handle, int n, const float* x, int incx, float* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScopy_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, float* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDcopy_v2(cublasHandle_t handle, int n, const double* x, int incx, double* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDcopy_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, double* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCcopy_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, cuComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCcopy_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, cuComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZcopy_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZcopy_v2_64( + cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, cuDoubleComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSswap_v2(cublasHandle_t handle, int n, float* x, int incx, float* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSswap_v2_64(cublasHandle_t handle, int64_t n, float* x, int64_t incx, float* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDswap_v2(cublasHandle_t handle, int n, double* x, int incx, double* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDswap_v2_64(cublasHandle_t handle, int64_t n, double* x, int64_t incx, double* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCswap_v2(cublasHandle_t handle, int n, cuComplex* x, int incx, cuComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCswap_v2_64(cublasHandle_t handle, int64_t n, cuComplex* x, int64_t incx, cuComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZswap_v2(cublasHandle_t handle, int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZswap_v2_64(cublasHandle_t handle, int64_t n, cuDoubleComplex* x, int64_t incx, cuDoubleComplex* y, int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSwapEx( + cublasHandle_t handle, int n, void* x, cudaDataType xType, int incx, void* y, cudaDataType yType, int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSwapEx_64(cublasHandle_t handle, + int64_t n, + void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + 
int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamax_v2(cublasHandle_t handle, int n, const float* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamax_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamax_v2(cublasHandle_t handle, int n, const double* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamax_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamax_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamax_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamax_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamax_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIamaxEx(cublasHandle_t handle, int n, const void* x, cudaDataType xType, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIamaxEx_64(cublasHandle_t handle, int64_t n, const void* x, cudaDataType xType, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamin_v2(cublasHandle_t handle, int n, const float* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIsamin_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamin_v2(cublasHandle_t handle, int n, const double* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIdamin_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamin_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIcamin_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamin_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIzamin_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIaminEx(cublasHandle_t handle, int n, const void* x, cudaDataType xType, int incx, int* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasIaminEx_64(cublasHandle_t handle, int64_t n, const void* x, cudaDataType xType, int64_t incx, int64_t* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAsumEx(cublasHandle_t handle, + int n, + const void* x, + cudaDataType xType, + int incx, + void* result, + cudaDataType resultType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasAsumEx_64(cublasHandle_t handle, + int64_t n, + const void* x, + cudaDataType xType, + int64_t incx, + void* result, + cudaDataType resultType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSasum_v2(cublasHandle_t handle, int n, const float* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSasum_v2_64(cublasHandle_t handle, int64_t n, const float* x, int64_t incx, float* result); 
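+
+/*
+ * Editorial usage sketch -- not part of the NVIDIA header. The BLAS1 entry
+ * points declared in this section share one calling pattern: create a handle
+ * once, upload data with cublasSetVector, call the routine with device
+ * pointers and strides, and read the scalar result back in host pointer mode
+ * (the default). A minimal sketch, assuming cublas_v2.h and the CUDA runtime
+ * are available and omitting error checking for brevity:
+ *
+ *   #include <cublas_v2.h>
+ *   #include <cuda_runtime.h>
+ *   #include <stdio.h>
+ *
+ *   int main(void) {
+ *     const int n = 4;
+ *     const float hx[4] = {1.0f, -2.0f, 3.0f, -4.0f};
+ *     float* dx = NULL;
+ *     float result = 0.0f;
+ *     cublasHandle_t handle;
+ *
+ *     cudaMalloc((void**)&dx, n * sizeof(float));
+ *     cublasCreate(&handle);                  // macro for cublasCreate_v2
+ *     cublasSetVector(n, sizeof(float), hx, 1, dx, 1);
+ *     cublasSasum(handle, n, dx, 1, &result); // sum of |x[i]|, here 10
+ *     printf("asum = %f\n", result);
+ *     cublasDestroy(handle);
+ *     cudaFree(dx);
+ *     return 0;
+ *   }
+ */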
+ +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDasum_v2(cublasHandle_t handle, int n, const double* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDasum_v2_64(cublasHandle_t handle, int64_t n, const double* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScasum_v2(cublasHandle_t handle, int n, const cuComplex* x, int incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasScasum_v2_64(cublasHandle_t handle, int64_t n, const cuComplex* x, int64_t incx, float* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDzasum_v2(cublasHandle_t handle, int n, const cuDoubleComplex* x, int incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDzasum_v2_64(cublasHandle_t handle, int64_t n, const cuDoubleComplex* x, int64_t incx, double* result); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrot_v2(cublasHandle_t handle, int n, float* x, int incx, float* y, int incy, const float* c, const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSrot_v2_64( + cublasHandle_t handle, int64_t n, float* x, int64_t incx, float* y, int64_t incy, const float* c, const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDrot_v2(cublasHandle_t handle, int n, double* x, int incx, double* y, int incy, const double* c, const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDrot_v2_64(cublasHandle_t handle, + int64_t n, + double* x, + int64_t incx, + double* y, + int64_t incy, + const double* c, + const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCrot_v2( + cublasHandle_t handle, int n, cuComplex* x, int incx, cuComplex* y, int incy, const float* c, const cuComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCrot_v2_64(cublasHandle_t handle, + int64_t n, + cuComplex* x, + int64_t incx, + cuComplex* y, + int64_t incy, + const float* c, + const cuComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsrot_v2( + cublasHandle_t handle, int n, cuComplex* x, int incx, cuComplex* y, int incy, const float* c, const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsrot_v2_64(cublasHandle_t handle, + int64_t n, + cuComplex* x, + int64_t incx, + cuComplex* y, + int64_t incy, + const float* c, + const float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZrot_v2(cublasHandle_t handle, + int n, + cuDoubleComplex* x, + int incx, + cuDoubleComplex* y, + int incy, + const double* c, + const cuDoubleComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZrot_v2_64(cublasHandle_t handle, + int64_t n, + cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* y, + int64_t incy, + const double* c, + const cuDoubleComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdrot_v2(cublasHandle_t handle, + int n, + cuDoubleComplex* x, + int incx, + cuDoubleComplex* y, + int incy, + const double* c, + const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdrot_v2_64(cublasHandle_t handle, + int64_t n, + cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* y, + int64_t incy, + const double* c, + const double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotEx(cublasHandle_t handle, + int n, + void* x, + cudaDataType xType, + int incx, + void* y, + cudaDataType yType, + int incy, + const void* c, + const void* s, + cudaDataType csType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotEx_64(cublasHandle_t handle, + int64_t n, + void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy, + 
const void* c, + const void* s, + cudaDataType csType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSrotg_v2(cublasHandle_t handle, float* a, float* b, float* c, float* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDrotg_v2(cublasHandle_t handle, double* a, double* b, double* c, double* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCrotg_v2(cublasHandle_t handle, cuComplex* a, cuComplex* b, float* c, cuComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasZrotg_v2(cublasHandle_t handle, cuDoubleComplex* a, cuDoubleComplex* b, double* c, cuDoubleComplex* s); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotgEx(cublasHandle_t handle, + void* a, + void* b, + cudaDataType abType, + void* c, + void* s, + cudaDataType csType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrotm_v2(cublasHandle_t handle, int n, float* x, int incx, float* y, int incy, const float* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrotm_v2_64(cublasHandle_t handle, int64_t n, float* x, int64_t incx, float* y, int64_t incy, const float* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDrotm_v2(cublasHandle_t handle, int n, double* x, int incx, double* y, int incy, const double* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDrotm_v2_64( + cublasHandle_t handle, int64_t n, double* x, int64_t incx, double* y, int64_t incy, const double* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotmEx(cublasHandle_t handle, + int n, + void* x, + cudaDataType xType, + int incx, + void* y, + cudaDataType yType, + int incy, + const void* param, + cudaDataType paramType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotmEx_64(cublasHandle_t handle, + int64_t n, + void* x, + cudaDataType xType, + int64_t incx, + void* y, + cudaDataType yType, + int64_t incy, + const void* param, + cudaDataType paramType, + cudaDataType executiontype); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSrotmg_v2(cublasHandle_t handle, float* d1, float* d2, float* x1, const float* y1, float* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDrotmg_v2(cublasHandle_t handle, double* d1, double* d2, double* x1, const double* y1, double* param); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasRotmgEx(cublasHandle_t handle, + void* d1, + cudaDataType d1Type, + void* d2, + cudaDataType d2Type, + void* x1, + cudaDataType x1Type, + const void* y1, + cudaDataType y1Type, + void* param, + cudaDataType paramType, + cudaDataType executiontype); + +/* --------------- CUBLAS BLAS2 Functions ---------------- */ + +/* GEMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, 
+ const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* GBMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgbmv_v2(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int kl, + int ku, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgbmv_v2_64(cublasHandle_t handle, + cublasOperation_t trans, + 
int64_t m, + int64_t n, + int64_t kl, + int64_t ku, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* TRMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuComplex* A, + int64_t lda, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* TBMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuComplex* A, + int64_t lda, + 
cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* TPMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* AP, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* AP, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* AP, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* AP, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* AP, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuComplex* AP, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int64_t incx); + +/* TRSV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI 
cublasCtrsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuComplex* A, + int64_t lda, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* TPSV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const float* AP, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const float* AP, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const double* AP, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const double* AP, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuComplex* AP, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuComplex* AP, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + const cuDoubleComplex* AP, + cuDoubleComplex* x, + int64_t incx); + +/* TBSV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const float* A, + int lda, + float* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const float* A, + int64_t lda, + float* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const double* A, + int lda, + double* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const double* A, + int64_t lda, + double* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbsv_v2(cublasHandle_t handle, + cublasFillMode_t 
uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuComplex* A, + int lda, + cuComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuComplex* A, + int64_t lda, + cuComplex* x, + int64_t incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbsv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int n, + int k, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* x, + int incx); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtbsv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t n, + int64_t k, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* x, + int64_t incx); + +/* SYMV/HEMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, 
+ const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* SBMV/HBMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhbmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhbmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* SPMV/HPMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* AP, + const float* x, + int incx, + const float* beta, + float* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* AP, + const float* x, + int64_t incx, + const float* beta, + float* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const 
double* AP, + const double* x, + int incx, + const double* beta, + double* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* AP, + const double* x, + int64_t incx, + const double* beta, + double* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* AP, + const cuComplex* x, + int incx, + const cuComplex* beta, + cuComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* AP, + const cuComplex* x, + int64_t incx, + const cuComplex* beta, + cuComplex* y, + int64_t incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpmv_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int incy); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpmv_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* AP, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy); + +/* GER */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSger_v2(cublasHandle_t handle, + int m, + int n, + const float* alpha, + const float* x, + int incx, + const float* y, + int incy, + float* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSger_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + const float* y, + int64_t incy, + float* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDger_v2(cublasHandle_t handle, + int m, + int n, + const double* alpha, + const double* x, + int incx, + const double* y, + int incy, + double* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDger_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + const double* y, + int64_t incy, + double* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeru_v2(cublasHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeru_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgerc_v2(cublasHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgerc_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeru_v2(cublasHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeru_v2_64(cublasHandle_t handle, + 
int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgerc_v2(cublasHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgerc_v2_64(cublasHandle_t handle, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); + +/* SYR/HER */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* x, + int incx, + float* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + float* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* x, + int incx, + double* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + double* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const cuComplex* x, + int incx, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* A, + int64_t lda); + +/* SPR/HPR */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr_v2( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const float* alpha, const float* x, int incx, float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, + 
int64_t incx, + float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr_v2( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const double* alpha, const double* x, int incx, double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const cuComplex* x, + int incx, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const cuComplex* x, + int64_t incx, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* AP); + +/* SYR2/HER2 */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* x, + int incx, + const float* y, + int incy, + float* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + const float* y, + int64_t incy, + float* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* x, + int incx, + const double* y, + int incy, + double* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + const double* y, + int64_t incy, + double* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, 
+ int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* A, + int64_t lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* A, + int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* A, + int64_t lda); + +/* SPR2/HPR2 */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const float* alpha, + const float* x, + int incx, + const float* y, + int incy, + float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSspr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const float* alpha, + const float* x, + int64_t incx, + const float* y, + int64_t incy, + float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const double* alpha, + const double* x, + int incx, + const double* y, + int incy, + double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDspr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const double* alpha, + const double* x, + int64_t incx, + const double* y, + int64_t incy, + double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex* alpha, + const cuComplex* x, + int incx, + const cuComplex* y, + int incy, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChpr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuComplex* alpha, + const cuComplex* x, + int64_t incx, + const cuComplex* y, + int64_t incy, + cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr2_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int incx, + const cuDoubleComplex* y, + int incy, + cuDoubleComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhpr2_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* x, + int64_t incx, + const cuDoubleComplex* y, + int64_t incy, + cuDoubleComplex* AP); + +/* BATCH GEMV */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const float* const Aarray[], + int lda, + const float* const xarray[], + int incx, + const float* beta, + float* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const float* const Aarray[], + int64_t lda, + const float* const xarray[], + int64_t incx, + const float* beta, + float* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const double* alpha, + const double* const Aarray[], + int lda, + const double* const xarray[], + int incx, + const double* beta, + double* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI 
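+/*
+ * Hypothetical sketch for the batched GEMV entry points. Aarray, xarray and
+ * yarray are arrays of device pointers, and the pointer arrays themselves
+ * must reside in device memory; d_Aptrs, d_xptrs and d_yptrs below are
+ * assumed to have been built that way. With trans = CUBLAS_OP_N, batch i
+ * computes y[i] = alpha * A[i] * x[i] + beta * y[i]:
+ *
+ *   const float alpha = 1.0f, beta = 0.0f;
+ *   cublasSgemvBatched(h, CUBLAS_OP_N, m, n, &alpha,
+ *                      d_Aptrs, m,    // m-by-n matrices, lda = m
+ *                      d_xptrs, 1,    // length-n input vectors
+ *                      &beta,
+ *                      d_yptrs, 1,    // length-m output vectors
+ *                      batchCount);
+ */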
cublasDgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const double* alpha, + const double* const Aarray[], + int64_t lda, + const double* const xarray[], + int64_t incx, + const double* beta, + double* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int lda, + const cuComplex* const xarray[], + int incx, + const cuComplex* beta, + cuComplex* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int64_t lda, + const cuComplex* const xarray[], + int64_t incx, + const cuComplex* beta, + cuComplex* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int lda, + const cuDoubleComplex* const xarray[], + int incx, + const cuDoubleComplex* beta, + cuDoubleComplex* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int64_t lda, + const cuDoubleComplex* const xarray[], + int64_t incx, + const cuDoubleComplex* beta, + cuDoubleComplex* const yarray[], + int64_t incy, + int64_t batchCount); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* const Aarray[], + int lda, + const __half* const xarray[], + int incx, + const float* beta, + __half* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* const Aarray[], + int64_t lda, + const __half* const xarray[], + int64_t incx, + const float* beta, + __half* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* const Aarray[], + int lda, + const __half* const xarray[], + int incx, + const float* beta, + float* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* const Aarray[], + int64_t lda, + const __half* const xarray[], + int64_t incx, + const float* beta, + float* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int lda, + const __nv_bfloat16* const xarray[], + int incx, + const float* beta, + __nv_bfloat16* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + 
int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int64_t lda, + const __nv_bfloat16* const xarray[], + int64_t incx, + const float* beta, + __nv_bfloat16* const yarray[], + int64_t incy, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int lda, + const __nv_bfloat16* const xarray[], + int incx, + const float* beta, + float* const yarray[], + int incy, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* const Aarray[], + int64_t lda, + const __nv_bfloat16* const xarray[], + int64_t incx, + const float* beta, + float* const yarray[], + int64_t incy, + int64_t batchCount); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const float* A, + int lda, + long long int strideA, + const float* x, + int incx, + long long int stridex, + const float* beta, + float* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + long long int strideA, + const float* x, + int64_t incx, + long long int stridex, + const float* beta, + float* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const double* alpha, + const double* A, + int lda, + long long int strideA, + const double* x, + int incx, + long long int stridex, + const double* beta, + double* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + long long int strideA, + const double* x, + int64_t incx, + long long int stridex, + const double* beta, + double* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + long long int strideA, + const cuComplex* x, + int incx, + long long int stridex, + const cuComplex* beta, + cuComplex* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + long long int strideA, + const cuComplex* x, + int64_t incx, + long long int stridex, + const cuComplex* beta, + cuComplex* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + long long int strideA, + const cuDoubleComplex* x, + int incx, + long long int stridex, + const cuDoubleComplex* beta, + cuDoubleComplex* y, 
+ int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + long long int strideA, + const cuDoubleComplex* x, + int64_t incx, + long long int stridex, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* A, + int lda, + long long int strideA, + const __half* x, + int incx, + long long int stridex, + const float* beta, + __half* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSHgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* A, + int64_t lda, + long long int strideA, + const __half* x, + int64_t incx, + long long int stridex, + const float* beta, + __half* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __half* A, + int lda, + long long int strideA, + const __half* x, + int incx, + long long int stridex, + const float* beta, + float* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHSSgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __half* A, + int64_t lda, + long long int strideA, + const __half* x, + int64_t incx, + long long int stridex, + const float* beta, + float* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* A, + int lda, + long long int strideA, + const __nv_bfloat16* x, + int incx, + long long int stridex, + const float* beta, + __nv_bfloat16* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSTgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* A, + int64_t lda, + long long int strideA, + const __nv_bfloat16* x, + int64_t incx, + long long int stridex, + const float* beta, + __nv_bfloat16* y, + int64_t incy, + long long int stridey, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvStridedBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + const float* alpha, + const __nv_bfloat16* A, + int lda, + long long int strideA, + const __nv_bfloat16* x, + int incx, + long long int stridex, + const float* beta, + float* y, + int incy, + long long int stridey, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasTSSgemvStridedBatched_64(cublasHandle_t handle, + cublasOperation_t trans, + int64_t m, + int64_t n, + const float* alpha, + const __nv_bfloat16* A, + int64_t lda, + long long int strideA, + const __nv_bfloat16* x, + int64_t incx, + long long int stridex, + const float* beta, + float* y, + int64_t incy, + long long int stridey, + 
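+/*
+ * Hypothetical sketch for the strided-batched GEMV variants, which replace
+ * the arrays of pointers with one base pointer plus an element stride per
+ * operand: batch i uses A + i*strideA, x + i*stridex and y + i*stridey.
+ * Assuming m-by-n matrices packed back to back:
+ *
+ *   cublasSgemvStridedBatched(h, CUBLAS_OP_N, m, n, &alpha,
+ *                             d_A, m, (long long)m * n,
+ *                             d_x, 1, n,
+ *                             &beta,
+ *                             d_y, 1, m,
+ *                             batchCount);
+ */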
int64_t batchCount); + +#endif + +/* ---------------- CUBLAS BLAS3 Functions ---------------- */ + +/* GEMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3m(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3m_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm_v2(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const 
cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm_v2_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm3m(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemm3m_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemm(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const __half* alpha, + const __half* A, + int lda, + const __half* B, + int ldb, + const __half* beta, + __half* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemm_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const __half* alpha, + const __half* A, + int64_t lda, + const __half* B, + int64_t ldb, + const __half* beta, + __half* C, + int64_t ldc); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const float* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const float* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const void* beta, + void* C, + cudaDataType Ctype, + int ldc, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const void* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const void* beta, + void* C, + cudaDataType Ctype, + int64_t ldc, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int 
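+/*
+ * Hypothetical sketch for the GEMM family. All matrices are column-major and
+ * C (m-by-n) = alpha * op(A) * op(B) + beta * C, where op(A) is m-by-k;
+ * cublas_v2.h maps the unsuffixed cublasSgemm name onto cublasSgemm_v2.
+ * Device buffers d_A, d_B, d_C with tight leading dimensions are assumed:
+ *
+ *   const float alpha = 1.0f, beta = 0.0f;
+ *   cublasSgemm_v2(h, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
+ *                  &alpha, d_A, m, d_B, k, &beta, d_C, m);
+ *
+ * cublasGemmEx generalizes this to mixed precision; one plausible
+ * combination is __half inputs accumulated in float:
+ *
+ *   cublasGemmEx(h, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha,
+ *                d_A16, CUDA_R_16F, m, d_B16, CUDA_R_16F, k, &beta,
+ *                d_C32, CUDA_R_32F, m,
+ *                CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT);
+ */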
n, + int k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const void* B, + cudaDataType Btype, + int64_t ldb, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +/* SYRK */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk3mEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const 
void* A, + cudaDataType Atype, + int lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrk3mEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const cuComplex* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +/* HERK */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const cuComplex* A, + int lda, + const float* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const cuComplex* A, + int64_t lda, + const float* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherk_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const cuDoubleComplex* A, + int lda, + const double* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherk_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const cuDoubleComplex* A, + int64_t lda, + const double* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const void* A, + cudaDataType Atype, + int lda, + const float* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const float* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk3mEx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const void* A, + cudaDataType Atype, + int lda, + const float* beta, + void* C, + cudaDataType Ctype, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherk3mEx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + const float* beta, + void* C, + cudaDataType Ctype, + int64_t ldc); + +/* SYR2K / HER2K */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* 
beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyr2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const float* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCher2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const float* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2k_v2(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const double* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZher2k_v2_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const double* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* SYRKX / HERKX */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrkx(cublasHandle_t handle, + 
cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsyrkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const float* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCherkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const float* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherkx(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const double* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZherkx_64(cublasHandle_t handle, + cublasFillMode_t uplo, + cublasOperation_t trans, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const double* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* SYMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + const float* beta, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + 
const float* beta, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + const double* beta, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + const double* beta, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZsymm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* HEMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* B, + int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasChemm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + const cuComplex* beta, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZhemm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc); + +/* TRSM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const float* alpha, + const float* A, + int lda, + float* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsm_v2_64(cublasHandle_t 
handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + float* B, + int64_t ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const double* alpha, + const double* A, + int lda, + double* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + double* B, + int64_t ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + cuComplex* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + cuComplex* B, + int64_t ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + cuDoubleComplex* B, + int ldb); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + cuDoubleComplex* B, + int64_t ldb); + +/* TRMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* B, + int ldb, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* B, + int64_t ldb, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* B, + int ldb, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* B, + int64_t ldb, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + 
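+/*
+ * Hypothetical sketch for the triangular routines. With side = LEFT,
+ * cublasStrsm_v2 solves op(A) * X = alpha * B for X and overwrites B with
+ * the solution; A is m-by-m triangular and B is m-by-n, both column-major:
+ *
+ *   const float alpha = 1.0f;
+ *   cublasStrsm_v2(h, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER,
+ *                  CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
+ *                  m, n, &alpha, d_A, m, d_B, m);
+ *
+ * The TRMM _v2 interface in this section takes the same triangular operand
+ * but multiplies rather than solves, writing the result to a separate C.
+ */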
const cuComplex* B, + int ldb, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* B, + int64_t ldb, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmm_v2(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrmm_v2_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* B, + int64_t ldb, + cuDoubleComplex* C, + int64_t ldc); + +/* BATCH GEMM */ + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const __half* alpha, + const __half* const Aarray[], + int lda, + const __half* const Barray[], + int ldb, + const __half* beta, + __half* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const __half* alpha, + const __half* const Aarray[], + int64_t lda, + const __half* const Barray[], + int64_t ldb, + const __half* beta, + __half* const Carray[], + int64_t ldc, + int64_t batchCount); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* const Aarray[], + int lda, + const float* const Barray[], + int ldb, + const float* beta, + float* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const float* const Aarray[], + int64_t lda, + const float* const Barray[], + int64_t ldb, + const float* beta, + float* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const double* alpha, + const double* const Aarray[], + int lda, + const double* const Barray[], + int ldb, + const double* beta, + double* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const double* alpha, + const double* const Aarray[], + int64_t lda, + const double* const Barray[], + int64_t ldb, + const double* beta, + double* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* 
alpha, + const cuComplex* const Aarray[], + int lda, + const cuComplex* const Barray[], + int ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int64_t lda, + const cuComplex* const Barray[], + int64_t ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int lda, + const cuComplex* const Barray[], + int ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* const Aarray[], + int64_t lda, + const cuComplex* const Barray[], + int64_t ldb, + const cuComplex* beta, + cuComplex* const Carray[], + int64_t ldc, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int lda, + const cuDoubleComplex* const Barray[], + int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* const Carray[], + int ldc, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const Aarray[], + int64_t lda, + const cuDoubleComplex* const Barray[], + int64_t ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* const Carray[], + int64_t ldc, + int64_t batchCount); + +#if defined(__cplusplus) + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const __half* alpha, + const __half* A, + int lda, + long long int strideA, + const __half* B, + int ldb, + long long int strideB, + const __half* beta, + __half* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasHgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const __half* alpha, + const __half* A, + int64_t lda, + long long int strideA, + const __half* B, + int64_t ldb, + long long int strideB, + const __half* beta, + __half* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +#endif + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* A, + int lda, + long long int strideA, + const float* B, + int ldb, + long long int strideB, + const float* beta, + float* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + 
cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const float* alpha, + const float* A, + int64_t lda, + long long int strideA, + const float* B, + int64_t ldb, + long long int strideB, + const float* beta, + float* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const double* alpha, + const double* A, + int lda, + long long int strideA, + const double* B, + int ldb, + long long int strideB, + const double* beta, + double* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const double* alpha, + const double* A, + int64_t lda, + long long int strideA, + const double* B, + int64_t ldb, + long long int strideB, + const double* beta, + double* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + long long int strideA, + const cuComplex* B, + int ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + long long int strideA, + const cuComplex* B, + int64_t ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuComplex* alpha, + const cuComplex* A, + int lda, + long long int strideA, + const cuComplex* B, + int ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgemm3mStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + long long int strideA, + const cuComplex* B, + int64_t ldb, + long long int strideB, + const cuComplex* beta, + cuComplex* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmStridedBatched(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + long long int strideA, + const cuDoubleComplex* B, + int ldb, + long long int strideB, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc, + long long int strideC, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgemmStridedBatched_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + long long int 
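+/*
+ * Hypothetical sketch for strided-batched GEMM, the usual choice when every
+ * problem in the batch has the same shape. Batch i reads A + i*strideA and
+ * B + i*strideB and writes C + i*strideC, with strides given in elements:
+ *
+ *   cublasSgemmStridedBatched(h, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
+ *                             &alpha,
+ *                             d_A, m, (long long)m * k,
+ *                             d_B, k, (long long)k * n,
+ *                             &beta,
+ *                             d_C, m, (long long)m * n,
+ *                             batchCount);
+ */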
strideA, + const cuDoubleComplex* B, + int64_t ldb, + long long int strideB, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int64_t ldc, + long long int strideC, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, + const void* const Aarray[], + cudaDataType Atype, + int lda, + const void* const Barray[], + cudaDataType Btype, + int ldb, + const void* beta, + void* const Carray[], + cudaDataType Ctype, + int ldc, + int batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmBatchedEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const void* alpha, + const void* const Aarray[], + cudaDataType Atype, + int64_t lda, + const void* const Barray[], + cudaDataType Btype, + int64_t ldb, + const void* beta, + void* const Carray[], + cudaDataType Ctype, + int64_t ldc, + int64_t batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmStridedBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, + const void* A, + cudaDataType Atype, + int lda, + long long int strideA, + const void* B, + cudaDataType Btype, + int ldb, + long long int strideB, + const void* beta, + void* C, + cudaDataType Ctype, + int ldc, + long long int strideC, + int batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmStridedBatchedEx_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + int64_t k, + const void* alpha, + const void* A, + cudaDataType Atype, + int64_t lda, + long long int strideA, + const void* B, + cudaDataType Btype, + int64_t ldb, + long long int strideB, + const void* beta, + void* C, + cudaDataType Ctype, + int64_t ldc, + long long int strideC, + int64_t batchCount, + cublasComputeType_t computeType, + cublasGemmAlgo_t algo); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmGroupedBatched(cublasHandle_t handle, + const cublasOperation_t transa_array[], + const cublasOperation_t transb_array[], + const int m_array[], + const int n_array[], + const int k_array[], + const float alpha_array[], + const float* const Aarray[], + const int lda_array[], + const float* const Barray[], + const int ldb_array[], + const float beta_array[], + float* const Carray[], + const int ldc_array[], + int group_count, + const int group_size[]); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgemmGroupedBatched_64(cublasHandle_t handle, + const cublasOperation_t transa_array[], + const cublasOperation_t transb_array[], + const int64_t m_array[], + const int64_t n_array[], + const int64_t k_array[], + const float alpha_array[], + const float* const Aarray[], + const int64_t lda_array[], + const float* const Barray[], + const int64_t ldb_array[], + const float beta_array[], + float* const Carray[], + const int64_t ldc_array[], + int64_t group_count, + const int64_t group_size[]); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmGroupedBatched(cublasHandle_t handle, + const cublasOperation_t transa_array[], + const cublasOperation_t transb_array[], + const int m_array[], + const int n_array[], + const int k_array[], + const double alpha_array[], + const 
double* const Aarray[], + const int lda_array[], + const double* const Barray[], + const int ldb_array[], + const double beta_array[], + double* const Carray[], + const int ldc_array[], + int group_count, + const int group_size[]); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgemmGroupedBatched_64(cublasHandle_t handle, + const cublasOperation_t transa_array[], + const cublasOperation_t transb_array[], + const int64_t m_array[], + const int64_t n_array[], + const int64_t k_array[], + const double alpha_array[], + const double* const Aarray[], + const int64_t lda_array[], + const double* const Barray[], + const int64_t ldb_array[], + const double beta_array[], + double* const Carray[], + const int64_t ldc_array[], + int64_t group_count, + const int64_t group_size[]); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmGroupedBatchedEx(cublasHandle_t handle, + const cublasOperation_t transa_array[], + const cublasOperation_t transb_array[], + const int m_array[], + const int n_array[], + const int k_array[], + const void* alpha_array, + const void* const Aarray[], + cudaDataType_t Atype, + const int lda_array[], + const void* const Barray[], + cudaDataType_t Btype, + const int ldb_array[], + const void* beta_array, + void* const Carray[], + cudaDataType_t Ctype, + const int ldc_array[], + int group_count, + const int group_size[], + cublasComputeType_t computeType); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasGemmGroupedBatchedEx_64(cublasHandle_t handle, + const cublasOperation_t transa_array[], + const cublasOperation_t transb_array[], + const int64_t m_array[], + const int64_t n_array[], + const int64_t k_array[], + const void* alpha_array, + const void* const Aarray[], + cudaDataType_t Atype, + const int64_t lda_array[], + const void* const Barray[], + cudaDataType_t Btype, + const int64_t ldb_array[], + const void* beta_array, + void* const Carray[], + cudaDataType_t Ctype, + const int64_t ldc_array[], + int64_t group_count, + const int64_t group_size[], + cublasComputeType_t computeType); + +/* ---------------- CUBLAS BLAS-like Extension ---------------- */ + +/* GEAM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgeam(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const float* alpha, + const float* A, + int lda, + const float* beta, + const float* B, + int ldb, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const float* alpha, + const float* A, + int64_t lda, + const float* beta, + const float* B, + int64_t ldb, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgeam(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const double* alpha, + const double* A, + int lda, + const double* beta, + const double* B, + int ldb, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const double* alpha, + const double* A, + int64_t lda, + const double* beta, + const double* B, + int64_t ldb, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeam(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + const cuComplex* beta, + const cuComplex* B, + int ldb, + cuComplex* 
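[Editor's note] geam computes C = alpha*op(A) + beta*op(B) out of place, which makes it the idiomatic way to transpose a matrix on the GPU: set transa to CUBLAS_OP_T and beta to zero. A hedged sketch (dA and dC must not alias; passing dC as the B operand with transb == CUBLAS_OP_N and ldb == ldc is the in-place pattern the library permits, and B's contents are ignored when beta is zero):

// Out-of-place transpose of a rows x cols column-major matrix: C = A^T.
static cublasStatus_t transpose_sketch(cublasHandle_t h, const float* dA,
                                       float* dC, int rows, int cols) {
  const float alpha = 1.0f, beta = 0.0f;  // beta == 0: B is never read
  return cublasSgeam(h, CUBLAS_OP_T, CUBLAS_OP_N,
                     cols, rows,          // C (and op(A)) is cols x rows
                     &alpha, dA, rows,
                     &beta, dC, cols,     // placeholder B operand
                     dC, cols);
}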
C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* A, + int64_t lda, + const cuComplex* beta, + const cuComplex* B, + int64_t ldb, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeam(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* beta, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeam_64(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* beta, + const cuDoubleComplex* B, + int64_t ldb, + cuDoubleComplex* C, + int64_t ldc); + +/* TRSM - Batched Triangular Solver */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const float* alpha, + const float* const A[], + int lda, + float* const B[], + int ldb, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasStrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const float* alpha, + const float* const A[], + int64_t lda, + float* const B[], + int64_t ldb, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const double* alpha, + const double* const A[], + int lda, + double* const B[], + int ldb, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDtrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const double* alpha, + const double* const A[], + int64_t lda, + double* const B[], + int64_t ldb, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuComplex* alpha, + const cuComplex* const A[], + int lda, + cuComplex* const B[], + int ldb, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCtrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int64_t m, + int64_t n, + const cuComplex* alpha, + const cuComplex* const A[], + int64_t lda, + cuComplex* const B[], + int64_t ldb, + int64_t batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsmBatched(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const A[], + int lda, + cuDoubleComplex* const B[], + int ldb, + int batchCount); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrsmBatched_64(cublasHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + cublasDiagType_t diag, + 
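[Editor's note] Unlike the strided-batched GEMMs, the batched TRSM entry points take device arrays of per-matrix device pointers and solve op(A[i]) * X[i] = alpha * B[i] (or the right-sided form) in place in B[i]. A sketch, assuming the pointer arrays were already populated in device memory:

// Solve L[i] * X[i] = B[i] in place for `batch` lower-triangular systems.
static cublasStatus_t trsm_batched_sketch(cublasHandle_t h,
                                          const float* const* dAarray,
                                          float* const* dBarray,
                                          int n, int nrhs, int batch) {
  const float alpha = 1.0f;
  return cublasStrsmBatched(h, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER,
                            CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT,
                            n, nrhs, &alpha,
                            dAarray, n,   // n x n triangular factors
                            dBarray, n,   // overwritten with the solutions X[i]
                            batch);
}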
int64_t m, + int64_t n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* const A[], + int64_t lda, + cuDoubleComplex* const B[], + int64_t ldb, + int64_t batchCount); + +/* DGMM */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const float* A, + int lda, + const float* x, + int incx, + float* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const float* A, + int64_t lda, + const float* x, + int64_t incx, + float* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const double* A, + int lda, + const double* x, + int incx, + double* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const double* A, + int64_t lda, + const double* x, + int64_t incx, + double* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const cuComplex* A, + int lda, + const cuComplex* x, + int incx, + cuComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const cuComplex* A, + int64_t lda, + const cuComplex* x, + int64_t incx, + cuComplex* C, + int64_t ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdgmm(cublasHandle_t handle, + cublasSideMode_t mode, + int m, + int n, + const cuDoubleComplex* A, + int lda, + const cuDoubleComplex* x, + int incx, + cuDoubleComplex* C, + int ldc); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZdgmm_64(cublasHandle_t handle, + cublasSideMode_t mode, + int64_t m, + int64_t n, + const cuDoubleComplex* A, + int64_t lda, + const cuDoubleComplex* x, + int64_t incx, + cuDoubleComplex* C, + int64_t ldc); + +/* Batched - MATINV*/ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSmatinvBatched(cublasHandle_t handle, + int n, + const float* const A[], + int lda, + float* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDmatinvBatched(cublasHandle_t handle, + int n, + const double* const A[], + int lda, + double* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCmatinvBatched(cublasHandle_t handle, + int n, + const cuComplex* const A[], + int lda, + cuComplex* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZmatinvBatched(cublasHandle_t handle, + int n, + const cuDoubleComplex* const A[], + int lda, + cuDoubleComplex* const Ainv[], + int lda_inv, + int* info, + int batchSize); + +/* Batch QR Factorization */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgeqrfBatched(cublasHandle_t handle, + int m, + int n, + float* const Aarray[], + int lda, + float* const TauArray[], + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgeqrfBatched(cublasHandle_t handle, + int m, + int n, + double* const Aarray[], + int lda, + double* const TauArray[], + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgeqrfBatched(cublasHandle_t handle, + int m, + int n, + cuComplex* const Aarray[], + int lda, + cuComplex* const TauArray[], + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgeqrfBatched(cublasHandle_t 
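[Editor's note] dgmm multiplies by a diagonal matrix held as a vector, so C = diag(x) * A (CUBLAS_SIDE_LEFT, row scaling) or C = A * diag(x) (CUBLAS_SIDE_RIGHT, column scaling) never materializes the diagonal. A sketch of column scaling (names are illustrative):

// Scale column j of an m x n column-major matrix by x[j]: C = A * diag(x).
static cublasStatus_t colscale_sketch(cublasHandle_t h, int m, int n,
                                      const float* dA, const float* dx,
                                      float* dC) {
  return cublasSdgmm(h, CUBLAS_SIDE_RIGHT, m, n, dA, m, dx, 1, dC, m);
}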
handle, + int m, + int n, + cuDoubleComplex* const Aarray[], + int lda, + cuDoubleComplex* const TauArray[], + int* info, + int batchSize); + +/* Least Square Min only m >= n and Non-transpose supported */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + float* const Aarray[], + int lda, + float* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + double* const Aarray[], + int lda, + double* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + cuComplex* const Aarray[], + int lda, + cuComplex* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgelsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int m, + int n, + int nrhs, + cuDoubleComplex* const Aarray[], + int lda, + cuDoubleComplex* const Carray[], + int ldc, + int* info, + int* devInfoArray, + int batchSize); + +/* TPTTR : Triangular Pack format to Triangular format */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasStpttr(cublasHandle_t handle, cublasFillMode_t uplo, int n, const float* AP, float* A, int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDtpttr(cublasHandle_t handle, cublasFillMode_t uplo, int n, const double* AP, double* A, int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCtpttr(cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuComplex* AP, cuComplex* A, int lda); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtpttr( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuDoubleComplex* AP, cuDoubleComplex* A, int lda); + +/* TRTTP : Triangular format to Triangular Pack format */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasStrttp(cublasHandle_t handle, cublasFillMode_t uplo, int n, const float* A, int lda, float* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDtrttp(cublasHandle_t handle, cublasFillMode_t uplo, int n, const double* A, int lda, double* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCtrttp(cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuComplex* A, int lda, cuComplex* AP); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZtrttp( + cublasHandle_t handle, cublasFillMode_t uplo, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* AP); + +/* Batched LU - GETRF*/ + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasSgetrfBatched(cublasHandle_t handle, int n, float* const A[], int lda, int* P, int* info, int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasDgetrfBatched(cublasHandle_t handle, int n, double* const A[], int lda, int* P, int* info, int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI +cublasCgetrfBatched(cublasHandle_t handle, int n, cuComplex* const A[], int lda, int* P, int* info, int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgetrfBatched( + cublasHandle_t handle, int n, cuDoubleComplex* const A[], int lda, int* P, int* info, int batchSize); + +/* Batched inversion based on LU factorization from getrf */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgetriBatched(cublasHandle_t handle, + int n, + const float* const A[], + int lda, + const int* P, + float* const C[], + int ldc, + int* info, + 
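[Editor's note] getriBatched consumes the in-place LU factors and pivot indices produced by getrfBatched and writes the inverses out of place into C[i]; the pointer arrays, pivots, and info flags all live in device memory. A sketch of the two-step batched inversion (array names are illustrative; dPiv holds n*batch ints, dInfo holds batch ints):

// LU-factorize `batch` n x n matrices in place, then form their inverses.
static void invert_batched_sketch(cublasHandle_t h, float* const* dAptrs,
                                  float* const* dCptrs, int* dPiv, int* dInfo,
                                  int n, int batch) {
  cublasSgetrfBatched(h, n, dAptrs, n, dPiv, dInfo, batch);
  // dInfo[i] != 0 signals a singular factor; check it before trusting C[i].
  cublasSgetriBatched(h, n, (const float* const*)dAptrs, n, dPiv,
                      dCptrs, n, dInfo, batch);
}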
int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgetriBatched(cublasHandle_t handle, + int n, + const double* const A[], + int lda, + const int* P, + double* const C[], + int ldc, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgetriBatched(cublasHandle_t handle, + int n, + const cuComplex* const A[], + int lda, + const int* P, + cuComplex* const C[], + int ldc, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgetriBatched(cublasHandle_t handle, + int n, + const cuDoubleComplex* const A[], + int lda, + const int* P, + cuDoubleComplex* const C[], + int ldc, + int* info, + int batchSize); + +/* Batched solver based on LU factorization from getrf */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasSgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const float* const Aarray[], + int lda, + const int* devIpiv, + float* const Barray[], + int ldb, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasDgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const double* const Aarray[], + int lda, + const int* devIpiv, + double* const Barray[], + int ldb, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasCgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const cuComplex* const Aarray[], + int lda, + const int* devIpiv, + cuComplex* const Barray[], + int ldb, + int* info, + int batchSize); + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasZgetrsBatched(cublasHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const cuDoubleComplex* const Aarray[], + int lda, + const int* devIpiv, + cuDoubleComplex* const Barray[], + int ldb, + int* info, + int batchSize); + +/* Deprecated */ + +CUBLASAPI cublasStatus_t CUBLASWINAPI cublasUint8gemmBias(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + cublasOperation_t transc, + int m, + int n, + int k, + const unsigned char* A, + int A_bias, + int lda, + const unsigned char* B, + int B_bias, + int ldb, + unsigned char* C, + int C_bias, + int ldc, + int C_mult, + int C_shift); + +/* }}} cuBLAS Exported API */ + +#if defined(__cplusplus) +} + +static inline cublasStatus_t cublasMigrateComputeType(cublasHandle_t handle, + cudaDataType_t dataType, + cublasComputeType_t* computeType) { + cublasMath_t mathMode = CUBLAS_DEFAULT_MATH; + cublasStatus_t status = CUBLAS_STATUS_SUCCESS; + + status = cublasGetMathMode(handle, &mathMode); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + bool isPedantic = ((mathMode & 0xf) == CUBLAS_PEDANTIC_MATH); + + switch (dataType) { + case CUDA_R_32F: + case CUDA_C_32F: + *computeType = isPedantic ? CUBLAS_COMPUTE_32F_PEDANTIC : CUBLAS_COMPUTE_32F; + return CUBLAS_STATUS_SUCCESS; + case CUDA_R_64F: + case CUDA_C_64F: + *computeType = isPedantic ? CUBLAS_COMPUTE_64F_PEDANTIC : CUBLAS_COMPUTE_64F; + return CUBLAS_STATUS_SUCCESS; + case CUDA_R_16F: + *computeType = isPedantic ? CUBLAS_COMPUTE_16F_PEDANTIC : CUBLAS_COMPUTE_16F; + return CUBLAS_STATUS_SUCCESS; + case CUDA_R_32I: + *computeType = isPedantic ? 
CUBLAS_COMPUTE_32I_PEDANTIC : CUBLAS_COMPUTE_32I; + return CUBLAS_STATUS_SUCCESS; + default: + return CUBLAS_STATUS_NOT_SUPPORTED; + } +} +/* wrappers to accept old code with cudaDataType computeType when referenced from c++ code */ +static inline cublasStatus_t cublasGemmEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, /* host or device pointer */ + const void* A, + cudaDataType Atype, + int lda, + const void* B, + cudaDataType Btype, + int ldb, + const void* beta, /* host or device pointer */ + void* C, + cudaDataType Ctype, + int ldc, + cudaDataType computeType, + cublasGemmAlgo_t algo) { + cublasComputeType_t migratedComputeType = CUBLAS_COMPUTE_32F; + cublasStatus_t status = CUBLAS_STATUS_SUCCESS; + status = cublasMigrateComputeType(handle, computeType, &migratedComputeType); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + return cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + alpha, + A, + Atype, + lda, + B, + Btype, + ldb, + beta, + C, + Ctype, + ldc, + migratedComputeType, + algo); +} + +static inline cublasStatus_t cublasGemmBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, /* host or device pointer */ + const void* const Aarray[], + cudaDataType Atype, + int lda, + const void* const Barray[], + cudaDataType Btype, + int ldb, + const void* beta, /* host or device pointer */ + void* const Carray[], + cudaDataType Ctype, + int ldc, + int batchCount, + cudaDataType computeType, + cublasGemmAlgo_t algo) { + cublasComputeType_t migratedComputeType; + cublasStatus_t status; + status = cublasMigrateComputeType(handle, computeType, &migratedComputeType); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + return cublasGemmBatchedEx(handle, + transa, + transb, + m, + n, + k, + alpha, + Aarray, + Atype, + lda, + Barray, + Btype, + ldb, + beta, + Carray, + Ctype, + ldc, + batchCount, + migratedComputeType, + algo); +} + +static inline cublasStatus_t cublasGemmStridedBatchedEx(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const void* alpha, /* host or device pointer */ + const void* A, + cudaDataType Atype, + int lda, + long long int strideA, /* purposely signed */ + const void* B, + cudaDataType Btype, + int ldb, + long long int strideB, + const void* beta, /* host or device pointer */ + void* C, + cudaDataType Ctype, + int ldc, + long long int strideC, + int batchCount, + cudaDataType computeType, + cublasGemmAlgo_t algo) { + cublasComputeType_t migratedComputeType; + cublasStatus_t status; + status = cublasMigrateComputeType(handle, computeType, &migratedComputeType); + if (status != CUBLAS_STATUS_SUCCESS) { + return status; + } + + return cublasGemmStridedBatchedEx(handle, + transa, + transb, + m, + n, + k, + alpha, + A, + Atype, + lda, + strideA, + B, + Btype, + ldb, + strideB, + beta, + C, + Ctype, + ldc, + strideC, + batchCount, + migratedComputeType, + algo); +} +#endif /* __cplusplus */ + +#endif /* !defined(CUBLAS_API_H_) */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas_v2.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..bd81a3b1d8e7e3d04d6c54f4c0640af7d8893eab --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/cublas_v2.h @@ -0,0 
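[Editor's note] The inline C++ shims above keep pre-cublasComputeType_t call sites compiling: cublasMigrateComputeType folds the handle's math mode into the new enum (CUDA_R_32F becomes CUBLAS_COMPUTE_32F, or CUBLAS_COMPUTE_32F_PEDANTIC under CUBLAS_PEDANTIC_MATH), and the shim then forwards to the real entry point. A hedged sketch of such a legacy call site in a C++ translation unit:

// Legacy-style call: computeType still passed as a cudaDataType. Overload
// resolution selects the inline shim, which migrates CUDA_R_32F to
// CUBLAS_COMPUTE_32F before dispatching to the real cublasGemmEx.
static cublasStatus_t legacy_gemm_sketch(cublasHandle_t h, int m, int n, int k,
                                         const float* dA, const float* dB,
                                         float* dC) {
  const float alpha = 1.0f, beta = 0.0f;
  return cublasGemmEx(h, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha,
                      dA, CUDA_R_32F, m, dB, CUDA_R_32F, k, &beta,
                      dC, CUDA_R_32F, m,
                      CUDA_R_32F,  // legacy computeType
                      CUBLAS_GEMM_DEFAULT);
}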
+1,478 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * This is the public header file for the new CUBLAS library API, it mapped the generic + * Cublas name functions to the actual _v2 implementations. 
+ */ + +#if !defined(CUBLAS_V2_H_) +#define CUBLAS_V2_H_ + +#if defined(CUBLAS_H_) +#error "It is an error to include both cublas.h and cublas_v2.h" +#endif + +#undef CUBLASAPI +#ifdef __CUDACC__ +#define CUBLASAPI __host__ __device__ +#else +#define CUBLASAPI +#endif + +#include "cublas_api.h" + +#define cublasCreate cublasCreate_v2 +#define cublasDestroy cublasDestroy_v2 +#define cublasGetVersion cublasGetVersion_v2 +#define cublasSetWorkspace cublasSetWorkspace_v2 +#define cublasSetStream cublasSetStream_v2 +#define cublasGetStream cublasGetStream_v2 +#define cublasGetPointerMode cublasGetPointerMode_v2 +#define cublasSetPointerMode cublasSetPointerMode_v2 + +/* 32-bit integer */ + +/* Blas1 Routines */ + +#define cublasSnrm2 cublasSnrm2_v2 +#define cublasDnrm2 cublasDnrm2_v2 +#define cublasScnrm2 cublasScnrm2_v2 +#define cublasDznrm2 cublasDznrm2_v2 + +#define cublasSdot cublasSdot_v2 +#define cublasDdot cublasDdot_v2 +#define cublasCdotu cublasCdotu_v2 +#define cublasCdotc cublasCdotc_v2 +#define cublasZdotu cublasZdotu_v2 +#define cublasZdotc cublasZdotc_v2 + +#define cublasSscal cublasSscal_v2 +#define cublasDscal cublasDscal_v2 +#define cublasCscal cublasCscal_v2 +#define cublasCsscal cublasCsscal_v2 +#define cublasZscal cublasZscal_v2 +#define cublasZdscal cublasZdscal_v2 + +#define cublasSaxpy cublasSaxpy_v2 +#define cublasDaxpy cublasDaxpy_v2 +#define cublasCaxpy cublasCaxpy_v2 +#define cublasZaxpy cublasZaxpy_v2 + +#define cublasScopy cublasScopy_v2 +#define cublasDcopy cublasDcopy_v2 +#define cublasCcopy cublasCcopy_v2 +#define cublasZcopy cublasZcopy_v2 + +#define cublasSswap cublasSswap_v2 +#define cublasDswap cublasDswap_v2 +#define cublasCswap cublasCswap_v2 +#define cublasZswap cublasZswap_v2 + +#define cublasIsamax cublasIsamax_v2 +#define cublasIdamax cublasIdamax_v2 +#define cublasIcamax cublasIcamax_v2 +#define cublasIzamax cublasIzamax_v2 + +#define cublasIsamin cublasIsamin_v2 +#define cublasIdamin cublasIdamin_v2 +#define cublasIcamin cublasIcamin_v2 +#define cublasIzamin cublasIzamin_v2 + +#define cublasSasum cublasSasum_v2 +#define cublasDasum cublasDasum_v2 +#define cublasScasum cublasScasum_v2 +#define cublasDzasum cublasDzasum_v2 + +#define cublasSrot cublasSrot_v2 +#define cublasDrot cublasDrot_v2 +#define cublasCrot cublasCrot_v2 +#define cublasCsrot cublasCsrot_v2 +#define cublasZrot cublasZrot_v2 +#define cublasZdrot cublasZdrot_v2 + +#define cublasSrotg cublasSrotg_v2 +#define cublasDrotg cublasDrotg_v2 +#define cublasCrotg cublasCrotg_v2 +#define cublasZrotg cublasZrotg_v2 + +#define cublasSrotm cublasSrotm_v2 +#define cublasDrotm cublasDrotm_v2 + +#define cublasSrotmg cublasSrotmg_v2 +#define cublasDrotmg cublasDrotmg_v2 + +/* Blas2 Routines */ + +#define cublasSgemv cublasSgemv_v2 +#define cublasDgemv cublasDgemv_v2 +#define cublasCgemv cublasCgemv_v2 +#define cublasZgemv cublasZgemv_v2 + +#define cublasSgbmv cublasSgbmv_v2 +#define cublasDgbmv cublasDgbmv_v2 +#define cublasCgbmv cublasCgbmv_v2 +#define cublasZgbmv cublasZgbmv_v2 + +#define cublasStrmv cublasStrmv_v2 +#define cublasDtrmv cublasDtrmv_v2 +#define cublasCtrmv cublasCtrmv_v2 +#define cublasZtrmv cublasZtrmv_v2 + +#define cublasStbmv cublasStbmv_v2 +#define cublasDtbmv cublasDtbmv_v2 +#define cublasCtbmv cublasCtbmv_v2 +#define cublasZtbmv cublasZtbmv_v2 + +#define cublasStpmv cublasStpmv_v2 +#define cublasDtpmv cublasDtpmv_v2 +#define cublasCtpmv cublasCtpmv_v2 +#define cublasZtpmv cublasZtpmv_v2 + +#define cublasStrsv cublasStrsv_v2 +#define cublasDtrsv cublasDtrsv_v2 +#define cublasCtrsv 
cublasCtrsv_v2 +#define cublasZtrsv cublasZtrsv_v2 + +#define cublasStpsv cublasStpsv_v2 +#define cublasDtpsv cublasDtpsv_v2 +#define cublasCtpsv cublasCtpsv_v2 +#define cublasZtpsv cublasZtpsv_v2 + +#define cublasStbsv cublasStbsv_v2 +#define cublasDtbsv cublasDtbsv_v2 +#define cublasCtbsv cublasCtbsv_v2 +#define cublasZtbsv cublasZtbsv_v2 + +#define cublasSsymv cublasSsymv_v2 +#define cublasDsymv cublasDsymv_v2 +#define cublasCsymv cublasCsymv_v2 +#define cublasZsymv cublasZsymv_v2 +#define cublasChemv cublasChemv_v2 +#define cublasZhemv cublasZhemv_v2 + +#define cublasSsbmv cublasSsbmv_v2 +#define cublasDsbmv cublasDsbmv_v2 +#define cublasChbmv cublasChbmv_v2 +#define cublasZhbmv cublasZhbmv_v2 + +#define cublasSspmv cublasSspmv_v2 +#define cublasDspmv cublasDspmv_v2 +#define cublasChpmv cublasChpmv_v2 +#define cublasZhpmv cublasZhpmv_v2 + +#define cublasSger cublasSger_v2 +#define cublasDger cublasDger_v2 +#define cublasCgeru cublasCgeru_v2 +#define cublasCgerc cublasCgerc_v2 +#define cublasZgeru cublasZgeru_v2 +#define cublasZgerc cublasZgerc_v2 + +#define cublasSsyr cublasSsyr_v2 +#define cublasDsyr cublasDsyr_v2 +#define cublasCsyr cublasCsyr_v2 +#define cublasZsyr cublasZsyr_v2 +#define cublasCher cublasCher_v2 +#define cublasZher cublasZher_v2 + +#define cublasSspr cublasSspr_v2 +#define cublasDspr cublasDspr_v2 +#define cublasChpr cublasChpr_v2 +#define cublasZhpr cublasZhpr_v2 + +#define cublasSsyr2 cublasSsyr2_v2 +#define cublasDsyr2 cublasDsyr2_v2 +#define cublasCsyr2 cublasCsyr2_v2 +#define cublasZsyr2 cublasZsyr2_v2 +#define cublasCher2 cublasCher2_v2 +#define cublasZher2 cublasZher2_v2 + +#define cublasSspr2 cublasSspr2_v2 +#define cublasDspr2 cublasDspr2_v2 +#define cublasChpr2 cublasChpr2_v2 +#define cublasZhpr2 cublasZhpr2_v2 + +/* Blas3 Routines */ + +#define cublasSgemm cublasSgemm_v2 +#define cublasDgemm cublasDgemm_v2 +#define cublasCgemm cublasCgemm_v2 +#define cublasZgemm cublasZgemm_v2 + +#define cublasSsyrk cublasSsyrk_v2 +#define cublasDsyrk cublasDsyrk_v2 +#define cublasCsyrk cublasCsyrk_v2 +#define cublasZsyrk cublasZsyrk_v2 +#define cublasCherk cublasCherk_v2 +#define cublasZherk cublasZherk_v2 + +#define cublasSsyr2k cublasSsyr2k_v2 +#define cublasDsyr2k cublasDsyr2k_v2 +#define cublasCsyr2k cublasCsyr2k_v2 +#define cublasZsyr2k cublasZsyr2k_v2 +#define cublasCher2k cublasCher2k_v2 +#define cublasZher2k cublasZher2k_v2 + +#define cublasSsymm cublasSsymm_v2 +#define cublasDsymm cublasDsymm_v2 +#define cublasCsymm cublasCsymm_v2 +#define cublasZsymm cublasZsymm_v2 +#define cublasChemm cublasChemm_v2 +#define cublasZhemm cublasZhemm_v2 + +#define cublasStrsm cublasStrsm_v2 +#define cublasDtrsm cublasDtrsm_v2 +#define cublasCtrsm cublasCtrsm_v2 +#define cublasZtrsm cublasZtrsm_v2 + +#define cublasStrmm cublasStrmm_v2 +#define cublasDtrmm cublasDtrmm_v2 +#define cublasCtrmm cublasCtrmm_v2 +#define cublasZtrmm cublasZtrmm_v2 + +/* 64-bit integer */ + +/* Blas1 Routines */ + +#define cublasSnrm2_64 cublasSnrm2_v2_64 +#define cublasDnrm2_64 cublasDnrm2_v2_64 +#define cublasScnrm2_64 cublasScnrm2_v2_64 +#define cublasDznrm2_64 cublasDznrm2_v2_64 + +#define cublasSdot_64 cublasSdot_v2_64 +#define cublasDdot_64 cublasDdot_v2_64 +#define cublasCdotu_64 cublasCdotu_v2_64 +#define cublasCdotc_64 cublasCdotc_v2_64 +#define cublasZdotu_64 cublasZdotu_v2_64 +#define cublasZdotc_64 cublasZdotc_v2_64 + +#define cublasSscal_64 cublasSscal_v2_64 +#define cublasDscal_64 cublasDscal_v2_64 +#define cublasCscal_64 cublasCscal_v2_64 +#define cublasCsscal_64 cublasCsscal_v2_64 
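[Editor's note] These defines are pure token renames performed by the preprocessor: sources that include cublas_v2.h and use the classic names link against the _v2 symbols (and the *_64 names against the _v2_64 symbols), so adopting the v2 ABI requires no source edits. For instance, under the mappings above the call in this sketch compiles to cublasSaxpy_v2:

// y = a*x + y on n device floats; the name cublasSaxpy is rewritten to
// cublasSaxpy_v2 by the #define table above.
static cublasStatus_t axpy_sketch(cublasHandle_t h, int n, float a,
                                  const float* dx, float* dy) {
  return cublasSaxpy(h, n, &a, dx, 1, dy, 1);
}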
+#define cublasZscal_64 cublasZscal_v2_64 +#define cublasZdscal_64 cublasZdscal_v2_64 + +#define cublasSaxpy_64 cublasSaxpy_v2_64 +#define cublasDaxpy_64 cublasDaxpy_v2_64 +#define cublasCaxpy_64 cublasCaxpy_v2_64 +#define cublasZaxpy_64 cublasZaxpy_v2_64 + +#define cublasScopy_64 cublasScopy_v2_64 +#define cublasDcopy_64 cublasDcopy_v2_64 +#define cublasCcopy_64 cublasCcopy_v2_64 +#define cublasZcopy_64 cublasZcopy_v2_64 + +#define cublasSswap_64 cublasSswap_v2_64 +#define cublasDswap_64 cublasDswap_v2_64 +#define cublasCswap_64 cublasCswap_v2_64 +#define cublasZswap_64 cublasZswap_v2_64 + +#define cublasIsamax_64 cublasIsamax_v2_64 +#define cublasIdamax_64 cublasIdamax_v2_64 +#define cublasIcamax_64 cublasIcamax_v2_64 +#define cublasIzamax_64 cublasIzamax_v2_64 + +#define cublasIsamin_64 cublasIsamin_v2_64 +#define cublasIdamin_64 cublasIdamin_v2_64 +#define cublasIcamin_64 cublasIcamin_v2_64 +#define cublasIzamin_64 cublasIzamin_v2_64 + +#define cublasSasum_64 cublasSasum_v2_64 +#define cublasDasum_64 cublasDasum_v2_64 +#define cublasScasum_64 cublasScasum_v2_64 +#define cublasDzasum_64 cublasDzasum_v2_64 + +#define cublasSrot_64 cublasSrot_v2_64 +#define cublasDrot_64 cublasDrot_v2_64 +#define cublasCrot_64 cublasCrot_v2_64 +#define cublasCsrot_64 cublasCsrot_v2_64 +#define cublasZrot_64 cublasZrot_v2_64 +#define cublasZdrot_64 cublasZdrot_v2_64 + +#define cublasSrotg_64 cublasSrotg_v2_64 +#define cublasDrotg_64 cublasDrotg_v2_64 +#define cublasCrotg_64 cublasCrotg_v2_64 +#define cublasZrotg_64 cublasZrotg_v2_64 + +#define cublasSrotm_64 cublasSrotm_v2_64 +#define cublasDrotm_64 cublasDrotm_v2_64 + +#define cublasSrotmg_64 cublasSrotmg_v2_64 +#define cublasDrotmg_64 cublasDrotmg_v2_64 + +/* Blas2 Routines */ + +#define cublasSgemv_64 cublasSgemv_v2_64 +#define cublasDgemv_64 cublasDgemv_v2_64 +#define cublasCgemv_64 cublasCgemv_v2_64 +#define cublasZgemv_64 cublasZgemv_v2_64 + +#define cublasSgbmv_64 cublasSgbmv_v2_64 +#define cublasDgbmv_64 cublasDgbmv_v2_64 +#define cublasCgbmv_64 cublasCgbmv_v2_64 +#define cublasZgbmv_64 cublasZgbmv_v2_64 + +#define cublasStrmv_64 cublasStrmv_v2_64 +#define cublasDtrmv_64 cublasDtrmv_v2_64 +#define cublasCtrmv_64 cublasCtrmv_v2_64 +#define cublasZtrmv_64 cublasZtrmv_v2_64 + +#define cublasStbmv_64 cublasStbmv_v2_64 +#define cublasDtbmv_64 cublasDtbmv_v2_64 +#define cublasCtbmv_64 cublasCtbmv_v2_64 +#define cublasZtbmv_64 cublasZtbmv_v2_64 + +#define cublasStpmv_64 cublasStpmv_v2_64 +#define cublasDtpmv_64 cublasDtpmv_v2_64 +#define cublasCtpmv_64 cublasCtpmv_v2_64 +#define cublasZtpmv_64 cublasZtpmv_v2_64 + +#define cublasStrsv_64 cublasStrsv_v2_64 +#define cublasDtrsv_64 cublasDtrsv_v2_64 +#define cublasCtrsv_64 cublasCtrsv_v2_64 +#define cublasZtrsv_64 cublasZtrsv_v2_64 + +#define cublasStpsv_64 cublasStpsv_v2_64 +#define cublasDtpsv_64 cublasDtpsv_v2_64 +#define cublasCtpsv_64 cublasCtpsv_v2_64 +#define cublasZtpsv_64 cublasZtpsv_v2_64 + +#define cublasStbsv_64 cublasStbsv_v2_64 +#define cublasDtbsv_64 cublasDtbsv_v2_64 +#define cublasCtbsv_64 cublasCtbsv_v2_64 +#define cublasZtbsv_64 cublasZtbsv_v2_64 + +#define cublasSsymv_64 cublasSsymv_v2_64 +#define cublasDsymv_64 cublasDsymv_v2_64 +#define cublasCsymv_64 cublasCsymv_v2_64 +#define cublasZsymv_64 cublasZsymv_v2_64 +#define cublasChemv_64 cublasChemv_v2_64 +#define cublasZhemv_64 cublasZhemv_v2_64 + +#define cublasSsbmv_64 cublasSsbmv_v2_64 +#define cublasDsbmv_64 cublasDsbmv_v2_64 +#define cublasChbmv_64 cublasChbmv_v2_64 +#define cublasZhbmv_64 cublasZhbmv_v2_64 + +#define cublasSspmv_64 
cublasSspmv_v2_64 +#define cublasDspmv_64 cublasDspmv_v2_64 +#define cublasChpmv_64 cublasChpmv_v2_64 +#define cublasZhpmv_64 cublasZhpmv_v2_64 + +#define cublasSger_64 cublasSger_v2_64 +#define cublasDger_64 cublasDger_v2_64 +#define cublasCgeru_64 cublasCgeru_v2_64 +#define cublasCgerc_64 cublasCgerc_v2_64 +#define cublasZgeru_64 cublasZgeru_v2_64 +#define cublasZgerc_64 cublasZgerc_v2_64 + +#define cublasSsyr_64 cublasSsyr_v2_64 +#define cublasDsyr_64 cublasDsyr_v2_64 +#define cublasCsyr_64 cublasCsyr_v2_64 +#define cublasZsyr_64 cublasZsyr_v2_64 +#define cublasCher_64 cublasCher_v2_64 +#define cublasZher_64 cublasZher_v2_64 + +#define cublasSspr_64 cublasSspr_v2_64 +#define cublasDspr_64 cublasDspr_v2_64 +#define cublasChpr_64 cublasChpr_v2_64 +#define cublasZhpr_64 cublasZhpr_v2_64 + +#define cublasSsyr2_64 cublasSsyr2_v2_64 +#define cublasDsyr2_64 cublasDsyr2_v2_64 +#define cublasCsyr2_64 cublasCsyr2_v2_64 +#define cublasZsyr2_64 cublasZsyr2_v2_64 +#define cublasCher2_64 cublasCher2_v2_64 +#define cublasZher2_64 cublasZher2_v2_64 + +#define cublasSspr2_64 cublasSspr2_v2_64 +#define cublasDspr2_64 cublasDspr2_v2_64 +#define cublasChpr2_64 cublasChpr2_v2_64 +#define cublasZhpr2_64 cublasZhpr2_v2_64 + +/* Blas3 Routines */ + +#define cublasSgemm_64 cublasSgemm_v2_64 +#define cublasDgemm_64 cublasDgemm_v2_64 +#define cublasCgemm_64 cublasCgemm_v2_64 +#define cublasZgemm_64 cublasZgemm_v2_64 + +#define cublasSsyrk_64 cublasSsyrk_v2_64 +#define cublasDsyrk_64 cublasDsyrk_v2_64 +#define cublasCsyrk_64 cublasCsyrk_v2_64 +#define cublasZsyrk_64 cublasZsyrk_v2_64 +#define cublasCherk_64 cublasCherk_v2_64 +#define cublasZherk_64 cublasZherk_v2_64 + +#define cublasSsyr2k_64 cublasSsyr2k_v2_64 +#define cublasDsyr2k_64 cublasDsyr2k_v2_64 +#define cublasCsyr2k_64 cublasCsyr2k_v2_64 +#define cublasZsyr2k_64 cublasZsyr2k_v2_64 +#define cublasCher2k_64 cublasCher2k_v2_64 +#define cublasZher2k_64 cublasZher2k_v2_64 + +#define cublasSsymm_64 cublasSsymm_v2_64 +#define cublasDsymm_64 cublasDsymm_v2_64 +#define cublasCsymm_64 cublasCsymm_v2_64 +#define cublasZsymm_64 cublasZsymm_v2_64 +#define cublasChemm_64 cublasChemm_v2_64 +#define cublasZhemm_64 cublasZhemm_v2_64 + +#define cublasStrsm_64 cublasStrsm_v2_64 +#define cublasDtrsm_64 cublasDtrsm_v2_64 +#define cublasCtrsm_64 cublasCtrsm_v2_64 +#define cublasZtrsm_64 cublasZtrsm_v2_64 + +#define cublasStrmm_64 cublasStrmm_v2_64 +#define cublasDtrmm_64 cublasDtrmm_v2_64 +#define cublasCtrmm_64 cublasCtrmm_v2_64 +#define cublasZtrmm_64 cublasZtrmm_v2_64 + +#endif /* !defined(CUBLAS_V2_H_) */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/nvblas.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/nvblas.h new file mode 100644 index 0000000000000000000000000000000000000000..29ea9153faf7b3e62a6d53c0be1980ae79c49f51 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/include/nvblas.h @@ -0,0 +1,824 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(NVBLAS_H_) +#define NVBLAS_H_ + +#include "driver_types.h" +#include "cuComplex.h" /* import complex data type */ + +#if defined(__cplusplus) +extern "C" { +#endif + +/* GEMM */ +void sgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void cgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zgemm_(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +void sgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void cgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zgemm(const char* transa, + const char* transb, + const int* m, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* SYRK */ +void ssyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* beta, + float* c, + const int* ldc); + +void dsyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* beta, + double* c, + const int* ldc); + +void csyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyrk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +void ssyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* beta, + float* c, + const int* ldc); + +void dsyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const 
double* beta, + double* c, + const int* ldc); + +void csyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyrk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HERK */ +void cherk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const cuComplex* a, + const int* lda, + const float* beta, + cuComplex* c, + const int* ldc); + +void zherk_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const cuDoubleComplex* a, + const int* lda, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +void cherk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const cuComplex* a, + const int* lda, + const float* beta, + cuComplex* c, + const int* ldc); + +void zherk(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const cuDoubleComplex* a, + const int* lda, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +/* TRSM */ +void strsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrsm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +void strsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrsm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +/* SYMM */ +void ssymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* 
lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsymm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +void ssymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsymm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HEMM */ +void chemm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zhemm_(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HEMM with no underscore*/ +void chemm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zhemm(const char* side, + const char* uplo, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* SYR2K */ +void ssyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyr2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + 
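[Editor's note] Every NVBLAS entry point keeps the reference-BLAS Fortran convention: all arguments, scalars and dimensions included, are passed by pointer, data is column-major, and each routine is exported both with and without the trailing underscore. That is what allows libnvblas.so to be interposed (for example via LD_PRELOAD, with an nvblas.conf naming a fallback CPU BLAS) under unmodified binaries. Called directly from C++, a GEMM looks like this sketch (names are illustrative):

// Plain Fortran-convention dgemm_ call: C = A*B for column-major m x k and
// k x n inputs. With NVBLAS preloaded, eligible sizes run on the GPU and the
// rest fall through to the CPU BLAS named in nvblas.conf.
static void nvblas_dgemm_sketch(const double* a, const double* b, double* c,
                                int m, int n, int k) {
  const double alpha = 1.0, beta = 0.0;
  dgemm_("N", "N", &m, &n, &k, &alpha, a, &m, b, &k, &beta, c, &m);
}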
const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* SYR2K no_underscore*/ +void ssyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const float* alpha, + const float* a, + const int* lda, + const float* b, + const int* ldb, + const float* beta, + float* c, + const int* ldc); + +void dsyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const double* alpha, + const double* a, + const int* lda, + const double* b, + const int* ldb, + const double* beta, + double* c, + const int* ldc); + +void csyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const cuComplex* beta, + cuComplex* c, + const int* ldc); + +void zsyr2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HERK */ +void cher2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const float* beta, + cuComplex* c, + const int* ldc); + +void zher2k_(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +/* HER2K with no underscore */ +void cher2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + const cuComplex* b, + const int* ldb, + const float* beta, + cuComplex* c, + const int* ldc); + +void zher2k(const char* uplo, + const char* trans, + const int* n, + const int* k, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + const cuDoubleComplex* b, + const int* ldb, + const double* beta, + cuDoubleComplex* c, + const int* ldc); + +/* TRMM */ +void strmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrmm_(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +void strmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const float* alpha, + const float* a, + const int* lda, + float* b, + const int* ldb); + +void dtrmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* 
m, + const int* n, + const double* alpha, + const double* a, + const int* lda, + double* b, + const int* ldb); + +void ctrmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuComplex* alpha, + const cuComplex* a, + const int* lda, + cuComplex* b, + const int* ldb); + +void ztrmm(const char* side, + const char* uplo, + const char* transa, + const char* diag, + const int* m, + const int* n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* a, + const int* lda, + cuDoubleComplex* b, + const int* ldb); + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#endif /* !defined(NVBLAS_H_) */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e6a230f2a3aafb60bc07d6da5f723d3dfefacbd Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublas.so.12 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublas.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..37ae7f7100dcd51f8cf4a434df51bb157f6a7a76 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublas.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9343511e1d2ed51e4d65fa94a25cb8f73b0ebc5ae5487da794e18e6950c98dc +size 108244960 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublasLt.so.12 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublasLt.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..1c6666d3d1a474473f73dfe22fd8e12524aab640 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublasLt.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4ddaed1410acc604815f0e84cdd6c4a70c61cfd2b95dc96360f0799169f0374 +size 491106832 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libnvblas.so.12 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libnvblas.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..72b9c3dcec9e80733fc9f8ff87c0d67d757130db --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libnvblas.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bd3351776383a299aeb96dc3aa218f764ad472a4b1972ec02b89c1a5d276b92 +size 757496 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1e7dd7c4919856fad82d72ce751f8a9ecdc57c0 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/__pycache__/__init__.cpython-312.pyc differ diff --git 
a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h new file mode 100644 index 0000000000000000000000000000000000000000..b7ea50da7beb2187e77f7606dd70faed0e4b4add --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openacc/cupti_openacc.h @@ -0,0 +1,98 @@ +/* + * Copyright 2017 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#include <cupti_result.h> + +#if !defined(_CUPTI_OPENACC_H_) +#define _CUPTI_OPENACC_H_ + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__LP64__) +#define CUPTILP64 1 +#elif defined(_WIN64) +#define CUPTILP64 1 +#else +#undef CUPTILP64 +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \brief Initialize OpenACC support + * + * \param profRegister function of type acc_prof_reg as obtained from acc_register_library + * \param profUnregister function of type acc_prof_reg as obtained from acc_register_library + * \param profLookup function of type acc_prof_lookup as obtained from acc_register_library + */ +CUptiResult CUPTIAPI +cuptiOpenACCInitialize(void *profRegister, void *profUnregister, void *profLookup); + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_OPENACC_H_*/ + diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h new file mode 100644 index 0000000000000000000000000000000000000000..303dd42878fb02774d872c197ccc27b17f2af69e --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openmp/cupti_openmp.h @@ -0,0 +1,100 @@ +/* + * Copyright 2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S.
Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#include <cupti_result.h> +#include "Openmp/omp-tools.h" + +#if !defined(_CUPTI_OPENMP_H_) +#define _CUPTI_OPENMP_H_ + +#ifndef CUPTIAPI +#ifdef _WIN32 +#define CUPTIAPI __stdcall +#else +#define CUPTIAPI +#endif +#endif + +#if defined(__LP64__) +#define CUPTILP64 1 +#elif defined(_WIN64) +#define CUPTILP64 1 +#else +#undef CUPTILP64 +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \brief Initialize OpenMP support (deprecated, used before OpenMP 5.0) + * + */ +int CUPTIAPI cuptiOpenMpInitialize(ompt_function_lookup_t ompt_fn_lookup, const char *runtime_version, unsigned int ompt_version); + +/** + * \brief Initialize OpenMP support + * + */ +int CUPTIAPI cuptiOpenMpInitialize_v2(ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data); + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /*_CUPTI_OPENMP_H_*/ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h new file mode 100644 index 0000000000000000000000000000000000000000..276967d07e8f8c0f7686e5b3b15151edf2415ae7 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/Openmp/omp-tools.h @@ -0,0 +1,1083 @@ +/* + * include/50/omp-tools.h.var + */ + +//===----------------------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.txt for details.
+// +//===----------------------------------------------------------------------===// + +#ifndef __OMPT__ +#define __OMPT__ + +/***************************************************************************** + * system include files + *****************************************************************************/ + +#include <stdint.h> +#include <stddef.h> + +/***************************************************************************** + * iteration macros + *****************************************************************************/ + +#define FOREACH_OMPT_INQUIRY_FN(macro) \ + macro (ompt_enumerate_states) \ + macro (ompt_enumerate_mutex_impls) \ + \ + macro (ompt_set_callback) \ + macro (ompt_get_callback) \ + \ + macro (ompt_get_state) \ + \ + macro (ompt_get_parallel_info) \ + macro (ompt_get_task_info) \ + macro (ompt_get_task_memory) \ + macro (ompt_get_thread_data) \ + macro (ompt_get_unique_id) \ + macro (ompt_finalize_tool) \ + \ + macro(ompt_get_num_procs) \ + macro(ompt_get_num_places) \ + macro(ompt_get_place_proc_ids) \ + macro(ompt_get_place_num) \ + macro(ompt_get_partition_place_nums) \ + macro(ompt_get_proc_id) \ + \ + macro(ompt_get_target_info) \ + macro(ompt_get_num_devices) + +#define FOREACH_OMPT_STATE(macro) \ + \ + /* first available state */ \ + macro (ompt_state_undefined, 0x102) /* undefined thread state */ \ + \ + /* work states (0..15) */ \ + macro (ompt_state_work_serial, 0x000) /* working outside parallel */ \ + macro (ompt_state_work_parallel, 0x001) /* working within parallel */ \ + macro (ompt_state_work_reduction, 0x002) /* performing a reduction */ \ + \ + /* barrier wait states (16..31) */ \ + macro (ompt_state_wait_barrier, 0x010) /* waiting at a barrier */ \ + macro (ompt_state_wait_barrier_implicit_parallel, 0x011) \ + /* implicit barrier at the end of parallel region */\ + macro (ompt_state_wait_barrier_implicit_workshare, 0x012) \ + /* implicit barrier at the end of worksharing */ \ + macro (ompt_state_wait_barrier_implicit, 0x013) /* implicit barrier */ \ + macro (ompt_state_wait_barrier_explicit, 0x014) /* explicit barrier */ \ + \ + /* task wait states (32..63) */ \ + macro (ompt_state_wait_taskwait, 0x020) /* waiting at a taskwait */ \ + macro (ompt_state_wait_taskgroup, 0x021) /* waiting at a taskgroup */ \ + \ + /* mutex wait states (64..127) */ \ + macro (ompt_state_wait_mutex, 0x040) \ + macro (ompt_state_wait_lock, 0x041) /* waiting for lock */ \ + macro (ompt_state_wait_critical, 0x042) /* waiting for critical */ \ + macro (ompt_state_wait_atomic, 0x043) /* waiting for atomic */ \ + macro (ompt_state_wait_ordered, 0x044) /* waiting for ordered */ \ + \ + /* target wait states (128..255) */ \ + macro (ompt_state_wait_target, 0x080) /* waiting for target region */ \ + macro (ompt_state_wait_target_map, 0x081) /* waiting for target data mapping operation */ \ + macro (ompt_state_wait_target_update, 0x082) /* waiting for target update operation */ \ + \ + /* misc (256..511) */ \ + macro (ompt_state_idle, 0x100) /* waiting for work */ \ + macro (ompt_state_overhead, 0x101) /* overhead excluding wait states */ \ + \ + /* implementation-specific states (512..)
*/ + + +#define FOREACH_KMP_MUTEX_IMPL(macro) \ + macro (kmp_mutex_impl_none, 0) /* unknown implementation */ \ + macro (kmp_mutex_impl_spin, 1) /* based on spin */ \ + macro (kmp_mutex_impl_queuing, 2) /* based on some fair policy */ \ + macro (kmp_mutex_impl_speculative, 3) /* based on HW-supported speculation */ + +#define FOREACH_OMPT_EVENT(macro) \ + \ + /*--- Mandatory Events ---*/ \ + macro (ompt_callback_thread_begin, ompt_callback_thread_begin_t, 1) /* thread begin */ \ + macro (ompt_callback_thread_end, ompt_callback_thread_end_t, 2) /* thread end */ \ + \ + macro (ompt_callback_parallel_begin, ompt_callback_parallel_begin_t, 3) /* parallel begin */ \ + macro (ompt_callback_parallel_end, ompt_callback_parallel_end_t, 4) /* parallel end */ \ + \ + macro (ompt_callback_task_create, ompt_callback_task_create_t, 5) /* task begin */ \ + macro (ompt_callback_task_schedule, ompt_callback_task_schedule_t, 6) /* task schedule */ \ + macro (ompt_callback_implicit_task, ompt_callback_implicit_task_t, 7) /* implicit task */ \ + \ + macro (ompt_callback_target, ompt_callback_target_t, 8) /* target */ \ + macro (ompt_callback_target_data_op, ompt_callback_target_data_op_t, 9) /* target data op */ \ + macro (ompt_callback_target_submit, ompt_callback_target_submit_t, 10) /* target submit */ \ + \ + macro (ompt_callback_control_tool, ompt_callback_control_tool_t, 11) /* control tool */ \ + \ + macro (ompt_callback_device_initialize, ompt_callback_device_initialize_t, 12) /* device initialize */ \ + macro (ompt_callback_device_finalize, ompt_callback_device_finalize_t, 13) /* device finalize */ \ + \ + macro (ompt_callback_device_load, ompt_callback_device_load_t, 14) /* device load */ \ + macro (ompt_callback_device_unload, ompt_callback_device_unload_t, 15) /* device unload */ \ + \ + /* Optional Events */ \ + macro (ompt_callback_sync_region_wait, ompt_callback_sync_region_t, 16) /* sync region wait begin or end */ \ + \ + macro (ompt_callback_mutex_released, ompt_callback_mutex_t, 17) /* mutex released */ \ + \ + macro (ompt_callback_dependences, ompt_callback_dependences_t, 18) /* report task dependences */ \ + macro (ompt_callback_task_dependence, ompt_callback_task_dependence_t, 19) /* report task dependence */ \ + \ + macro (ompt_callback_work, ompt_callback_work_t, 20) /* task at work begin or end */ \ + \ + macro (ompt_callback_master, ompt_callback_master_t, 21) /* task at master begin or end */ \ + \ + macro (ompt_callback_target_map, ompt_callback_target_map_t, 22) /* target map */ \ + \ + macro (ompt_callback_sync_region, ompt_callback_sync_region_t, 23) /* sync region begin or end */ \ + \ + macro (ompt_callback_lock_init, ompt_callback_mutex_acquire_t, 24) /* lock init */ \ + macro (ompt_callback_lock_destroy, ompt_callback_mutex_t, 25) /* lock destroy */ \ + \ + macro (ompt_callback_mutex_acquire, ompt_callback_mutex_acquire_t, 26) /* mutex acquire */ \ + macro (ompt_callback_mutex_acquired, ompt_callback_mutex_t, 27) /* mutex acquired */ \ + \ + macro (ompt_callback_nest_lock, ompt_callback_nest_lock_t, 28) /* nest lock */ \ + \ + macro (ompt_callback_flush, ompt_callback_flush_t, 29) /* after executing flush */ \ + \ + macro (ompt_callback_cancel, ompt_callback_cancel_t, 30) /* cancel innermost binding region */ \ + \ + macro (ompt_callback_reduction, ompt_callback_sync_region_t, 31) /* reduction */ \ + \ + macro (ompt_callback_dispatch, ompt_callback_dispatch_t, 32) /* dispatch of work */ + +/***************************************************************************** + * 
implementation specific types + *****************************************************************************/ + +typedef enum kmp_mutex_impl_t { +#define kmp_mutex_impl_macro(impl, code) impl = code, + FOREACH_KMP_MUTEX_IMPL(kmp_mutex_impl_macro) +#undef kmp_mutex_impl_macro +} kmp_mutex_impl_t; + +/***************************************************************************** + * definitions generated from spec + *****************************************************************************/ + +typedef enum ompt_callbacks_t { + ompt_callback_thread_begin = 1, + ompt_callback_thread_end = 2, + ompt_callback_parallel_begin = 3, + ompt_callback_parallel_end = 4, + ompt_callback_task_create = 5, + ompt_callback_task_schedule = 6, + ompt_callback_implicit_task = 7, + ompt_callback_target = 8, + ompt_callback_target_data_op = 9, + ompt_callback_target_submit = 10, + ompt_callback_control_tool = 11, + ompt_callback_device_initialize = 12, + ompt_callback_device_finalize = 13, + ompt_callback_device_load = 14, + ompt_callback_device_unload = 15, + ompt_callback_sync_region_wait = 16, + ompt_callback_mutex_released = 17, + ompt_callback_dependences = 18, + ompt_callback_task_dependence = 19, + ompt_callback_work = 20, + ompt_callback_master = 21, + ompt_callback_target_map = 22, + ompt_callback_sync_region = 23, + ompt_callback_lock_init = 24, + ompt_callback_lock_destroy = 25, + ompt_callback_mutex_acquire = 26, + ompt_callback_mutex_acquired = 27, + ompt_callback_nest_lock = 28, + ompt_callback_flush = 29, + ompt_callback_cancel = 30, + ompt_callback_reduction = 31, + ompt_callback_dispatch = 32 +} ompt_callbacks_t; + +typedef enum ompt_record_t { + ompt_record_ompt = 1, + ompt_record_native = 2, + ompt_record_invalid = 3 +} ompt_record_t; + +typedef enum ompt_record_native_t { + ompt_record_native_info = 1, + ompt_record_native_event = 2 +} ompt_record_native_t; + +typedef enum ompt_set_result_t { + ompt_set_error = 0, + ompt_set_never = 1, + ompt_set_impossible = 2, + ompt_set_sometimes = 3, + ompt_set_sometimes_paired = 4, + ompt_set_always = 5 +} ompt_set_result_t; + +typedef uint64_t ompt_id_t; + +typedef uint64_t ompt_device_time_t; + +typedef uint64_t ompt_buffer_cursor_t; + +typedef enum ompt_thread_t { + ompt_thread_initial = 1, + ompt_thread_worker = 2, + ompt_thread_other = 3, + ompt_thread_unknown = 4 +} ompt_thread_t; + +typedef enum ompt_scope_endpoint_t { + ompt_scope_begin = 1, + ompt_scope_end = 2 +} ompt_scope_endpoint_t; + +typedef enum ompt_dispatch_t { + ompt_dispatch_iteration = 1, + ompt_dispatch_section = 2 +} ompt_dispatch_t; + +typedef enum ompt_sync_region_t { + ompt_sync_region_barrier = 1, + ompt_sync_region_barrier_implicit = 2, + ompt_sync_region_barrier_explicit = 3, + ompt_sync_region_barrier_implementation = 4, + ompt_sync_region_taskwait = 5, + ompt_sync_region_taskgroup = 6, + ompt_sync_region_reduction = 7 +} ompt_sync_region_t; + +typedef enum ompt_target_data_op_t { + ompt_target_data_alloc = 1, + ompt_target_data_transfer_to_device = 2, + ompt_target_data_transfer_from_device = 3, + ompt_target_data_delete = 4, + ompt_target_data_associate = 5, + ompt_target_data_disassociate = 6 +} ompt_target_data_op_t; + +typedef enum ompt_work_t { + ompt_work_loop = 1, + ompt_work_sections = 2, + ompt_work_single_executor = 3, + ompt_work_single_other = 4, + ompt_work_workshare = 5, + ompt_work_distribute = 6, + ompt_work_taskloop = 7 +} ompt_work_t; + +typedef enum ompt_mutex_t { + ompt_mutex_lock = 1, + ompt_mutex_test_lock = 2, + ompt_mutex_nest_lock = 3, + 
ompt_mutex_test_nest_lock = 4, + ompt_mutex_critical = 5, + ompt_mutex_atomic = 6, + ompt_mutex_ordered = 7 +} ompt_mutex_t; + +typedef enum ompt_native_mon_flag_t { + ompt_native_data_motion_explicit = 0x01, + ompt_native_data_motion_implicit = 0x02, + ompt_native_kernel_invocation = 0x04, + ompt_native_kernel_execution = 0x08, + ompt_native_driver = 0x10, + ompt_native_runtime = 0x20, + ompt_native_overhead = 0x40, + ompt_native_idleness = 0x80 +} ompt_native_mon_flag_t; + +typedef enum ompt_task_flag_t { + ompt_task_initial = 0x00000001, + ompt_task_implicit = 0x00000002, + ompt_task_explicit = 0x00000004, + ompt_task_target = 0x00000008, + ompt_task_undeferred = 0x08000000, + ompt_task_untied = 0x10000000, + ompt_task_final = 0x20000000, + ompt_task_mergeable = 0x40000000, + ompt_task_merged = 0x80000000 +} ompt_task_flag_t; + +typedef enum ompt_task_status_t { + ompt_task_complete = 1, + ompt_task_yield = 2, + ompt_task_cancel = 3, + ompt_task_detach = 4, + ompt_task_early_fulfill = 5, + ompt_task_late_fulfill = 6, + ompt_task_switch = 7 +} ompt_task_status_t; + +typedef enum ompt_target_t { + ompt_target = 1, + ompt_target_enter_data = 2, + ompt_target_exit_data = 3, + ompt_target_update = 4 +} ompt_target_t; + +typedef enum ompt_parallel_flag_t { + ompt_parallel_invoker_program = 0x00000001, + ompt_parallel_invoker_runtime = 0x00000002, + ompt_parallel_league = 0x40000000, + ompt_parallel_team = 0x80000000 +} ompt_parallel_flag_t; + +typedef enum ompt_target_map_flag_t { + ompt_target_map_flag_to = 0x01, + ompt_target_map_flag_from = 0x02, + ompt_target_map_flag_alloc = 0x04, + ompt_target_map_flag_release = 0x08, + ompt_target_map_flag_delete = 0x10, + ompt_target_map_flag_implicit = 0x20 +} ompt_target_map_flag_t; + +typedef enum ompt_dependence_type_t { + ompt_dependence_type_in = 1, + ompt_dependence_type_out = 2, + ompt_dependence_type_inout = 3, + ompt_dependence_type_mutexinoutset = 4, + ompt_dependence_type_source = 5, + ompt_dependence_type_sink = 6 +} ompt_dependence_type_t; + +typedef enum ompt_cancel_flag_t { + ompt_cancel_parallel = 0x01, + ompt_cancel_sections = 0x02, + ompt_cancel_loop = 0x04, + ompt_cancel_taskgroup = 0x08, + ompt_cancel_activated = 0x10, + ompt_cancel_detected = 0x20, + ompt_cancel_discarded_task = 0x40 +} ompt_cancel_flag_t; + +typedef uint64_t ompt_hwid_t; + +typedef uint64_t ompt_wait_id_t; + +typedef enum ompt_frame_flag_t { + ompt_frame_runtime = 0x00, + ompt_frame_application = 0x01, + ompt_frame_cfa = 0x10, + ompt_frame_framepointer = 0x20, + ompt_frame_stackaddress = 0x30 +} ompt_frame_flag_t; + +typedef enum ompt_state_t { + ompt_state_work_serial = 0x000, + ompt_state_work_parallel = 0x001, + ompt_state_work_reduction = 0x002, + + ompt_state_wait_barrier = 0x010, + ompt_state_wait_barrier_implicit_parallel = 0x011, + ompt_state_wait_barrier_implicit_workshare = 0x012, + ompt_state_wait_barrier_implicit = 0x013, + ompt_state_wait_barrier_explicit = 0x014, + + ompt_state_wait_taskwait = 0x020, + ompt_state_wait_taskgroup = 0x021, + + ompt_state_wait_mutex = 0x040, + ompt_state_wait_lock = 0x041, + ompt_state_wait_critical = 0x042, + ompt_state_wait_atomic = 0x043, + ompt_state_wait_ordered = 0x044, + + ompt_state_wait_target = 0x080, + ompt_state_wait_target_map = 0x081, + ompt_state_wait_target_update = 0x082, + + ompt_state_idle = 0x100, + ompt_state_overhead = 0x101, + ompt_state_undefined = 0x102 +} ompt_state_t; + +typedef uint64_t (*ompt_get_unique_id_t) (void); + +typedef uint64_t ompd_size_t; + +typedef uint64_t ompd_wait_id_t; + 
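+
+/*
+ * Illustrative sketch (an editorial addition, with error handling omitted):
+ * per the OpenMP 5.0 tool interface, a first-party tool makes itself known
+ * by defining ompt_start_tool() and returning an ompt_start_tool_result_t
+ * (declared further below). All names prefixed "my_" are hypothetical.
+ *
+ *   static int my_initialize(ompt_function_lookup_t lookup,
+ *                            int initial_device_num, ompt_data_t *tool_data) {
+ *     ompt_set_callback_t set_cb =
+ *         (ompt_set_callback_t) lookup("ompt_set_callback");
+ *     // register callbacks of interest here, e.g. ompt_callback_thread_begin
+ *     return 1;  // non-zero keeps the tool active
+ *   }
+ *   static void my_finalize(ompt_data_t *tool_data) {}
+ *
+ *   ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
+ *                                             const char *runtime_version) {
+ *     static ompt_start_tool_result_t result = { my_initialize, my_finalize, {0} };
+ *     return &result;
+ *   }
+ */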
+typedef uint64_t ompd_addr_t; +typedef int64_t ompd_word_t; +typedef uint64_t ompd_seg_t; + +typedef uint64_t ompd_device_t; + +typedef uint64_t ompd_thread_id_t; + +typedef enum ompd_scope_t { + ompd_scope_global = 1, + ompd_scope_address_space = 2, + ompd_scope_thread = 3, + ompd_scope_parallel = 4, + ompd_scope_implicit_task = 5, + ompd_scope_task = 6 +} ompd_scope_t; + +typedef uint64_t ompd_icv_id_t; + +typedef enum ompd_rc_t { + ompd_rc_ok = 0, + ompd_rc_unavailable = 1, + ompd_rc_stale_handle = 2, + ompd_rc_bad_input = 3, + ompd_rc_error = 4, + ompd_rc_unsupported = 5, + ompd_rc_needs_state_tracking = 6, + ompd_rc_incompatible = 7, + ompd_rc_device_read_error = 8, + ompd_rc_device_write_error = 9, + ompd_rc_nomem = 10, +} ompd_rc_t; + +typedef void (*ompt_interface_fn_t) (void); + +typedef ompt_interface_fn_t (*ompt_function_lookup_t) ( + const char *interface_function_name +); + +typedef union ompt_data_t { + uint64_t value; + void *ptr; +} ompt_data_t; + +typedef struct ompt_frame_t { + ompt_data_t exit_frame; + ompt_data_t enter_frame; + int exit_frame_flags; + int enter_frame_flags; +} ompt_frame_t; + +typedef void (*ompt_callback_t) (void); + +typedef void ompt_device_t; + +typedef void ompt_buffer_t; + +typedef void (*ompt_callback_buffer_request_t) ( + int device_num, + ompt_buffer_t **buffer, + size_t *bytes +); + +typedef void (*ompt_callback_buffer_complete_t) ( + int device_num, + ompt_buffer_t *buffer, + size_t bytes, + ompt_buffer_cursor_t begin, + int buffer_owned +); + +typedef void (*ompt_finalize_t) ( + ompt_data_t *tool_data +); + +typedef int (*ompt_initialize_t) ( + ompt_function_lookup_t lookup, + int initial_device_num, + ompt_data_t *tool_data +); + +typedef struct ompt_start_tool_result_t { + ompt_initialize_t initialize; + ompt_finalize_t finalize; + ompt_data_t tool_data; +} ompt_start_tool_result_t; + +typedef struct ompt_record_abstract_t { + ompt_record_native_t rclass; + const char *type; + ompt_device_time_t start_time; + ompt_device_time_t end_time; + ompt_hwid_t hwid; +} ompt_record_abstract_t; + +typedef struct ompt_dependence_t { + ompt_data_t variable; + ompt_dependence_type_t dependence_type; +} ompt_dependence_t; + +typedef int (*ompt_enumerate_states_t) ( + int current_state, + int *next_state, + const char **next_state_name +); + +typedef int (*ompt_enumerate_mutex_impls_t) ( + int current_impl, + int *next_impl, + const char **next_impl_name +); + +typedef ompt_set_result_t (*ompt_set_callback_t) ( + ompt_callbacks_t event, + ompt_callback_t callback +); + +typedef int (*ompt_get_callback_t) ( + ompt_callbacks_t event, + ompt_callback_t *callback +); + +typedef ompt_data_t *(*ompt_get_thread_data_t) (void); + +typedef int (*ompt_get_num_procs_t) (void); + +typedef int (*ompt_get_num_places_t) (void); + +typedef int (*ompt_get_place_proc_ids_t) ( + int place_num, + int ids_size, + int *ids +); + +typedef int (*ompt_get_place_num_t) (void); + +typedef int (*ompt_get_partition_place_nums_t) ( + int place_nums_size, + int *place_nums +); + +typedef int (*ompt_get_proc_id_t) (void); + +typedef int (*ompt_get_state_t) ( + ompt_wait_id_t *wait_id +); + +typedef int (*ompt_get_parallel_info_t) ( + int ancestor_level, + ompt_data_t **parallel_data, + int *team_size +); + +typedef int (*ompt_get_task_info_t) ( + int ancestor_level, + int *flags, + ompt_data_t **task_data, + ompt_frame_t **task_frame, + ompt_data_t **parallel_data, + int *thread_num +); + +typedef int (*ompt_get_task_memory_t)( + void **addr, + size_t *size, + int block +); + +typedef 
int (*ompt_get_target_info_t) ( + uint64_t *device_num, + ompt_id_t *target_id, + ompt_id_t *host_op_id +); + +typedef int (*ompt_get_num_devices_t) (void); + +typedef void (*ompt_finalize_tool_t) (void); + +typedef int (*ompt_get_device_num_procs_t) ( + ompt_device_t *device +); + +typedef ompt_device_time_t (*ompt_get_device_time_t) ( + ompt_device_t *device +); + +typedef double (*ompt_translate_time_t) ( + ompt_device_t *device, + ompt_device_time_t time +); + +typedef ompt_set_result_t (*ompt_set_trace_ompt_t) ( + ompt_device_t *device, + unsigned int enable, + unsigned int etype +); + +typedef ompt_set_result_t (*ompt_set_trace_native_t) ( + ompt_device_t *device, + int enable, + int flags +); + +typedef int (*ompt_start_trace_t) ( + ompt_device_t *device, + ompt_callback_buffer_request_t request, + ompt_callback_buffer_complete_t complete +); + +typedef int (*ompt_pause_trace_t) ( + ompt_device_t *device, + int begin_pause +); + +typedef int (*ompt_flush_trace_t) ( + ompt_device_t *device +); + +typedef int (*ompt_stop_trace_t) ( + ompt_device_t *device +); + +typedef int (*ompt_advance_buffer_cursor_t) ( + ompt_device_t *device, + ompt_buffer_t *buffer, + size_t size, + ompt_buffer_cursor_t current, + ompt_buffer_cursor_t *next +); + +typedef ompt_record_t (*ompt_get_record_type_t) ( + ompt_buffer_t *buffer, + ompt_buffer_cursor_t current +); + +typedef void *(*ompt_get_record_native_t) ( + ompt_buffer_t *buffer, + ompt_buffer_cursor_t current, + ompt_id_t *host_op_id +); + +typedef ompt_record_abstract_t * +(*ompt_get_record_abstract_t) ( + void *native_record +); + +typedef void (*ompt_callback_thread_begin_t) ( + ompt_thread_t thread_type, + ompt_data_t *thread_data +); + +typedef struct ompt_record_thread_begin_t { + ompt_thread_t thread_type; +} ompt_record_thread_begin_t; + +typedef void (*ompt_callback_thread_end_t) ( + ompt_data_t *thread_data +); + +typedef void (*ompt_callback_parallel_begin_t) ( + ompt_data_t *encountering_task_data, + const ompt_frame_t *encountering_task_frame, + ompt_data_t *parallel_data, + unsigned int requested_parallelism, + int flags, + const void *codeptr_ra +); + +typedef struct ompt_record_parallel_begin_t { + ompt_id_t encountering_task_id; + ompt_id_t parallel_id; + unsigned int requested_parallelism; + int flags; + const void *codeptr_ra; +} ompt_record_parallel_begin_t; + +typedef void (*ompt_callback_parallel_end_t) ( + ompt_data_t *parallel_data, + ompt_data_t *encountering_task_data, + int flags, + const void *codeptr_ra +); + +typedef struct ompt_record_parallel_end_t { + ompt_id_t parallel_id; + ompt_id_t encountering_task_id; + int flags; + const void *codeptr_ra; +} ompt_record_parallel_end_t; + +typedef void (*ompt_callback_work_t) ( + ompt_work_t wstype, + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + uint64_t count, + const void *codeptr_ra +); + +typedef struct ompt_record_work_t { + ompt_work_t wstype; + ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + uint64_t count; + const void *codeptr_ra; +} ompt_record_work_t; + +typedef void (*ompt_callback_dispatch_t) ( + ompt_data_t *parallel_data, + ompt_data_t *task_data, + ompt_dispatch_t kind, + ompt_data_t instance +); + +typedef struct ompt_record_dispatch_t { + ompt_id_t parallel_id; + ompt_id_t task_id; + ompt_dispatch_t kind; + ompt_data_t instance; +} ompt_record_dispatch_t; + +typedef void (*ompt_callback_task_create_t) ( + ompt_data_t *encountering_task_data, + const ompt_frame_t 
*encountering_task_frame, + ompt_data_t *new_task_data, + int flags, + int has_dependences, + const void *codeptr_ra +); + +typedef struct ompt_record_task_create_t { + ompt_id_t encountering_task_id; + ompt_id_t new_task_id; + int flags; + int has_dependences; + const void *codeptr_ra; +} ompt_record_task_create_t; + +typedef void (*ompt_callback_dependences_t) ( + ompt_data_t *task_data, + const ompt_dependence_t *deps, + int ndeps +); + +typedef struct ompt_record_dependences_t { + ompt_id_t task_id; + ompt_dependence_t dep; + int ndeps; +} ompt_record_dependences_t; + +typedef void (*ompt_callback_task_dependence_t) ( + ompt_data_t *src_task_data, + ompt_data_t *sink_task_data +); + +typedef struct ompt_record_task_dependence_t { + ompt_id_t src_task_id; + ompt_id_t sink_task_id; +} ompt_record_task_dependence_t; + +typedef void (*ompt_callback_task_schedule_t) ( + ompt_data_t *prior_task_data, + ompt_task_status_t prior_task_status, + ompt_data_t *next_task_data +); + +typedef struct ompt_record_task_schedule_t { + ompt_id_t prior_task_id; + ompt_task_status_t prior_task_status; + ompt_id_t next_task_id; +} ompt_record_task_schedule_t; + +typedef void (*ompt_callback_implicit_task_t) ( + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + unsigned int actual_parallelism, + unsigned int index, + int flags +); + +typedef struct ompt_record_implicit_task_t { + ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + unsigned int actual_parallelism; + unsigned int index; + int flags; +} ompt_record_implicit_task_t; + +typedef void (*ompt_callback_master_t) ( + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + const void *codeptr_ra +); + +typedef struct ompt_record_master_t { + ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + const void *codeptr_ra; +} ompt_record_master_t; + +typedef void (*ompt_callback_sync_region_t) ( + ompt_sync_region_t kind, + ompt_scope_endpoint_t endpoint, + ompt_data_t *parallel_data, + ompt_data_t *task_data, + const void *codeptr_ra +); + +typedef struct ompt_record_sync_region_t { + ompt_sync_region_t kind; + ompt_scope_endpoint_t endpoint; + ompt_id_t parallel_id; + ompt_id_t task_id; + const void *codeptr_ra; +} ompt_record_sync_region_t; + +typedef void (*ompt_callback_mutex_acquire_t) ( + ompt_mutex_t kind, + unsigned int hint, + unsigned int impl, + ompt_wait_id_t wait_id, + const void *codeptr_ra +); + +typedef struct ompt_record_mutex_acquire_t { + ompt_mutex_t kind; + unsigned int hint; + unsigned int impl; + ompt_wait_id_t wait_id; + const void *codeptr_ra; +} ompt_record_mutex_acquire_t; + +typedef void (*ompt_callback_mutex_t) ( + ompt_mutex_t kind, + ompt_wait_id_t wait_id, + const void *codeptr_ra +); + +typedef struct ompt_record_mutex_t { + ompt_mutex_t kind; + ompt_wait_id_t wait_id; + const void *codeptr_ra; +} ompt_record_mutex_t; + +typedef void (*ompt_callback_nest_lock_t) ( + ompt_scope_endpoint_t endpoint, + ompt_wait_id_t wait_id, + const void *codeptr_ra +); + +typedef struct ompt_record_nest_lock_t { + ompt_scope_endpoint_t endpoint; + ompt_wait_id_t wait_id; + const void *codeptr_ra; +} ompt_record_nest_lock_t; + +typedef void (*ompt_callback_flush_t) ( + ompt_data_t *thread_data, + const void *codeptr_ra +); + +typedef struct ompt_record_flush_t { + const void *codeptr_ra; +} ompt_record_flush_t; + +typedef void (*ompt_callback_cancel_t) ( + ompt_data_t *task_data, + int flags, + const void *codeptr_ra 
+); + +typedef struct ompt_record_cancel_t { + ompt_id_t task_id; + int flags; + const void *codeptr_ra; +} ompt_record_cancel_t; + +typedef void (*ompt_callback_device_initialize_t) ( + int device_num, + const char *type, + ompt_device_t *device, + ompt_function_lookup_t lookup, + const char *documentation +); + +typedef void (*ompt_callback_device_finalize_t) ( + int device_num +); + +typedef void (*ompt_callback_device_load_t) ( + int device_num, + const char *filename, + int64_t offset_in_file, + void *vma_in_file, + size_t bytes, + void *host_addr, + void *device_addr, + uint64_t module_id +); + +typedef void (*ompt_callback_device_unload_t) ( + int device_num, + uint64_t module_id +); + +typedef void (*ompt_callback_target_data_op_t) ( + ompt_id_t target_id, + ompt_id_t host_op_id, + ompt_target_data_op_t optype, + void *src_addr, + int src_device_num, + void *dest_addr, + int dest_device_num, + size_t bytes, + const void *codeptr_ra +); + +typedef struct ompt_record_target_data_op_t { + ompt_id_t host_op_id; + ompt_target_data_op_t optype; + void *src_addr; + int src_device_num; + void *dest_addr; + int dest_device_num; + size_t bytes; + ompt_device_time_t end_time; + const void *codeptr_ra; +} ompt_record_target_data_op_t; + +typedef void (*ompt_callback_target_t) ( + ompt_target_t kind, + ompt_scope_endpoint_t endpoint, + int device_num, + ompt_data_t *task_data, + ompt_id_t target_id, + const void *codeptr_ra +); + +typedef struct ompt_record_target_t { + ompt_target_t kind; + ompt_scope_endpoint_t endpoint; + int device_num; + ompt_id_t task_id; + ompt_id_t target_id; + const void *codeptr_ra; +} ompt_record_target_t; + +typedef void (*ompt_callback_target_map_t) ( + ompt_id_t target_id, + unsigned int nitems, + void **host_addr, + void **device_addr, + size_t *bytes, + unsigned int *mapping_flags, + const void *codeptr_ra +); + +typedef struct ompt_record_target_map_t { + ompt_id_t target_id; + unsigned int nitems; + void **host_addr; + void **device_addr; + size_t *bytes; + unsigned int *mapping_flags; + const void *codeptr_ra; +} ompt_record_target_map_t; + +typedef void (*ompt_callback_target_submit_t) ( + ompt_id_t target_id, + ompt_id_t host_op_id, + unsigned int requested_num_teams +); + +typedef struct ompt_record_target_kernel_t { + ompt_id_t host_op_id; + unsigned int requested_num_teams; + unsigned int granted_num_teams; + ompt_device_time_t end_time; +} ompt_record_target_kernel_t; + +typedef int (*ompt_callback_control_tool_t) ( + uint64_t command, + uint64_t modifier, + void *arg, + const void *codeptr_ra +); + +typedef struct ompt_record_control_tool_t { + uint64_t command; + uint64_t modifier; + const void *codeptr_ra; +} ompt_record_control_tool_t; + +typedef struct ompd_address_t { + ompd_seg_t segment; + ompd_addr_t address; +} ompd_address_t; + +typedef struct ompd_frame_info_t { + ompd_address_t frame_address; + ompd_word_t frame_flag; +} ompd_frame_info_t; + +typedef struct _ompd_aspace_handle ompd_address_space_handle_t; +typedef struct _ompd_thread_handle ompd_thread_handle_t; +typedef struct _ompd_parallel_handle ompd_parallel_handle_t; +typedef struct _ompd_task_handle ompd_task_handle_t; + +typedef struct _ompd_aspace_cont ompd_address_space_context_t; +typedef struct _ompd_thread_cont ompd_thread_context_t; + +typedef struct ompd_device_type_sizes_t { + uint8_t sizeof_char; + uint8_t sizeof_short; + uint8_t sizeof_int; + uint8_t sizeof_long; + uint8_t sizeof_long_long; + uint8_t sizeof_pointer; +} ompd_device_type_sizes_t; + +typedef struct 
ompt_record_ompt_t { + ompt_callbacks_t type; + ompt_device_time_t time; + ompt_id_t thread_id; + ompt_id_t target_id; + union { + ompt_record_thread_begin_t thread_begin; + ompt_record_parallel_begin_t parallel_begin; + ompt_record_parallel_end_t parallel_end; + ompt_record_work_t work; + ompt_record_dispatch_t dispatch; + ompt_record_task_create_t task_create; + ompt_record_dependences_t dependences; + ompt_record_task_dependence_t task_dependence; + ompt_record_task_schedule_t task_schedule; + ompt_record_implicit_task_t implicit_task; + ompt_record_master_t master; + ompt_record_sync_region_t sync_region; + ompt_record_mutex_acquire_t mutex_acquire; + ompt_record_mutex_t mutex; + ompt_record_nest_lock_t nest_lock; + ompt_record_flush_t flush; + ompt_record_cancel_t cancel; + ompt_record_target_t target; + ompt_record_target_data_op_t target_data_op; + ompt_record_target_map_t target_map; + ompt_record_target_kernel_t target_kernel; + ompt_record_control_tool_t control_tool; + } record; +} ompt_record_ompt_t; + +typedef ompt_record_ompt_t *(*ompt_get_record_ompt_t) ( + ompt_buffer_t *buffer, + ompt_buffer_cursor_t current +); + +#define ompt_id_none 0 +#define ompt_data_none {0} +#define ompt_time_none 0 +#define ompt_hwid_none 0 +#define ompt_addr_none ~0 +#define ompt_mutex_impl_none 0 +#define ompt_wait_id_none 0 + +#define ompd_segment_none 0 + +#endif /* __OMPT__ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ee6cac93ebe31524908a4758f4dbc32450db403 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_sass_metrics.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_sass_metrics.h new file mode 100644 index 0000000000000000000000000000000000000000..acb59cf8e5882a5ff13b4a1b0fdc6bc7b0ec47f7 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_sass_metrics.h @@ -0,0 +1,436 @@ +/* + * Copyright 2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(_CUPTI_SASS_METRICS_H_) +#define _CUPTI_SASS_METRICS_H_ + +#include <cuda.h> +#include <cupti_result.h> +#include <cupti_target.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +/** + * \defgroup CUPTI_SASS_METRICS_API CUPTI SASS Metrics API + * Functions, types, and enums that implement the CUPTI SASS Metrics API. + * @{ + */ + +typedef enum +{ + /// SASS metric data will be collected at GPU level.
+ /// In the CUpti_SassMetricsGetDataProperties_Params struct, numOfInstances will be equal to 1 + CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_GPU = 0, + + /// SASS metric data will be collected at SM level + /// In the CUpti_SassMetricsGetDataProperties_Params struct, numOfInstances will be equal to the number of SMs in the GPU + CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_SM = 1, + + /// SASS metric data will be collected at SM sub-partition level + /// In the CUpti_SassMetricsGetDataProperties_Params struct, numOfInstances will be equal to the number of SM sub-partitions in the GPU + CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_SMSP = 2, + + CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_INVALID +} CUpti_SassMetrics_OutputGranularity; + +typedef struct CUpti_SassMetrics_MetricDetails +{ + /// unique ID for the SASS metric + uint64_t metricId; + /// metric name + const char* pMetricName; + /// metric description + const char* pMetricDescription; +} CUpti_SassMetrics_MetricDetails; + +/** + * \brief Params for cuptiSassMetricsGetNumOfMetrics + */ +typedef struct CUpti_SassMetrics_GetNumOfMetrics_Params +{ + /// [in] should be equal to CUpti_SassMetrics_GetNumOfMetrics_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] chip name for which metrics will be queried + const char* pChipName; + /// [out] number of metrics supported for the queried chip + size_t numOfMetrics; +} CUpti_SassMetrics_GetNumOfMetrics_Params; + +#define CUpti_SassMetrics_GetNumOfMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetNumOfMetrics_Params, numOfMetrics) + +/** + * \brief Get the number of supported SASS metrics for the chip. + * + * \param pParams A pointer to \ref CUpti_SassMetrics_GetNumOfMetrics_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric collection + */ +CUptiResult CUPTIAPI cuptiSassMetricsGetNumOfMetrics(CUpti_SassMetrics_GetNumOfMetrics_Params* pParams); + +/** + * \brief Params for cuptiSassMetricsGetMetrics + */ +typedef struct CUpti_SassMetrics_GetMetrics_Params +{ + /// [in] should be equal to CUpti_SassMetrics_GetMetrics_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] chip name for which metrics will be queried + const char* pChipName; + /// [in] number of metrics supported for the queried chip (can be queried using cuptiSassMetricsGetNumOfMetrics()) + size_t numOfMetrics; + /// [out] list of metrics supported for queried chip + CUpti_SassMetrics_MetricDetails* pMetricsList; +} CUpti_SassMetrics_GetMetrics_Params; +#define CUpti_SassMetrics_GetMetrics_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetMetrics_Params, pMetricsList) + +/** + * \brief Get the list of all supported SASS metrics for the chip.
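+ *
+ * A minimal enumeration sketch (an illustrative addition; error checking and
+ * frees omitted; the chip name is obtained via cuptiDeviceGetChipName() from
+ * cupti_target.h):
+ * \code
+ *   CUpti_Device_GetChipName_Params chipParams = { CUpti_Device_GetChipName_Params_STRUCT_SIZE };
+ *   chipParams.deviceIndex = 0;
+ *   cuptiDeviceGetChipName(&chipParams);
+ *
+ *   CUpti_SassMetrics_GetNumOfMetrics_Params numParams = { CUpti_SassMetrics_GetNumOfMetrics_Params_STRUCT_SIZE };
+ *   numParams.pChipName = chipParams.pChipName;
+ *   cuptiSassMetricsGetNumOfMetrics(&numParams);
+ *
+ *   CUpti_SassMetrics_MetricDetails* pList = (CUpti_SassMetrics_MetricDetails*)
+ *       malloc(numParams.numOfMetrics * sizeof(CUpti_SassMetrics_MetricDetails));
+ *   CUpti_SassMetrics_GetMetrics_Params getParams = { CUpti_SassMetrics_GetMetrics_Params_STRUCT_SIZE };
+ *   getParams.pChipName = chipParams.pChipName;
+ *   getParams.numOfMetrics = numParams.numOfMetrics;
+ *   getParams.pMetricsList = pList;
+ *   cuptiSassMetricsGetMetrics(&getParams);
+ * \endcode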
+ * + * \param pParams A pointer to \ref CUpti_SassMetrics_GetMetrics_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric collection + */ +CUptiResult CUPTIAPI cuptiSassMetricsGetMetrics(CUpti_SassMetrics_GetMetrics_Params* pParams); + +/** + * \brief Params for cuptiSassMetricsGetProperties + */ +typedef struct CUpti_SassMetrics_GetProperties_Params +{ + /// [in] should be equal to CUpti_SassMetrics_GetProperties_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] chip name for which metric will be queried + const char* pChipName; + /// [in] metric name + const char* pMetricName; + /// [out] returns the metric ID and the metric description + CUpti_SassMetrics_MetricDetails metric; +} CUpti_SassMetrics_GetProperties_Params; +#define CUpti_SassMetrics_GetProperties_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_GetProperties_Params, metric) + +/** + * \brief Get metric properties for the queried metric. + * For a given metric, the results will be put in CUpti_SassMetrics_MetricDetails, + * which stores the metric ID and description of the metric. + * + * \param pParams A pointer to \ref CUpti_SassMetrics_GetProperties_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection + */ +CUptiResult CUPTIAPI cuptiSassMetricsGetProperties(CUpti_SassMetrics_GetProperties_Params *pParams); + +typedef struct CUpti_SassMetrics_Config +{ + /// [in] unique id for the SASS metric, can be queried using cuptiSassMetricsGetProperties() + uint64_t metricId; + /// [in] CUpti_SassMetrics_OutputGranularity + uint8_t outputGranularity; +} CUpti_SassMetrics_Config; + +/** + * \brief Params for cuptiSassMetricsSetConfig + */ +typedef struct CUpti_SassMetricsSetConfig_Params +{ + /// [in] equal to CUpti_SassMetricsSetConfig_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] number of metric configs; will be equal to the number of metrics queried + size_t numOfMetricConfig; + /// [in] list of metric config generated for given sass metrics + CUpti_SassMetrics_Config* pConfigs; + /// [in] device index for which the config will be set; the user can call this once for + /// the device on which the SASS metric data will be collected + uint32_t deviceIndex; +} CUpti_SassMetricsSetConfig_Params; +#define CUpti_SassMetricsSetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsSetConfig_Params, deviceIndex) + +/** + * \brief Set config for the SASS metric data collection for a device. + * The user needs to call this API before calling any of the SASS metric data collection APIs. + * Each set config API call needs to be followed by a cuptiSassMetricsUnsetConfig() call + * before calling the cuptiSassMetricsSetConfig() API again for the same device.
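+ *
+ * A minimal sketch (an illustrative addition; metricId is a hypothetical
+ * value assumed to have been queried earlier via cuptiSassMetricsGetProperties()):
+ * \code
+ *   CUpti_SassMetrics_Config config;
+ *   config.metricId = metricId;  // assumed: obtained from cuptiSassMetricsGetProperties()
+ *   config.outputGranularity = CUPTI_SASS_METRICS_OUTPUT_GRANULARITY_GPU;
+ *
+ *   CUpti_SassMetricsSetConfig_Params setParams = { CUpti_SassMetricsSetConfig_Params_STRUCT_SIZE };
+ *   setParams.numOfMetricConfig = 1;
+ *   setParams.pConfigs = &config;
+ *   setParams.deviceIndex = 0;
+ *   cuptiSassMetricsSetConfig(&setParams);
+ *   // ... enable, collect, and disable ...
+ *   CUpti_SassMetricsUnsetConfig_Params unsetParams = { CUpti_SassMetricsUnsetConfig_Params_STRUCT_SIZE };
+ *   unsetParams.deviceIndex = 0;
+ *   cuptiSassMetricsUnsetConfig(&unsetParams);
+ * \endcode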
+ * + * \param pParams A pointer to \ref CUpti_SassMetricsSetConfig_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_INVALID_CONTEXT if no CUDA context has been created prior to this API call + * \retval CUPTI_ERROR_INVALID_OPERATION if this is called multiple times for the device without calling the unset config API + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection + */ +CUptiResult CUPTIAPI cuptiSassMetricsSetConfig(CUpti_SassMetricsSetConfig_Params *pParams); + +/** + * \brief Params for cuptiSassMetricsUnsetConfig + */ +typedef struct CUpti_SassMetricsUnsetConfig_Params +{ + /// [in] equal to CUpti_SassMetricsUnsetConfig_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] device index for which the SASS metric data collection config will get reset; the user needs to call this API for + /// all the devices on which SASS metric data collection has been configured. + uint32_t deviceIndex; +} CUpti_SassMetricsUnsetConfig_Params; +#define CUpti_SassMetricsUnsetConfig_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsUnsetConfig_Params, deviceIndex) + +/** + * \brief Unset config API will reset the SASS metric data collection configuration for the device. + * Once this API is called, CUPTI will deallocate all the memory allocated and remove all + * the configuration for SASS metric data collection. The user can only call this API for a device + * on which the cuptiSassMetricsSetConfig() API has been called earlier. + * + * \param pParams A pointer to \ref CUpti_SassMetricsUnsetConfig_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_INVALID_CONTEXT if no CUDA context has been created prior to this API call + * \retval CUPTI_ERROR_INVALID_OPERATION if this is called multiple times for the device without calling the set config API + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection + */ +CUptiResult CUPTIAPI cuptiSassMetricsUnsetConfig(CUpti_SassMetricsUnsetConfig_Params *pParams); + +/** + * \brief Params for cuptiSassMetricsEnable + */ +typedef struct CUpti_SassMetricsEnable_Params +{ + /// [in] equal to CUpti_SassMetricsEnable_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] CUDA context on which SASS metric data collection will be enabled. + /// If set to NULL, the default context will be considered for SASS metric data collection. + CUcontext ctx; + /// [in] if false, all the functions will be patched with the cuptiSassMetricsEnable() API call regardless of their execution. + /// When this parameter is set to true, metric data collection for a function will be done at its very first execution in the enable/disable + /// range. + uint8_t enableLazyPatching; +} CUpti_SassMetricsEnable_Params; +#define CUpti_SassMetricsEnable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsEnable_Params, enableLazyPatching) + +/** + * \brief SASS metric data collection enable API will mark the start of a range within which kernels + will be profiled for SASS metrics.
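+ *
+ * A minimal sketch of an enable/disable range (an illustrative addition;
+ * cuptiSassMetricsSetConfig() must already have been called for the device):
+ * \code
+ *   CUpti_SassMetricsEnable_Params enableParams = { CUpti_SassMetricsEnable_Params_STRUCT_SIZE };
+ *   enableParams.ctx = NULL;             // NULL selects the default context
+ *   enableParams.enableLazyPatching = 1;
+ *   cuptiSassMetricsEnable(&enableParams);
+ *
+ *   // ... launch the kernels to be profiled ...
+ *
+ *   CUpti_SassMetricsDisable_Params disableParams = { CUpti_SassMetricsDisable_Params_STRUCT_SIZE };
+ *   disableParams.ctx = NULL;
+ *   cuptiSassMetricsDisable(&disableParams);
+ * \endcode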
+ * + * \param pParams A pointer to \ref CUpti_SassMetricsEnable_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection + * \retval CUPTI_ERROR_INVALID_CONTEXT if no CUDA context has been created prior to this API call + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called multiple times for a CUDA context without calling + * the cuptiSassMetricsDisable() API, or is called before the cuptiSassMetricsSetConfig() API. + */ +CUptiResult CUPTIAPI cuptiSassMetricsEnable(CUpti_SassMetricsEnable_Params* pParams); + +/** + * \brief Params for cuptiSassMetricsDisable + */ +typedef struct CUpti_SassMetricsDisable_Params +{ + /// [in] equal to CUpti_SassMetricsDisable_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] CUDA context on which SASS metric data collection will be disabled. + /// If set to NULL, the default context will be considered for SASS metric data collection. + CUcontext ctx; + /// [out] Number of dropped SASS records; will be equal to numOfPatchedInstructions * numOfInstances. + /// Number of dropped records will be zero when data is flushed prior to calling the disable API. + size_t numOfDroppedRecords; +} CUpti_SassMetricsDisable_Params; +#define CUpti_SassMetricsDisable_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsDisable_Params, numOfDroppedRecords) + +/** + * \brief SASS metric data collection disable API will mark the end of a range; any kernel launched after this + * API call will not be profiled for SASS metrics. + * + * \param pParams A pointer to \ref CUpti_SassMetricsDisable_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection + * \retval CUPTI_ERROR_INVALID_CONTEXT if no CUDA context has been created prior to this API call + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called multiple times for a CUDA context without calling + * the cuptiSassMetricsEnable() API, or is called before the cuptiSassMetricsSetConfig() API. + */ +CUptiResult CUPTIAPI cuptiSassMetricsDisable(CUpti_SassMetricsDisable_Params* pParams); + +/** + * \brief Params for cuptiSassMetricsGetDataProperties + */ +typedef struct CUpti_SassMetricsGetDataProperties_Params +{ + /// [in] equal to CUpti_SassMetricsGetDataProperties_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] CUDA context on which SASS metric data collection was enabled. + /// If set to NULL, the default context will be considered for SASS metric data collection. + CUcontext ctx; + /// [out] total number of SASS records that have been collected + size_t numOfPatchedInstructionRecords; + /// [out] number of instances for each metric value per instruction. + /// This will depend on the CUpti_SassMetrics_OutputGranularity level set for the metric config. + size_t numOfInstances; +} CUpti_SassMetricsGetDataProperties_Params; + +#define CUpti_SassMetricsGetDataProperties_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsGetDataProperties_Params, numOfInstances) +/** + * \brief SASS metric data properties API will report the number of instances per metric + value and the number of SASS instruction records that have been collected.
The number of instances of a metric + will vary according to the output granularity level the user set with the CUpti_SassMetrics_OutputGranularity value. + The user needs to allocate memory for retrieving the SASS data using the cuptiSassMetricsFlushData() API. + * + * \param pParams A pointer to \ref CUpti_SassMetricsGetDataProperties_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called outside the enable/disable range. + */ +CUptiResult CUPTIAPI cuptiSassMetricsGetDataProperties(CUpti_SassMetricsGetDataProperties_Params* pParams); + +typedef struct CUpti_SassMetrics_InstanceValue +{ + // unique id of the metric + uint64_t metricId; + // metric value + uint64_t value; +} CUpti_SassMetrics_InstanceValue; +#define CUpti_SassMetrics_InstanceValue_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetrics_InstanceValue, value) + +typedef struct CUpti_SassMetrics_Data +{ + /// [in] equal to CUpti_SassMetricsFlushData_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [out] Unique cubin id + uint32_t cubinCrc; + /// [out] function's unique symbol index in the module. + uint32_t functionIndex; + /// [out] The function name + const char* functionName; + /// [out] pc offset for the function in a module + uint32_t pcOffset; + /// [out] array of size equal to number of instances per metric, which contains the metric ID and metric value. + CUpti_SassMetrics_InstanceValue* pInstanceValues; +} CUpti_SassMetrics_Data; + +/** + * \brief Params for cuptiSassMetricsFlushData + */ +typedef struct CUpti_SassMetricsFlushData_Params +{ + /// [in] equal to CUpti_SassMetricsFlushData_Params_STRUCT_SIZE + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] CUDA context on which SASS metric data collection was enabled. + /// If set to NULL, the default context will be considered for SASS metric data collection. + CUcontext ctx; + /// [in] number of patched instruction records to be retrieved; the user can call cuptiSassMetricsGetDataProperties() + /// to get the total number of records available. + size_t numOfPatchedInstructionRecords; + /// [in] number of patched instruction record instances for a metric; the user can call cuptiSassMetricsGetDataProperties() + /// to get the total number of instances for each record per metric. + size_t numOfInstances; + /// [out] buffer that receives the SASS metric data records + CUpti_SassMetrics_Data* pMetricsData; +} CUpti_SassMetricsFlushData_Params; +#define CUpti_SassMetricsFlushData_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_SassMetricsFlushData_Params, numOfInstances) + +/** + * \brief Flush SASS metrics data from the CUPTI internal buffer to the user's buffer. + * The user needs to allocate the buffer for retrieving the data. The number of records collected + * can be queried using the API cuptiSassMetricsGetDataProperties(). + * + * \param pParams A pointer to \ref CUpti_SassMetricsFlushData_Params + * + * \retval CUPTI_SUCCESS + * \retval CUPTI_ERROR_INVALID_PARAMETER if any \p pParams is not valid + * \retval CUPTI_ERROR_NOT_SUPPORTED indicates that the system/device doesn't support SASS metric data collection. + * \retval CUPTI_ERROR_INVALID_OPERATION if this API is called outside the enable/disable range.
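+ *
+ * A minimal flush sketch (an illustrative addition; error checking and frees
+ * omitted; buffers are sized from cuptiSassMetricsGetDataProperties()):
+ * \code
+ *   CUpti_SassMetricsGetDataProperties_Params propParams = { CUpti_SassMetricsGetDataProperties_Params_STRUCT_SIZE };
+ *   propParams.ctx = NULL;
+ *   cuptiSassMetricsGetDataProperties(&propParams);
+ *
+ *   CUpti_SassMetrics_Data* pData = (CUpti_SassMetrics_Data*)
+ *       calloc(propParams.numOfPatchedInstructionRecords, sizeof(CUpti_SassMetrics_Data));
+ *   for (size_t i = 0; i < propParams.numOfPatchedInstructionRecords; ++i) {
+ *     pData[i].structSize = sizeof(CUpti_SassMetrics_Data);  // assumption: size of this struct
+ *     pData[i].pInstanceValues = (CUpti_SassMetrics_InstanceValue*)
+ *         calloc(propParams.numOfInstances, sizeof(CUpti_SassMetrics_InstanceValue));
+ *   }
+ *
+ *   CUpti_SassMetricsFlushData_Params flushParams = { CUpti_SassMetricsFlushData_Params_STRUCT_SIZE };
+ *   flushParams.ctx = NULL;
+ *   flushParams.numOfPatchedInstructionRecords = propParams.numOfPatchedInstructionRecords;
+ *   flushParams.numOfInstances = propParams.numOfInstances;
+ *   flushParams.pMetricsData = pData;
+ *   cuptiSassMetricsFlushData(&flushParams);
+ * \endcode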
+ */
+CUptiResult CUPTIAPI cuptiSassMetricsFlushData(CUpti_SassMetricsFlushData_Params* pParams);
+
+/** @} */ /* END CUPTI_SASS_METRICS_API */
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility pop
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif // _CUPTI_SASS_METRICS_H_
\ No newline at end of file
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_target.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_target.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4b625d45c65288fa2ea7dc05819ee4dfc4cbdd3
--- /dev/null
+++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_target.h
@@ -0,0 +1,43 @@
+#if !defined(_CUPTI_TARGET_H_)
+#define _CUPTI_TARGET_H_
+
+/*
+CUPTI profiler target APIs.
+This file contains the CUPTI profiling APIs.
+*/
+#include <stddef.h>
+#include <stdint.h>
+#include <cupti_result.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility push(default)
+#endif
+
+#ifndef CUPTI_PROFILER_STRUCT_SIZE
+#define CUPTI_PROFILER_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_))
+#endif
+
+typedef struct CUpti_Device_GetChipName_Params
+{
+    size_t structSize;      //!< [in]
+    void* pPriv;            //!< [in] assign to NULL
+
+    size_t deviceIndex;     //!< [in]
+    const char* pChipName;  //!< [out]
+} CUpti_Device_GetChipName_Params;
+
+#define CUpti_Device_GetChipName_Params_STRUCT_SIZE CUPTI_PROFILER_STRUCT_SIZE(CUpti_Device_GetChipName_Params, pChipName)
+CUptiResult CUPTIAPI cuptiDeviceGetChipName(CUpti_Device_GetChipName_Params *pParams);
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility pop
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+#endif
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_version.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_version.h
new file mode 100644
index 0000000000000000000000000000000000000000..47fd0544f0aad0c9c0f6173b3b32f12ba4f5c1c1
--- /dev/null
+++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/cupti_version.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2010-2024 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(_CUPTI_VERSION_H_)
+#define _CUPTI_VERSION_H_
+
+#include <stdint.h>
+#include <cupti_result.h>
+
+#ifndef CUPTIAPI
+#ifdef _WIN32
+#define CUPTIAPI __stdcall
+#else
+#define CUPTIAPI
+#endif
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility push(default)
+#endif
+
+/**
+ * \defgroup CUPTI_VERSION_API CUPTI Version
+ * Function and macro to determine the CUPTI version.
+ * @{
+ */
+
+/**
+ * \brief The API version for this implementation of CUPTI.
+ *
+ * The API version for this implementation of CUPTI. This define, along
+ * with \ref cuptiGetVersion, can be used to dynamically detect whether the
+ * version of CUPTI compiled against matches the version of the loaded
+ * CUPTI library.
+ *
+ * v1  : CUDAToolsSDK 4.0
+ * v2  : CUDAToolsSDK 4.1
+ * v3  : CUDA Toolkit 5.0
+ * v4  : CUDA Toolkit 5.5
+ * v5  : CUDA Toolkit 6.0
+ * v6  : CUDA Toolkit 6.5
+ * v7  : CUDA Toolkit 6.5 (with sm_52 support)
+ * v8  : CUDA Toolkit 7.0
+ * v9  : CUDA Toolkit 8.0
+ * v10 : CUDA Toolkit 9.0
+ * v11 : CUDA Toolkit 9.1
+ * v12 : CUDA Toolkit 10.0, 10.1 and 10.2
+ * v13 : CUDA Toolkit 11.0
+ * v14 : CUDA Toolkit 11.1
+ * v15 : CUDA Toolkit 11.2, 11.3 and 11.4
+ * v16 : CUDA Toolkit 11.5
+ * v17 : CUDA Toolkit 11.6
+ * v18 : CUDA Toolkit 11.8
+ * v19 : CUDA Toolkit 12.0
+ * v20 : CUDA Toolkit 12.2
+ * v21 : CUDA Toolkit 12.3
+ * v22 : CUDA Toolkit 12.4
+ * v23 : CUDA Toolkit 12.5
+ * v24 : CUDA Toolkit 12.6
+ */
+#define CUPTI_API_VERSION 24
+
+/**
+ * \brief Get the CUPTI API version.
+ *
+ * Return the API version in \p *version.
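+ *
+ * An illustrative check (an added sketch, not part of the original header)
+ * that the loaded library matches the headers this code was compiled against:
+ * \code
+ * uint32_t version = 0;
+ * if (cuptiGetVersion(&version) == CUPTI_SUCCESS &&
+ *     version != CUPTI_API_VERSION) {
+ *     // Header/library version mismatch; behavior may not match the docs.
+ * }
+ * \endcode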
+ *
+ * \param version Returns the version
+ *
+ * \retval CUPTI_SUCCESS on success
+ * \retval CUPTI_ERROR_INVALID_PARAMETER if \p version is NULL
+ * \sa CUPTI_API_VERSION
+ */
+CUptiResult CUPTIAPI cuptiGetVersion(uint32_t *version);
+
+/** @} */ /* END CUPTI_VERSION_API */
+
+#if defined(__GNUC__) && defined(CUPTI_LIB)
+    #pragma GCC visibility pop
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /*_CUPTI_VERSION_H_*/
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a52e194b265d32f61d47bd3081f4958755bff46
--- /dev/null
+++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudaGL_meta.h
@@ -0,0 +1,116 @@
+// This file is generated. Any changes you make will be lost during the next clean build.
+
+// Dependent includes
+#ifdef __APPLE__
+#include <OpenGL/gl.h>
+#else
+#include <GL/gl.h>
+#endif
+
+// CUDA public interface, for type definitions and cu* function prototypes
+#include "cudaGL.h"
+
+
+// *************************************************************************
+// Definitions of structs to hold parameters for each function
+// *************************************************************************
+
+typedef struct cuGraphicsGLRegisterBuffer_params_st {
+    CUgraphicsResource *pCudaResource;
+    GLuint buffer;
+    unsigned int Flags;
+} cuGraphicsGLRegisterBuffer_params;
+
+typedef struct cuGraphicsGLRegisterImage_params_st {
+    CUgraphicsResource *pCudaResource;
+    GLuint image;
+    GLenum target;
+    unsigned int Flags;
+} cuGraphicsGLRegisterImage_params;
+
+typedef struct cuGLGetDevices_v2_params_st {
+    unsigned int *pCudaDeviceCount;
+    CUdevice *pCudaDevices;
+    unsigned int cudaDeviceCount;
+    CUGLDeviceList deviceList;
+} cuGLGetDevices_v2_params;
+
+typedef struct cuGLCtxCreate_v2_params_st {
+    CUcontext *pCtx;
+    unsigned int Flags;
+    CUdevice device;
+} cuGLCtxCreate_v2_params;
+
+typedef struct cuGLRegisterBufferObject_params_st {
+    GLuint buffer;
+} cuGLRegisterBufferObject_params;
+
+typedef struct cuGLMapBufferObject_v2_ptds_params_st {
+    CUdeviceptr *dptr;
+    size_t *size;
+    GLuint buffer;
+} cuGLMapBufferObject_v2_ptds_params;
+
+typedef struct cuGLUnmapBufferObject_params_st {
+    GLuint buffer;
+} cuGLUnmapBufferObject_params;
+
+typedef struct cuGLUnregisterBufferObject_params_st {
+    GLuint buffer;
+} cuGLUnregisterBufferObject_params;
+
+typedef struct cuGLSetBufferObjectMapFlags_params_st {
+    GLuint buffer;
+    unsigned int Flags;
+} cuGLSetBufferObjectMapFlags_params;
+
+typedef struct cuGLMapBufferObjectAsync_v2_ptsz_params_st {
+    CUdeviceptr *dptr;
+    size_t *size;
+    GLuint buffer;
+    CUstream hStream;
+} cuGLMapBufferObjectAsync_v2_ptsz_params;
+
+typedef struct cuGLUnmapBufferObjectAsync_params_st {
+    GLuint buffer;
+    CUstream hStream;
+} cuGLUnmapBufferObjectAsync_params;
+
+typedef struct cuGLGetDevices_params_st {
+    unsigned int *pCudaDeviceCount;
+    CUdevice *pCudaDevices;
+    unsigned int cudaDeviceCount;
+    CUGLDeviceList deviceList;
+} cuGLGetDevices_params;
+
+typedef struct cuGLMapBufferObject_v2_params_st {
+    CUdeviceptr *dptr;
+    size_t *size;
+    GLuint buffer;
+} cuGLMapBufferObject_v2_params;
+
+typedef struct cuGLMapBufferObjectAsync_v2_params_st {
+    CUdeviceptr *dptr;
+    size_t *size;
+    GLuint buffer;
+    CUstream hStream;
+} cuGLMapBufferObjectAsync_v2_params;
+
+typedef struct cuGLCtxCreate_params_st {
+    CUcontext *pCtx;
+    unsigned int Flags;
+    CUdevice device;
+} cuGLCtxCreate_params;
+
+typedef struct cuGLMapBufferObject_params_st {
+    CUdeviceptr_v1 *dptr;
+    unsigned int *size;
+    GLuint buffer;
+} cuGLMapBufferObject_params;
+
+typedef struct cuGLMapBufferObjectAsync_params_st {
+    CUdeviceptr_v1 *dptr;
+    unsigned int *size;
+    GLuint buffer;
+    CUstream hStream;
+} cuGLMapBufferObjectAsync_params;
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..abc603c8d9be21e012a9b1641330c2e203d623b2
--- /dev/null
+++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h
@@ -0,0 +1,46 @@
+// This file is generated. Any changes you make will be lost during the next clean build.
+
+// Dependent includes
+#include <vdpau/vdpau.h>
+
+// CUDA public interface, for type definitions and cu* function prototypes
+#include "cudaVDPAU.h"
+
+
+// *************************************************************************
+// Definitions of structs to hold parameters for each function
+// *************************************************************************
+
+typedef struct cuVDPAUGetDevice_params_st {
+    CUdevice *pDevice;
+    VdpDevice vdpDevice;
+    VdpGetProcAddress *vdpGetProcAddress;
+} cuVDPAUGetDevice_params;
+
+typedef struct cuVDPAUCtxCreate_v2_params_st {
+    CUcontext *pCtx;
+    unsigned int flags;
+    CUdevice device;
+    VdpDevice vdpDevice;
+    VdpGetProcAddress *vdpGetProcAddress;
+} cuVDPAUCtxCreate_v2_params;
+
+typedef struct cuGraphicsVDPAURegisterVideoSurface_params_st {
+    CUgraphicsResource *pCudaResource;
+    VdpVideoSurface vdpSurface;
+    unsigned int flags;
+} cuGraphicsVDPAURegisterVideoSurface_params;
+
+typedef struct cuGraphicsVDPAURegisterOutputSurface_params_st {
+    CUgraphicsResource *pCudaResource;
+    VdpOutputSurface vdpSurface;
+    unsigned int flags;
+} cuGraphicsVDPAURegisterOutputSurface_params;
+
+typedef struct cuVDPAUCtxCreate_params_st {
+    CUcontext *pCtx;
+    unsigned int flags;
+    CUdevice device;
+    VdpDevice vdpDevice;
+    VdpGetProcAddress *vdpGetProcAddress;
+} cuVDPAUCtxCreate_params;
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..eaba3ac5a760e338f1edc191609f6fa2a32adee7
--- /dev/null
+++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h
@@ -0,0 +1,71 @@
+// This file is generated. Any changes you make will be lost during the next clean build.
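+
+// Note (added for clarity, not part of the generated header): the _vNNNN
+// suffix in these runtime-API trace structs encodes the CUDA runtime version
+// that introduced the traced signature, e.g. cudaGLGetDevices_v4010_params
+// corresponds to the CUDA 4.1 signature and the _v3020 structs to CUDA 3.2.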
+ +// CUDA public interface, for type definitions and api function prototypes +#include "cuda_gl_interop.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaGLGetDevices_v4010_params_st { + unsigned int *pCudaDeviceCount; + int *pCudaDevices; + unsigned int cudaDeviceCount; + enum cudaGLDeviceList deviceList; +} cudaGLGetDevices_v4010_params; + +typedef struct cudaGraphicsGLRegisterImage_v3020_params_st { + struct cudaGraphicsResource **resource; + GLuint image; + GLenum target; + unsigned int flags; +} cudaGraphicsGLRegisterImage_v3020_params; + +typedef struct cudaGraphicsGLRegisterBuffer_v3020_params_st { + struct cudaGraphicsResource **resource; + GLuint buffer; + unsigned int flags; +} cudaGraphicsGLRegisterBuffer_v3020_params; + +typedef struct cudaGLSetGLDevice_v3020_params_st { + int device; +} cudaGLSetGLDevice_v3020_params; + +typedef struct cudaGLRegisterBufferObject_v3020_params_st { + GLuint bufObj; +} cudaGLRegisterBufferObject_v3020_params; + +typedef struct cudaGLMapBufferObject_v3020_params_st { + void **devPtr; + GLuint bufObj; +} cudaGLMapBufferObject_v3020_params; + +typedef struct cudaGLUnmapBufferObject_v3020_params_st { + GLuint bufObj; +} cudaGLUnmapBufferObject_v3020_params; + +typedef struct cudaGLUnregisterBufferObject_v3020_params_st { + GLuint bufObj; +} cudaGLUnregisterBufferObject_v3020_params; + +typedef struct cudaGLSetBufferObjectMapFlags_v3020_params_st { + GLuint bufObj; + unsigned int flags; +} cudaGLSetBufferObjectMapFlags_v3020_params; + +typedef struct cudaGLMapBufferObjectAsync_v3020_params_st { + void **devPtr; + GLuint bufObj; + cudaStream_t stream; +} cudaGLMapBufferObjectAsync_v3020_params; + +typedef struct cudaGLUnmapBufferObjectAsync_v3020_params_st { + GLuint bufObj; + cudaStream_t stream; +} cudaGLUnmapBufferObjectAsync_v3020_params; + +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..b4856671d9f3944bf7487f6d12c2655c64c678c6 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_meta.h @@ -0,0 +1,3616 @@ +// This file is generated. Any changes you make will be lost during the next clean build. 
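+
+// Illustrative sketch (an addition, not part of the generated header): these
+// *_params structs describe the arguments of intercepted driver-API calls.
+// In a CUPTI driver-API callback (see cupti_callbacks.h and
+// cupti_driver_cbid.h), CUpti_CallbackData::functionParams points at the
+// struct matching the callback id, roughly as follows:
+//
+//   void CUPTIAPI onDriverApi(void* userdata, CUpti_CallbackDomain domain,
+//                             CUpti_CallbackId cbid, const void* cbdata) {
+//       const CUpti_CallbackData* cbInfo = (const CUpti_CallbackData*)cbdata;
+//       if (cbid == CUPTI_DRIVER_TRACE_CBID_cuMemAlloc_v2) {
+//           const cuMemAlloc_v2_params* p =
+//               (const cuMemAlloc_v2_params*)cbInfo->functionParams;
+//           // p->bytesize is the requested allocation size in bytes.
+//       }
+//   }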
+ +// No dependent includes + +// CUDA public interface, for type definitions and cu* function prototypes +#include "cuda.h" + + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +typedef struct cuGetErrorString_params_st { + CUresult error; + const char **pStr; +} cuGetErrorString_params; + +typedef struct cuGetErrorName_params_st { + CUresult error; + const char **pStr; +} cuGetErrorName_params; + +typedef struct cuInit_params_st { + unsigned int Flags; +} cuInit_params; + +typedef struct cuDriverGetVersion_params_st { + int *driverVersion; +} cuDriverGetVersion_params; + +typedef struct cuDeviceGet_params_st { + CUdevice *device; + int ordinal; +} cuDeviceGet_params; + +typedef struct cuDeviceGetCount_params_st { + int *count; +} cuDeviceGetCount_params; + +typedef struct cuDeviceGetName_params_st { + char *name; + int len; + CUdevice dev; +} cuDeviceGetName_params; + +typedef struct cuDeviceGetUuid_params_st { + CUuuid *uuid; + CUdevice dev; +} cuDeviceGetUuid_params; + +typedef struct cuDeviceGetUuid_v2_params_st { + CUuuid *uuid; + CUdevice dev; +} cuDeviceGetUuid_v2_params; + +typedef struct cuDeviceGetLuid_params_st { + char *luid; + unsigned int *deviceNodeMask; + CUdevice dev; +} cuDeviceGetLuid_params; + +typedef struct cuDeviceTotalMem_v2_params_st { + size_t *bytes; + CUdevice dev; +} cuDeviceTotalMem_v2_params; + +typedef struct cuDeviceGetTexture1DLinearMaxWidth_params_st { + size_t *maxWidthInElements; + CUarray_format format; + unsigned numChannels; + CUdevice dev; +} cuDeviceGetTexture1DLinearMaxWidth_params; + +typedef struct cuDeviceGetAttribute_params_st { + int *pi; + CUdevice_attribute attrib; + CUdevice dev; +} cuDeviceGetAttribute_params; + +typedef struct cuDeviceGetNvSciSyncAttributes_params_st { + void *nvSciSyncAttrList; + CUdevice dev; + int flags; +} cuDeviceGetNvSciSyncAttributes_params; + +typedef struct cuDeviceSetMemPool_params_st { + CUdevice dev; + CUmemoryPool pool; +} cuDeviceSetMemPool_params; + +typedef struct cuDeviceGetMemPool_params_st { + CUmemoryPool *pool; + CUdevice dev; +} cuDeviceGetMemPool_params; + +typedef struct cuDeviceGetDefaultMemPool_params_st { + CUmemoryPool *pool_out; + CUdevice dev; +} cuDeviceGetDefaultMemPool_params; + +typedef struct cuDeviceGetExecAffinitySupport_params_st { + int *pi; + CUexecAffinityType type; + CUdevice dev; +} cuDeviceGetExecAffinitySupport_params; + +typedef struct cuFlushGPUDirectRDMAWrites_params_st { + CUflushGPUDirectRDMAWritesTarget target; + CUflushGPUDirectRDMAWritesScope scope; +} cuFlushGPUDirectRDMAWrites_params; + +typedef struct cuDeviceGetProperties_params_st { + CUdevprop *prop; + CUdevice dev; +} cuDeviceGetProperties_params; + +typedef struct cuDeviceComputeCapability_params_st { + int *major; + int *minor; + CUdevice dev; +} cuDeviceComputeCapability_params; + +typedef struct cuDevicePrimaryCtxRetain_params_st { + CUcontext *pctx; + CUdevice dev; +} cuDevicePrimaryCtxRetain_params; + +typedef struct cuDevicePrimaryCtxRelease_v2_params_st { + CUdevice dev; +} cuDevicePrimaryCtxRelease_v2_params; + +typedef struct cuDevicePrimaryCtxSetFlags_v2_params_st { + CUdevice dev; + unsigned int flags; +} cuDevicePrimaryCtxSetFlags_v2_params; + +typedef struct cuDevicePrimaryCtxGetState_params_st { + CUdevice dev; + unsigned int *flags; + int *active; +} cuDevicePrimaryCtxGetState_params; + +typedef struct 
cuDevicePrimaryCtxReset_v2_params_st { + CUdevice dev; +} cuDevicePrimaryCtxReset_v2_params; + +typedef struct cuCtxCreate_v2_params_st { + CUcontext *pctx; + unsigned int flags; + CUdevice dev; +} cuCtxCreate_v2_params; + +typedef struct cuCtxCreate_v3_params_st { + CUcontext *pctx; + CUexecAffinityParam *paramsArray; + int numParams; + unsigned int flags; + CUdevice dev; +} cuCtxCreate_v3_params; + +typedef struct cuCtxCreate_v4_params_st { + CUcontext *pctx; + CUctxCreateParams *ctxCreateParams; + unsigned int flags; + CUdevice dev; +} cuCtxCreate_v4_params; + +typedef struct cuCtxDestroy_v2_params_st { + CUcontext ctx; +} cuCtxDestroy_v2_params; + +typedef struct cuCtxPushCurrent_v2_params_st { + CUcontext ctx; +} cuCtxPushCurrent_v2_params; + +typedef struct cuCtxPopCurrent_v2_params_st { + CUcontext *pctx; +} cuCtxPopCurrent_v2_params; + +typedef struct cuCtxSetCurrent_params_st { + CUcontext ctx; +} cuCtxSetCurrent_params; + +typedef struct cuCtxGetCurrent_params_st { + CUcontext *pctx; +} cuCtxGetCurrent_params; + +typedef struct cuCtxGetDevice_params_st { + CUdevice *device; +} cuCtxGetDevice_params; + +typedef struct cuCtxGetFlags_params_st { + unsigned int *flags; +} cuCtxGetFlags_params; + +typedef struct cuCtxSetFlags_params_st { + unsigned int flags; +} cuCtxSetFlags_params; + +typedef struct cuCtxGetId_params_st { + CUcontext ctx; + unsigned long long *ctxId; +} cuCtxGetId_params; + +typedef struct cuCtxSetLimit_params_st { + CUlimit limit; + size_t value; +} cuCtxSetLimit_params; + +typedef struct cuCtxGetLimit_params_st { + size_t *pvalue; + CUlimit limit; +} cuCtxGetLimit_params; + +typedef struct cuCtxGetCacheConfig_params_st { + CUfunc_cache *pconfig; +} cuCtxGetCacheConfig_params; + +typedef struct cuCtxSetCacheConfig_params_st { + CUfunc_cache config; +} cuCtxSetCacheConfig_params; + +typedef struct cuCtxGetApiVersion_params_st { + CUcontext ctx; + unsigned int *version; +} cuCtxGetApiVersion_params; + +typedef struct cuCtxGetStreamPriorityRange_params_st { + int *leastPriority; + int *greatestPriority; +} cuCtxGetStreamPriorityRange_params; + +typedef struct cuCtxGetExecAffinity_params_st { + CUexecAffinityParam *pExecAffinity; + CUexecAffinityType type; +} cuCtxGetExecAffinity_params; + +typedef struct cuCtxRecordEvent_params_st { + CUcontext hCtx; + CUevent hEvent; +} cuCtxRecordEvent_params; + +typedef struct cuCtxWaitEvent_params_st { + CUcontext hCtx; + CUevent hEvent; +} cuCtxWaitEvent_params; + +typedef struct cuCtxAttach_params_st { + CUcontext *pctx; + unsigned int flags; +} cuCtxAttach_params; + +typedef struct cuCtxDetach_params_st { + CUcontext ctx; +} cuCtxDetach_params; + +typedef struct cuCtxGetSharedMemConfig_params_st { + CUsharedconfig *pConfig; +} cuCtxGetSharedMemConfig_params; + +typedef struct cuCtxSetSharedMemConfig_params_st { + CUsharedconfig config; +} cuCtxSetSharedMemConfig_params; + +typedef struct cuModuleLoad_params_st { + CUmodule *module; + const char *fname; +} cuModuleLoad_params; + +typedef struct cuModuleLoadData_params_st { + CUmodule *module; + const void *image; +} cuModuleLoadData_params; + +typedef struct cuModuleLoadDataEx_params_st { + CUmodule *module; + const void *image; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuModuleLoadDataEx_params; + +typedef struct cuModuleLoadFatBinary_params_st { + CUmodule *module; + const void *fatCubin; +} cuModuleLoadFatBinary_params; + +typedef struct cuModuleUnload_params_st { + CUmodule hmod; +} cuModuleUnload_params; + +typedef struct 
cuModuleGetLoadingMode_params_st { + CUmoduleLoadingMode *mode; +} cuModuleGetLoadingMode_params; + +typedef struct cuModuleGetFunction_params_st { + CUfunction *hfunc; + CUmodule hmod; + const char *name; +} cuModuleGetFunction_params; + +typedef struct cuModuleGetFunctionCount_params_st { + unsigned int *count; + CUmodule mod; +} cuModuleGetFunctionCount_params; + +typedef struct cuModuleEnumerateFunctions_params_st { + CUfunction *functions; + unsigned int numFunctions; + CUmodule mod; +} cuModuleEnumerateFunctions_params; + +typedef struct cuModuleGetGlobal_v2_params_st { + CUdeviceptr *dptr; + size_t *bytes; + CUmodule hmod; + const char *name; +} cuModuleGetGlobal_v2_params; + +typedef struct cuLinkCreate_v2_params_st { + unsigned int numOptions; + CUjit_option *options; + void **optionValues; + CUlinkState *stateOut; +} cuLinkCreate_v2_params; + +typedef struct cuLinkAddData_v2_params_st { + CUlinkState state; + CUjitInputType type; + void *data; + size_t size; + const char *name; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddData_v2_params; + +typedef struct cuLinkAddFile_v2_params_st { + CUlinkState state; + CUjitInputType type; + const char *path; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddFile_v2_params; + +typedef struct cuLinkComplete_params_st { + CUlinkState state; + void **cubinOut; + size_t *sizeOut; +} cuLinkComplete_params; + +typedef struct cuLinkDestroy_params_st { + CUlinkState state; +} cuLinkDestroy_params; + +typedef struct cuModuleGetTexRef_params_st { + CUtexref *pTexRef; + CUmodule hmod; + const char *name; +} cuModuleGetTexRef_params; + +typedef struct cuModuleGetSurfRef_params_st { + CUsurfref *pSurfRef; + CUmodule hmod; + const char *name; +} cuModuleGetSurfRef_params; + +typedef struct cuLibraryLoadData_params_st { + CUlibrary *library; + const void *code; + CUjit_option *jitOptions; + void **jitOptionsValues; + unsigned int numJitOptions; + CUlibraryOption *libraryOptions; + void **libraryOptionValues; + unsigned int numLibraryOptions; +} cuLibraryLoadData_params; + +typedef struct cuLibraryLoadFromFile_params_st { + CUlibrary *library; + const char *fileName; + CUjit_option *jitOptions; + void **jitOptionsValues; + unsigned int numJitOptions; + CUlibraryOption *libraryOptions; + void **libraryOptionValues; + unsigned int numLibraryOptions; +} cuLibraryLoadFromFile_params; + +typedef struct cuLibraryUnload_params_st { + CUlibrary library; +} cuLibraryUnload_params; + +typedef struct cuLibraryGetKernel_params_st { + CUkernel *pKernel; + CUlibrary library; + const char *name; +} cuLibraryGetKernel_params; + +typedef struct cuLibraryGetKernelCount_params_st { + unsigned int *count; + CUlibrary lib; +} cuLibraryGetKernelCount_params; + +typedef struct cuLibraryEnumerateKernels_params_st { + CUkernel *kernels; + unsigned int numKernels; + CUlibrary lib; +} cuLibraryEnumerateKernels_params; + +typedef struct cuLibraryGetModule_params_st { + CUmodule *pMod; + CUlibrary library; +} cuLibraryGetModule_params; + +typedef struct cuKernelGetFunction_params_st { + CUfunction *pFunc; + CUkernel kernel; +} cuKernelGetFunction_params; + +typedef struct cuKernelGetLibrary_params_st { + CUlibrary *pLib; + CUkernel kernel; +} cuKernelGetLibrary_params; + +typedef struct cuLibraryGetGlobal_params_st { + CUdeviceptr *dptr; + size_t *bytes; + CUlibrary library; + const char *name; +} cuLibraryGetGlobal_params; + +typedef struct cuLibraryGetManaged_params_st { + CUdeviceptr *dptr; + size_t 
*bytes; + CUlibrary library; + const char *name; +} cuLibraryGetManaged_params; + +typedef struct cuLibraryGetUnifiedFunction_params_st { + void **fptr; + CUlibrary library; + const char *symbol; +} cuLibraryGetUnifiedFunction_params; + +typedef struct cuKernelGetAttribute_params_st { + int *pi; + CUfunction_attribute attrib; + CUkernel kernel; + CUdevice dev; +} cuKernelGetAttribute_params; + +typedef struct cuKernelSetAttribute_params_st { + CUfunction_attribute attrib; + int val; + CUkernel kernel; + CUdevice dev; +} cuKernelSetAttribute_params; + +typedef struct cuKernelSetCacheConfig_params_st { + CUkernel kernel; + CUfunc_cache config; + CUdevice dev; +} cuKernelSetCacheConfig_params; + +typedef struct cuKernelGetName_params_st { + const char **name; + CUkernel hfunc; +} cuKernelGetName_params; + +typedef struct cuKernelGetParamInfo_params_st { + CUkernel kernel; + size_t paramIndex; + size_t *paramOffset; + size_t *paramSize; +} cuKernelGetParamInfo_params; + +typedef struct cuMemGetInfo_v2_params_st { + size_t *free; + size_t *total; +} cuMemGetInfo_v2_params; + +typedef struct cuMemAlloc_v2_params_st { + CUdeviceptr *dptr; + size_t bytesize; +} cuMemAlloc_v2_params; + +typedef struct cuMemAllocPitch_v2_params_st { + CUdeviceptr *dptr; + size_t *pPitch; + size_t WidthInBytes; + size_t Height; + unsigned int ElementSizeBytes; +} cuMemAllocPitch_v2_params; + +typedef struct cuMemFree_v2_params_st { + CUdeviceptr dptr; +} cuMemFree_v2_params; + +typedef struct cuMemGetAddressRange_v2_params_st { + CUdeviceptr *pbase; + size_t *psize; + CUdeviceptr dptr; +} cuMemGetAddressRange_v2_params; + +typedef struct cuMemAllocHost_v2_params_st { + void **pp; + size_t bytesize; +} cuMemAllocHost_v2_params; + +typedef struct cuMemFreeHost_params_st { + void *p; +} cuMemFreeHost_params; + +typedef struct cuMemHostAlloc_params_st { + void **pp; + size_t bytesize; + unsigned int Flags; +} cuMemHostAlloc_params; + +typedef struct cuMemHostGetDevicePointer_v2_params_st { + CUdeviceptr *pdptr; + void *p; + unsigned int Flags; +} cuMemHostGetDevicePointer_v2_params; + +typedef struct cuMemHostGetFlags_params_st { + unsigned int *pFlags; + void *p; +} cuMemHostGetFlags_params; + +typedef struct cuMemAllocManaged_params_st { + CUdeviceptr *dptr; + size_t bytesize; + unsigned int flags; +} cuMemAllocManaged_params; + +typedef struct cuDeviceRegisterAsyncNotification_params_st { + CUdevice device; + CUasyncCallback callbackFunc; + void *userData; + CUasyncCallbackHandle *callback; +} cuDeviceRegisterAsyncNotification_params; + +typedef struct cuDeviceUnregisterAsyncNotification_params_st { + CUdevice device; + CUasyncCallbackHandle callback; +} cuDeviceUnregisterAsyncNotification_params; + +typedef struct cuDeviceGetByPCIBusId_params_st { + CUdevice *dev; + const char *pciBusId; +} cuDeviceGetByPCIBusId_params; + +typedef struct cuDeviceGetPCIBusId_params_st { + char *pciBusId; + int len; + CUdevice dev; +} cuDeviceGetPCIBusId_params; + +typedef struct cuIpcGetEventHandle_params_st { + CUipcEventHandle *pHandle; + CUevent event; +} cuIpcGetEventHandle_params; + +typedef struct cuIpcOpenEventHandle_params_st { + CUevent *phEvent; + CUipcEventHandle handle; +} cuIpcOpenEventHandle_params; + +typedef struct cuIpcGetMemHandle_params_st { + CUipcMemHandle *pHandle; + CUdeviceptr dptr; +} cuIpcGetMemHandle_params; + +typedef struct cuIpcOpenMemHandle_v2_params_st { + CUdeviceptr *pdptr; + CUipcMemHandle handle; + unsigned int Flags; +} cuIpcOpenMemHandle_v2_params; + +typedef struct cuIpcCloseMemHandle_params_st { 
+ CUdeviceptr dptr; +} cuIpcCloseMemHandle_params; + +typedef struct cuMemHostRegister_v2_params_st { + void *p; + size_t bytesize; + unsigned int Flags; +} cuMemHostRegister_v2_params; + +typedef struct cuMemHostUnregister_params_st { + void *p; +} cuMemHostUnregister_params; + +typedef struct cuMemcpy_ptds_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; +} cuMemcpy_ptds_params; + +typedef struct cuMemcpyPeer_ptds_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; +} cuMemcpyPeer_ptds_params; + +typedef struct cuMemcpyHtoD_v2_ptds_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoD_v2_ptds_params; + +typedef struct cuMemcpyDtoH_v2_ptds_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoH_v2_ptds_params; + +typedef struct cuMemcpyDtoD_v2_ptds_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoD_v2_ptds_params; + +typedef struct cuMemcpyDtoA_v2_ptds_params_st { + CUarray dstArray; + size_t dstOffset; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoA_v2_ptds_params; + +typedef struct cuMemcpyAtoD_v2_ptds_params_st { + CUdeviceptr dstDevice; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoD_v2_ptds_params; + +typedef struct cuMemcpyHtoA_v2_ptds_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoA_v2_ptds_params; + +typedef struct cuMemcpyAtoH_v2_ptds_params_st { + void *dstHost; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoH_v2_ptds_params; + +typedef struct cuMemcpyAtoA_v2_ptds_params_st { + CUarray dstArray; + size_t dstOffset; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoA_v2_ptds_params; + +typedef struct cuMemcpy2D_v2_ptds_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2D_v2_ptds_params; + +typedef struct cuMemcpy2DUnaligned_v2_ptds_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2DUnaligned_v2_ptds_params; + +typedef struct cuMemcpy3D_v2_ptds_params_st { + const CUDA_MEMCPY3D *pCopy; +} cuMemcpy3D_v2_ptds_params; + +typedef struct cuMemcpy3DPeer_ptds_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; +} cuMemcpy3DPeer_ptds_params; + +typedef struct cuMemcpyAsync_ptsz_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAsync_ptsz_params; + +typedef struct cuMemcpyPeerAsync_ptsz_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; + CUstream hStream; +} cuMemcpyPeerAsync_ptsz_params; + +typedef struct cuMemcpyHtoDAsync_v2_ptsz_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoDAsync_v2_ptsz_params; + +typedef struct cuMemcpyDtoHAsync_v2_ptsz_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoHAsync_v2_ptsz_params; + +typedef struct cuMemcpyDtoDAsync_v2_ptsz_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoDAsync_v2_ptsz_params; + +typedef struct cuMemcpyHtoAAsync_v2_ptsz_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoAAsync_v2_ptsz_params; + +typedef struct cuMemcpyAtoHAsync_v2_ptsz_params_st { + void *dstHost; + CUarray 
srcArray; + size_t srcOffset; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAtoHAsync_v2_ptsz_params; + +typedef struct cuMemcpy2DAsync_v2_ptsz_params_st { + const CUDA_MEMCPY2D *pCopy; + CUstream hStream; +} cuMemcpy2DAsync_v2_ptsz_params; + +typedef struct cuMemcpy3DAsync_v2_ptsz_params_st { + const CUDA_MEMCPY3D *pCopy; + CUstream hStream; +} cuMemcpy3DAsync_v2_ptsz_params; + +typedef struct cuMemcpy3DPeerAsync_ptsz_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; + CUstream hStream; +} cuMemcpy3DPeerAsync_ptsz_params; + +typedef struct cuMemsetD8_v2_ptds_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; +} cuMemsetD8_v2_ptds_params; + +typedef struct cuMemsetD16_v2_ptds_params_st { + CUdeviceptr dstDevice; + unsigned short us; + size_t N; +} cuMemsetD16_v2_ptds_params; + +typedef struct cuMemsetD32_v2_ptds_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; +} cuMemsetD32_v2_ptds_params; + +typedef struct cuMemsetD2D8_v2_ptds_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; +} cuMemsetD2D8_v2_ptds_params; + +typedef struct cuMemsetD2D16_v2_ptds_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; +} cuMemsetD2D16_v2_ptds_params; + +typedef struct cuMemsetD2D32_v2_ptds_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; +} cuMemsetD2D32_v2_ptds_params; + +typedef struct cuMemsetD8Async_ptsz_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; + CUstream hStream; +} cuMemsetD8Async_ptsz_params; + +typedef struct cuMemsetD16Async_ptsz_params_st { + CUdeviceptr dstDevice; + unsigned short us; + size_t N; + CUstream hStream; +} cuMemsetD16Async_ptsz_params; + +typedef struct cuMemsetD32Async_ptsz_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; + CUstream hStream; +} cuMemsetD32Async_ptsz_params; + +typedef struct cuMemsetD2D8Async_ptsz_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D8Async_ptsz_params; + +typedef struct cuMemsetD2D16Async_ptsz_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D16Async_ptsz_params; + +typedef struct cuMemsetD2D32Async_ptsz_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D32Async_ptsz_params; + +typedef struct cuArrayCreate_v2_params_st { + CUarray *pHandle; + const CUDA_ARRAY_DESCRIPTOR *pAllocateArray; +} cuArrayCreate_v2_params; + +typedef struct cuArrayGetDescriptor_v2_params_st { + CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor; + CUarray hArray; +} cuArrayGetDescriptor_v2_params; + +typedef struct cuArrayGetSparseProperties_params_st { + CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties; + CUarray array; +} cuArrayGetSparseProperties_params; + +typedef struct cuMipmappedArrayGetSparseProperties_params_st { + CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties; + CUmipmappedArray mipmap; +} cuMipmappedArrayGetSparseProperties_params; + +typedef struct cuArrayGetMemoryRequirements_params_st { + CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements; + CUarray array; + CUdevice device; +} cuArrayGetMemoryRequirements_params; + +typedef struct cuMipmappedArrayGetMemoryRequirements_params_st { + CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements; + 
CUmipmappedArray mipmap; + CUdevice device; +} cuMipmappedArrayGetMemoryRequirements_params; + +typedef struct cuArrayGetPlane_params_st { + CUarray *pPlaneArray; + CUarray hArray; + unsigned int planeIdx; +} cuArrayGetPlane_params; + +typedef struct cuArrayDestroy_params_st { + CUarray hArray; +} cuArrayDestroy_params; + +typedef struct cuArray3DCreate_v2_params_st { + CUarray *pHandle; + const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray; +} cuArray3DCreate_v2_params; + +typedef struct cuArray3DGetDescriptor_v2_params_st { + CUDA_ARRAY3D_DESCRIPTOR *pArrayDescriptor; + CUarray hArray; +} cuArray3DGetDescriptor_v2_params; + +typedef struct cuMipmappedArrayCreate_params_st { + CUmipmappedArray *pHandle; + const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc; + unsigned int numMipmapLevels; +} cuMipmappedArrayCreate_params; + +typedef struct cuMipmappedArrayGetLevel_params_st { + CUarray *pLevelArray; + CUmipmappedArray hMipmappedArray; + unsigned int level; +} cuMipmappedArrayGetLevel_params; + +typedef struct cuMipmappedArrayDestroy_params_st { + CUmipmappedArray hMipmappedArray; +} cuMipmappedArrayDestroy_params; + +typedef struct cuMemGetHandleForAddressRange_params_st { + void *handle; + CUdeviceptr dptr; + size_t size; + CUmemRangeHandleType handleType; + unsigned long long flags; +} cuMemGetHandleForAddressRange_params; + +typedef struct cuMemAddressReserve_params_st { + CUdeviceptr *ptr; + size_t size; + size_t alignment; + CUdeviceptr addr; + unsigned long long flags; +} cuMemAddressReserve_params; + +typedef struct cuMemAddressFree_params_st { + CUdeviceptr ptr; + size_t size; +} cuMemAddressFree_params; + +typedef struct cuMemCreate_params_st { + CUmemGenericAllocationHandle *handle; + size_t size; + const CUmemAllocationProp *prop; + unsigned long long flags; +} cuMemCreate_params; + +typedef struct cuMemRelease_params_st { + CUmemGenericAllocationHandle handle; +} cuMemRelease_params; + +typedef struct cuMemMap_params_st { + CUdeviceptr ptr; + size_t size; + size_t offset; + CUmemGenericAllocationHandle handle; + unsigned long long flags; +} cuMemMap_params; + +typedef struct cuMemMapArrayAsync_ptsz_params_st { + CUarrayMapInfo *mapInfoList; + unsigned int count; + CUstream hStream; +} cuMemMapArrayAsync_ptsz_params; + +typedef struct cuMemUnmap_params_st { + CUdeviceptr ptr; + size_t size; +} cuMemUnmap_params; + +typedef struct cuMemSetAccess_params_st { + CUdeviceptr ptr; + size_t size; + const CUmemAccessDesc *desc; + size_t count; +} cuMemSetAccess_params; + +typedef struct cuMemGetAccess_params_st { + unsigned long long *flags; + const CUmemLocation *location; + CUdeviceptr ptr; +} cuMemGetAccess_params; + +typedef struct cuMemExportToShareableHandle_params_st { + void *shareableHandle; + CUmemGenericAllocationHandle handle; + CUmemAllocationHandleType handleType; + unsigned long long flags; +} cuMemExportToShareableHandle_params; + +typedef struct cuMemImportFromShareableHandle_params_st { + CUmemGenericAllocationHandle *handle; + void *osHandle; + CUmemAllocationHandleType shHandleType; +} cuMemImportFromShareableHandle_params; + +typedef struct cuMemGetAllocationGranularity_params_st { + size_t *granularity; + const CUmemAllocationProp *prop; + CUmemAllocationGranularity_flags option; +} cuMemGetAllocationGranularity_params; + +typedef struct cuMemGetAllocationPropertiesFromHandle_params_st { + CUmemAllocationProp *prop; + CUmemGenericAllocationHandle handle; +} cuMemGetAllocationPropertiesFromHandle_params; + +typedef struct cuMemRetainAllocationHandle_params_st { + 
CUmemGenericAllocationHandle *handle; + void *addr; +} cuMemRetainAllocationHandle_params; + +typedef struct cuMemFreeAsync_ptsz_params_st { + CUdeviceptr dptr; + CUstream hStream; +} cuMemFreeAsync_ptsz_params; + +typedef struct cuMemAllocAsync_ptsz_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUstream hStream; +} cuMemAllocAsync_ptsz_params; + +typedef struct cuMemPoolTrimTo_params_st { + CUmemoryPool pool; + size_t minBytesToKeep; +} cuMemPoolTrimTo_params; + +typedef struct cuMemPoolSetAttribute_params_st { + CUmemoryPool pool; + CUmemPool_attribute attr; + void *value; +} cuMemPoolSetAttribute_params; + +typedef struct cuMemPoolGetAttribute_params_st { + CUmemoryPool pool; + CUmemPool_attribute attr; + void *value; +} cuMemPoolGetAttribute_params; + +typedef struct cuMemPoolSetAccess_params_st { + CUmemoryPool pool; + const CUmemAccessDesc *map; + size_t count; +} cuMemPoolSetAccess_params; + +typedef struct cuMemPoolGetAccess_params_st { + CUmemAccess_flags *flags; + CUmemoryPool memPool; + CUmemLocation *location; +} cuMemPoolGetAccess_params; + +typedef struct cuMemPoolCreate_params_st { + CUmemoryPool *pool; + const CUmemPoolProps *poolProps; +} cuMemPoolCreate_params; + +typedef struct cuMemPoolDestroy_params_st { + CUmemoryPool pool; +} cuMemPoolDestroy_params; + +typedef struct cuMemAllocFromPoolAsync_ptsz_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUmemoryPool pool; + CUstream hStream; +} cuMemAllocFromPoolAsync_ptsz_params; + +typedef struct cuMemPoolExportToShareableHandle_params_st { + void *handle_out; + CUmemoryPool pool; + CUmemAllocationHandleType handleType; + unsigned long long flags; +} cuMemPoolExportToShareableHandle_params; + +typedef struct cuMemPoolImportFromShareableHandle_params_st { + CUmemoryPool *pool_out; + void *handle; + CUmemAllocationHandleType handleType; + unsigned long long flags; +} cuMemPoolImportFromShareableHandle_params; + +typedef struct cuMemPoolExportPointer_params_st { + CUmemPoolPtrExportData *shareData_out; + CUdeviceptr ptr; +} cuMemPoolExportPointer_params; + +typedef struct cuMemPoolImportPointer_params_st { + CUdeviceptr *ptr_out; + CUmemoryPool pool; + CUmemPoolPtrExportData *shareData; +} cuMemPoolImportPointer_params; + +typedef struct cuMulticastCreate_params_st { + CUmemGenericAllocationHandle *mcHandle; + const CUmulticastObjectProp *prop; +} cuMulticastCreate_params; + +typedef struct cuMulticastAddDevice_params_st { + CUmemGenericAllocationHandle mcHandle; + CUdevice dev; +} cuMulticastAddDevice_params; + +typedef struct cuMulticastBindMem_params_st { + CUmemGenericAllocationHandle mcHandle; + size_t mcOffset; + CUmemGenericAllocationHandle memHandle; + size_t memOffset; + size_t size; + unsigned long long flags; +} cuMulticastBindMem_params; + +typedef struct cuMulticastBindAddr_params_st { + CUmemGenericAllocationHandle mcHandle; + size_t mcOffset; + CUdeviceptr memptr; + size_t size; + unsigned long long flags; +} cuMulticastBindAddr_params; + +typedef struct cuMulticastUnbind_params_st { + CUmemGenericAllocationHandle mcHandle; + CUdevice dev; + size_t mcOffset; + size_t size; +} cuMulticastUnbind_params; + +typedef struct cuMulticastGetGranularity_params_st { + size_t *granularity; + const CUmulticastObjectProp *prop; + CUmulticastGranularity_flags option; +} cuMulticastGetGranularity_params; + +typedef struct cuPointerGetAttribute_params_st { + void *data; + CUpointer_attribute attribute; + CUdeviceptr ptr; +} cuPointerGetAttribute_params; + +typedef struct cuMemPrefetchAsync_ptsz_params_st { + 
CUdeviceptr devPtr; + size_t count; + CUdevice dstDevice; + CUstream hStream; +} cuMemPrefetchAsync_ptsz_params; + +typedef struct cuMemPrefetchAsync_v2_ptsz_params_st { + CUdeviceptr devPtr; + size_t count; + CUmemLocation location; + unsigned int flags; + CUstream hStream; +} cuMemPrefetchAsync_v2_ptsz_params; + +typedef struct cuMemAdvise_params_st { + CUdeviceptr devPtr; + size_t count; + CUmem_advise advice; + CUdevice device; +} cuMemAdvise_params; + +typedef struct cuMemAdvise_v2_params_st { + CUdeviceptr devPtr; + size_t count; + CUmem_advise advice; + CUmemLocation location; +} cuMemAdvise_v2_params; + +typedef struct cuMemRangeGetAttribute_params_st { + void *data; + size_t dataSize; + CUmem_range_attribute attribute; + CUdeviceptr devPtr; + size_t count; +} cuMemRangeGetAttribute_params; + +typedef struct cuMemRangeGetAttributes_params_st { + void **data; + size_t *dataSizes; + CUmem_range_attribute *attributes; + size_t numAttributes; + CUdeviceptr devPtr; + size_t count; +} cuMemRangeGetAttributes_params; + +typedef struct cuPointerSetAttribute_params_st { + const void *value; + CUpointer_attribute attribute; + CUdeviceptr ptr; +} cuPointerSetAttribute_params; + +typedef struct cuPointerGetAttributes_params_st { + unsigned int numAttributes; + CUpointer_attribute *attributes; + void **data; + CUdeviceptr ptr; +} cuPointerGetAttributes_params; + +typedef struct cuStreamCreate_params_st { + CUstream *phStream; + unsigned int Flags; +} cuStreamCreate_params; + +typedef struct cuStreamCreateWithPriority_params_st { + CUstream *phStream; + unsigned int flags; + int priority; +} cuStreamCreateWithPriority_params; + +typedef struct cuStreamGetPriority_ptsz_params_st { + CUstream hStream; + int *priority; +} cuStreamGetPriority_ptsz_params; + +typedef struct cuStreamGetFlags_ptsz_params_st { + CUstream hStream; + unsigned int *flags; +} cuStreamGetFlags_ptsz_params; + +typedef struct cuStreamGetId_ptsz_params_st { + CUstream hStream; + unsigned long long *streamId; +} cuStreamGetId_ptsz_params; + +typedef struct cuStreamGetCtx_ptsz_params_st { + CUstream hStream; + CUcontext *pctx; +} cuStreamGetCtx_ptsz_params; + +typedef struct cuStreamGetCtx_v2_ptsz_params_st { + CUstream hStream; + CUcontext *pCtx; + CUgreenCtx *pGreenCtx; +} cuStreamGetCtx_v2_ptsz_params; + +typedef struct cuStreamWaitEvent_ptsz_params_st { + CUstream hStream; + CUevent hEvent; + unsigned int Flags; +} cuStreamWaitEvent_ptsz_params; + +typedef struct cuStreamAddCallback_ptsz_params_st { + CUstream hStream; + CUstreamCallback callback; + void *userData; + unsigned int flags; +} cuStreamAddCallback_ptsz_params; + +typedef struct cuStreamBeginCapture_v2_ptsz_params_st { + CUstream hStream; + CUstreamCaptureMode mode; +} cuStreamBeginCapture_v2_ptsz_params; + +typedef struct cuStreamBeginCaptureToGraph_ptsz_params_st { + CUstream hStream; + CUgraph hGraph; + const CUgraphNode *dependencies; + const CUgraphEdgeData *dependencyData; + size_t numDependencies; + CUstreamCaptureMode mode; +} cuStreamBeginCaptureToGraph_ptsz_params; + +typedef struct cuThreadExchangeStreamCaptureMode_params_st { + CUstreamCaptureMode *mode; +} cuThreadExchangeStreamCaptureMode_params; + +typedef struct cuStreamEndCapture_ptsz_params_st { + CUstream hStream; + CUgraph *phGraph; +} cuStreamEndCapture_ptsz_params; + +typedef struct cuStreamIsCapturing_ptsz_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus; +} cuStreamIsCapturing_ptsz_params; + +typedef struct cuStreamGetCaptureInfo_v2_ptsz_params_st { + CUstream hStream; 
+ CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; + CUgraph *graph_out; + const CUgraphNode **dependencies_out; + size_t *numDependencies_out; +} cuStreamGetCaptureInfo_v2_ptsz_params; + +typedef struct cuStreamGetCaptureInfo_v3_ptsz_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; + CUgraph *graph_out; + const CUgraphNode **dependencies_out; + const CUgraphEdgeData **edgeData_out; + size_t *numDependencies_out; +} cuStreamGetCaptureInfo_v3_ptsz_params; + +typedef struct cuStreamUpdateCaptureDependencies_ptsz_params_st { + CUstream hStream; + CUgraphNode *dependencies; + size_t numDependencies; + unsigned int flags; +} cuStreamUpdateCaptureDependencies_ptsz_params; + +typedef struct cuStreamUpdateCaptureDependencies_v2_ptsz_params_st { + CUstream hStream; + CUgraphNode *dependencies; + const CUgraphEdgeData *dependencyData; + size_t numDependencies; + unsigned int flags; +} cuStreamUpdateCaptureDependencies_v2_ptsz_params; + +typedef struct cuStreamAttachMemAsync_ptsz_params_st { + CUstream hStream; + CUdeviceptr dptr; + size_t length; + unsigned int flags; +} cuStreamAttachMemAsync_ptsz_params; + +typedef struct cuStreamQuery_ptsz_params_st { + CUstream hStream; +} cuStreamQuery_ptsz_params; + +typedef struct cuStreamSynchronize_ptsz_params_st { + CUstream hStream; +} cuStreamSynchronize_ptsz_params; + +typedef struct cuStreamDestroy_v2_params_st { + CUstream hStream; +} cuStreamDestroy_v2_params; + +typedef struct cuStreamCopyAttributes_ptsz_params_st { + CUstream dst; + CUstream src; +} cuStreamCopyAttributes_ptsz_params; + +typedef struct cuStreamGetAttribute_ptsz_params_st { + CUstream hStream; + CUstreamAttrID attr; + CUstreamAttrValue *value_out; +} cuStreamGetAttribute_ptsz_params; + +typedef struct cuStreamSetAttribute_ptsz_params_st { + CUstream hStream; + CUstreamAttrID attr; + const CUstreamAttrValue *value; +} cuStreamSetAttribute_ptsz_params; + +typedef struct cuEventCreate_params_st { + CUevent *phEvent; + unsigned int Flags; +} cuEventCreate_params; + +typedef struct cuEventRecord_ptsz_params_st { + CUevent hEvent; + CUstream hStream; +} cuEventRecord_ptsz_params; + +typedef struct cuEventRecordWithFlags_ptsz_params_st { + CUevent hEvent; + CUstream hStream; + unsigned int flags; +} cuEventRecordWithFlags_ptsz_params; + +typedef struct cuEventQuery_params_st { + CUevent hEvent; +} cuEventQuery_params; + +typedef struct cuEventSynchronize_params_st { + CUevent hEvent; +} cuEventSynchronize_params; + +typedef struct cuEventDestroy_v2_params_st { + CUevent hEvent; +} cuEventDestroy_v2_params; + +typedef struct cuEventElapsedTime_params_st { + float *pMilliseconds; + CUevent hStart; + CUevent hEnd; +} cuEventElapsedTime_params; + +typedef struct cuImportExternalMemory_params_st { + CUexternalMemory *extMem_out; + const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc; +} cuImportExternalMemory_params; + +typedef struct cuExternalMemoryGetMappedBuffer_params_st { + CUdeviceptr *devPtr; + CUexternalMemory extMem; + const CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc; +} cuExternalMemoryGetMappedBuffer_params; + +typedef struct cuExternalMemoryGetMappedMipmappedArray_params_st { + CUmipmappedArray *mipmap; + CUexternalMemory extMem; + const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC *mipmapDesc; +} cuExternalMemoryGetMappedMipmappedArray_params; + +typedef struct cuDestroyExternalMemory_params_st { + CUexternalMemory extMem; +} cuDestroyExternalMemory_params; + +typedef struct cuImportExternalSemaphore_params_st { + 
CUexternalSemaphore *extSem_out; + const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc; +} cuImportExternalSemaphore_params; + +typedef struct cuSignalExternalSemaphoresAsync_ptsz_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream stream; +} cuSignalExternalSemaphoresAsync_ptsz_params; + +typedef struct cuWaitExternalSemaphoresAsync_ptsz_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream stream; +} cuWaitExternalSemaphoresAsync_ptsz_params; + +typedef struct cuDestroyExternalSemaphore_params_st { + CUexternalSemaphore extSem; +} cuDestroyExternalSemaphore_params; + +typedef struct cuStreamWaitValue32_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_v2_ptsz_params; + +typedef struct cuStreamWaitValue64_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_v2_ptsz_params; + +typedef struct cuStreamWriteValue32_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_v2_ptsz_params; + +typedef struct cuStreamWriteValue64_v2_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_v2_ptsz_params; + +typedef struct cuStreamBatchMemOp_v2_ptsz_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_v2_ptsz_params; + +typedef struct cuFuncGetAttribute_params_st { + int *pi; + CUfunction_attribute attrib; + CUfunction hfunc; +} cuFuncGetAttribute_params; + +typedef struct cuFuncSetAttribute_params_st { + CUfunction hfunc; + CUfunction_attribute attrib; + int value; +} cuFuncSetAttribute_params; + +typedef struct cuFuncSetCacheConfig_params_st { + CUfunction hfunc; + CUfunc_cache config; +} cuFuncSetCacheConfig_params; + +typedef struct cuFuncGetModule_params_st { + CUmodule *hmod; + CUfunction hfunc; +} cuFuncGetModule_params; + +typedef struct cuFuncGetName_params_st { + const char **name; + CUfunction hfunc; +} cuFuncGetName_params; + +typedef struct cuFuncGetParamInfo_params_st { + CUfunction func; + size_t paramIndex; + size_t *paramOffset; + size_t *paramSize; +} cuFuncGetParamInfo_params; + +typedef struct cuFuncIsLoaded_params_st { + CUfunctionLoadingState *state; + CUfunction function; +} cuFuncIsLoaded_params; + +typedef struct cuFuncLoad_params_st { + CUfunction function; +} cuFuncLoad_params; + +typedef struct cuLaunchKernel_ptsz_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; + void **extra; +} cuLaunchKernel_ptsz_params; + +typedef struct cuLaunchKernelEx_ptsz_params_st { + const CUlaunchConfig *config; + CUfunction f; + void **kernelParams; + void **extra; +} cuLaunchKernelEx_ptsz_params; + +typedef struct cuLaunchCooperativeKernel_ptsz_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; +} cuLaunchCooperativeKernel_ptsz_params; 
+ +typedef struct cuLaunchCooperativeKernelMultiDevice_params_st { + CUDA_LAUNCH_PARAMS *launchParamsList; + unsigned int numDevices; + unsigned int flags; +} cuLaunchCooperativeKernelMultiDevice_params; + +typedef struct cuLaunchHostFunc_ptsz_params_st { + CUstream hStream; + CUhostFn fn; + void *userData; +} cuLaunchHostFunc_ptsz_params; + +typedef struct cuFuncSetBlockShape_params_st { + CUfunction hfunc; + int x; + int y; + int z; +} cuFuncSetBlockShape_params; + +typedef struct cuFuncSetSharedSize_params_st { + CUfunction hfunc; + unsigned int bytes; +} cuFuncSetSharedSize_params; + +typedef struct cuParamSetSize_params_st { + CUfunction hfunc; + unsigned int numbytes; +} cuParamSetSize_params; + +typedef struct cuParamSeti_params_st { + CUfunction hfunc; + int offset; + unsigned int value; +} cuParamSeti_params; + +typedef struct cuParamSetf_params_st { + CUfunction hfunc; + int offset; + float value; +} cuParamSetf_params; + +typedef struct cuParamSetv_params_st { + CUfunction hfunc; + int offset; + void *ptr; + unsigned int numbytes; +} cuParamSetv_params; + +typedef struct cuLaunch_params_st { + CUfunction f; +} cuLaunch_params; + +typedef struct cuLaunchGrid_params_st { + CUfunction f; + int grid_width; + int grid_height; +} cuLaunchGrid_params; + +typedef struct cuLaunchGridAsync_params_st { + CUfunction f; + int grid_width; + int grid_height; + CUstream hStream; +} cuLaunchGridAsync_params; + +typedef struct cuParamSetTexRef_params_st { + CUfunction hfunc; + int texunit; + CUtexref hTexRef; +} cuParamSetTexRef_params; + +typedef struct cuFuncSetSharedMemConfig_params_st { + CUfunction hfunc; + CUsharedconfig config; +} cuFuncSetSharedMemConfig_params; + +typedef struct cuGraphCreate_params_st { + CUgraph *phGraph; + unsigned int flags; +} cuGraphCreate_params; + +typedef struct cuGraphAddKernelNode_v2_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphAddKernelNode_v2_params; + +typedef struct cuGraphKernelNodeGetParams_v2_params_st { + CUgraphNode hNode; + CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphKernelNodeGetParams_v2_params; + +typedef struct cuGraphKernelNodeSetParams_v2_params_st { + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphKernelNodeSetParams_v2_params; + +typedef struct cuGraphAddMemcpyNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_MEMCPY3D *copyParams; + CUcontext ctx; +} cuGraphAddMemcpyNode_params; + +typedef struct cuGraphMemcpyNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_MEMCPY3D *nodeParams; +} cuGraphMemcpyNodeGetParams_params; + +typedef struct cuGraphMemcpyNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_MEMCPY3D *nodeParams; +} cuGraphMemcpyNodeSetParams_params; + +typedef struct cuGraphAddMemsetNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_MEMSET_NODE_PARAMS *memsetParams; + CUcontext ctx; +} cuGraphAddMemsetNode_params; + +typedef struct cuGraphMemsetNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_MEMSET_NODE_PARAMS *nodeParams; +} cuGraphMemsetNodeGetParams_params; + +typedef struct cuGraphMemsetNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_MEMSET_NODE_PARAMS *nodeParams; +} cuGraphMemsetNodeSetParams_params; + +typedef struct cuGraphAddHostNode_params_st { + CUgraphNode 
*phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphAddHostNode_params; + +typedef struct cuGraphHostNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphHostNodeGetParams_params; + +typedef struct cuGraphHostNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphHostNodeSetParams_params; + +typedef struct cuGraphAddChildGraphNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUgraph childGraph; +} cuGraphAddChildGraphNode_params; + +typedef struct cuGraphChildGraphNodeGetGraph_params_st { + CUgraphNode hNode; + CUgraph *phGraph; +} cuGraphChildGraphNodeGetGraph_params; + +typedef struct cuGraphAddEmptyNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; +} cuGraphAddEmptyNode_params; + +typedef struct cuGraphAddEventRecordNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUevent event; +} cuGraphAddEventRecordNode_params; + +typedef struct cuGraphEventRecordNodeGetEvent_params_st { + CUgraphNode hNode; + CUevent *event_out; +} cuGraphEventRecordNodeGetEvent_params; + +typedef struct cuGraphEventRecordNodeSetEvent_params_st { + CUgraphNode hNode; + CUevent event; +} cuGraphEventRecordNodeSetEvent_params; + +typedef struct cuGraphAddEventWaitNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUevent event; +} cuGraphAddEventWaitNode_params; + +typedef struct cuGraphEventWaitNodeGetEvent_params_st { + CUgraphNode hNode; + CUevent *event_out; +} cuGraphEventWaitNodeGetEvent_params; + +typedef struct cuGraphEventWaitNodeSetEvent_params_st { + CUgraphNode hNode; + CUevent event; +} cuGraphEventWaitNodeSetEvent_params; + +typedef struct cuGraphAddExternalSemaphoresSignalNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams; +} cuGraphAddExternalSemaphoresSignalNode_params; + +typedef struct cuGraphExternalSemaphoresSignalNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *params_out; +} cuGraphExternalSemaphoresSignalNodeGetParams_params; + +typedef struct cuGraphExternalSemaphoresSignalNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams; +} cuGraphExternalSemaphoresSignalNodeSetParams_params; + +typedef struct cuGraphAddExternalSemaphoresWaitNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams; +} cuGraphAddExternalSemaphoresWaitNode_params; + +typedef struct cuGraphExternalSemaphoresWaitNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_EXT_SEM_WAIT_NODE_PARAMS *params_out; +} cuGraphExternalSemaphoresWaitNodeGetParams_params; + +typedef struct cuGraphExternalSemaphoresWaitNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams; +} cuGraphExternalSemaphoresWaitNodeSetParams_params; + +typedef struct cuGraphAddBatchMemOpNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const 
CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams; +} cuGraphAddBatchMemOpNode_params; + +typedef struct cuGraphBatchMemOpNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out; +} cuGraphBatchMemOpNodeGetParams_params; + +typedef struct cuGraphBatchMemOpNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams; +} cuGraphBatchMemOpNodeSetParams_params; + +typedef struct cuGraphExecBatchMemOpNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams; +} cuGraphExecBatchMemOpNodeSetParams_params; + +typedef struct cuGraphAddMemAllocNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams; +} cuGraphAddMemAllocNode_params; + +typedef struct cuGraphMemAllocNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_MEM_ALLOC_NODE_PARAMS *params_out; +} cuGraphMemAllocNodeGetParams_params; + +typedef struct cuGraphAddMemFreeNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUdeviceptr dptr; +} cuGraphAddMemFreeNode_params; + +typedef struct cuGraphMemFreeNodeGetParams_params_st { + CUgraphNode hNode; + CUdeviceptr *dptr_out; +} cuGraphMemFreeNodeGetParams_params; + +typedef struct cuDeviceGraphMemTrim_params_st { + CUdevice device; +} cuDeviceGraphMemTrim_params; + +typedef struct cuDeviceGetGraphMemAttribute_params_st { + CUdevice device; + CUgraphMem_attribute attr; + void *value; +} cuDeviceGetGraphMemAttribute_params; + +typedef struct cuDeviceSetGraphMemAttribute_params_st { + CUdevice device; + CUgraphMem_attribute attr; + void *value; +} cuDeviceSetGraphMemAttribute_params; + +typedef struct cuGraphClone_params_st { + CUgraph *phGraphClone; + CUgraph originalGraph; +} cuGraphClone_params; + +typedef struct cuGraphNodeFindInClone_params_st { + CUgraphNode *phNode; + CUgraphNode hOriginalNode; + CUgraph hClonedGraph; +} cuGraphNodeFindInClone_params; + +typedef struct cuGraphNodeGetType_params_st { + CUgraphNode hNode; + CUgraphNodeType *type; +} cuGraphNodeGetType_params; + +typedef struct cuGraphGetNodes_params_st { + CUgraph hGraph; + CUgraphNode *nodes; + size_t *numNodes; +} cuGraphGetNodes_params; + +typedef struct cuGraphGetRootNodes_params_st { + CUgraph hGraph; + CUgraphNode *rootNodes; + size_t *numRootNodes; +} cuGraphGetRootNodes_params; + +typedef struct cuGraphGetEdges_params_st { + CUgraph hGraph; + CUgraphNode *from; + CUgraphNode *to; + size_t *numEdges; +} cuGraphGetEdges_params; + +typedef struct cuGraphGetEdges_v2_params_st { + CUgraph hGraph; + CUgraphNode *from; + CUgraphNode *to; + CUgraphEdgeData *edgeData; + size_t *numEdges; +} cuGraphGetEdges_v2_params; + +typedef struct cuGraphNodeGetDependencies_params_st { + CUgraphNode hNode; + CUgraphNode *dependencies; + size_t *numDependencies; +} cuGraphNodeGetDependencies_params; + +typedef struct cuGraphNodeGetDependencies_v2_params_st { + CUgraphNode hNode; + CUgraphNode *dependencies; + CUgraphEdgeData *edgeData; + size_t *numDependencies; +} cuGraphNodeGetDependencies_v2_params; + +typedef struct cuGraphNodeGetDependentNodes_params_st { + CUgraphNode hNode; + CUgraphNode *dependentNodes; + size_t *numDependentNodes; +} cuGraphNodeGetDependentNodes_params; + +typedef struct cuGraphNodeGetDependentNodes_v2_params_st { + CUgraphNode hNode; + CUgraphNode *dependentNodes; + CUgraphEdgeData *edgeData; + size_t 
*numDependentNodes; +} cuGraphNodeGetDependentNodes_v2_params; + +typedef struct cuGraphAddDependencies_params_st { + CUgraph hGraph; + const CUgraphNode *from; + const CUgraphNode *to; + size_t numDependencies; +} cuGraphAddDependencies_params; + +typedef struct cuGraphAddDependencies_v2_params_st { + CUgraph hGraph; + const CUgraphNode *from; + const CUgraphNode *to; + const CUgraphEdgeData *edgeData; + size_t numDependencies; +} cuGraphAddDependencies_v2_params; + +typedef struct cuGraphRemoveDependencies_params_st { + CUgraph hGraph; + const CUgraphNode *from; + const CUgraphNode *to; + size_t numDependencies; +} cuGraphRemoveDependencies_params; + +typedef struct cuGraphRemoveDependencies_v2_params_st { + CUgraph hGraph; + const CUgraphNode *from; + const CUgraphNode *to; + const CUgraphEdgeData *edgeData; + size_t numDependencies; +} cuGraphRemoveDependencies_v2_params; + +typedef struct cuGraphDestroyNode_params_st { + CUgraphNode hNode; +} cuGraphDestroyNode_params; + +typedef struct cuGraphInstantiateWithFlags_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + unsigned long long flags; +} cuGraphInstantiateWithFlags_params; + +typedef struct cuGraphInstantiateWithParams_ptsz_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams; +} cuGraphInstantiateWithParams_ptsz_params; + +typedef struct cuGraphExecGetFlags_params_st { + CUgraphExec hGraphExec; + cuuint64_t *flags; +} cuGraphExecGetFlags_params; + +typedef struct cuGraphExecKernelNodeSetParams_v2_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS *nodeParams; +} cuGraphExecKernelNodeSetParams_v2_params; + +typedef struct cuGraphExecMemcpyNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_MEMCPY3D *copyParams; + CUcontext ctx; +} cuGraphExecMemcpyNodeSetParams_params; + +typedef struct cuGraphExecMemsetNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_MEMSET_NODE_PARAMS *memsetParams; + CUcontext ctx; +} cuGraphExecMemsetNodeSetParams_params; + +typedef struct cuGraphExecHostNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_HOST_NODE_PARAMS *nodeParams; +} cuGraphExecHostNodeSetParams_params; + +typedef struct cuGraphExecChildGraphNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + CUgraph childGraph; +} cuGraphExecChildGraphNodeSetParams_params; + +typedef struct cuGraphExecEventRecordNodeSetEvent_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + CUevent event; +} cuGraphExecEventRecordNodeSetEvent_params; + +typedef struct cuGraphExecEventWaitNodeSetEvent_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + CUevent event; +} cuGraphExecEventWaitNodeSetEvent_params; + +typedef struct cuGraphExecExternalSemaphoresSignalNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams; +} cuGraphExecExternalSemaphoresSignalNodeSetParams_params; + +typedef struct cuGraphExecExternalSemaphoresWaitNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams; +} cuGraphExecExternalSemaphoresWaitNodeSetParams_params; + +typedef struct cuGraphNodeSetEnabled_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + unsigned int isEnabled; +} cuGraphNodeSetEnabled_params; + +typedef struct cuGraphNodeGetEnabled_params_st { + CUgraphExec hGraphExec; + CUgraphNode 
hNode; + unsigned int *isEnabled; +} cuGraphNodeGetEnabled_params; + +typedef struct cuGraphUpload_ptsz_params_st { + CUgraphExec hGraphExec; + CUstream hStream; +} cuGraphUpload_ptsz_params; + +typedef struct cuGraphLaunch_ptsz_params_st { + CUgraphExec hGraphExec; + CUstream hStream; +} cuGraphLaunch_ptsz_params; + +typedef struct cuGraphExecDestroy_params_st { + CUgraphExec hGraphExec; +} cuGraphExecDestroy_params; + +typedef struct cuGraphDestroy_params_st { + CUgraph hGraph; +} cuGraphDestroy_params; + +typedef struct cuGraphExecUpdate_v2_params_st { + CUgraphExec hGraphExec; + CUgraph hGraph; + CUgraphExecUpdateResultInfo *resultInfo; +} cuGraphExecUpdate_v2_params; + +typedef struct cuGraphKernelNodeCopyAttributes_params_st { + CUgraphNode dst; + CUgraphNode src; +} cuGraphKernelNodeCopyAttributes_params; + +typedef struct cuGraphKernelNodeGetAttribute_params_st { + CUgraphNode hNode; + CUkernelNodeAttrID attr; + CUkernelNodeAttrValue *value_out; +} cuGraphKernelNodeGetAttribute_params; + +typedef struct cuGraphKernelNodeSetAttribute_params_st { + CUgraphNode hNode; + CUkernelNodeAttrID attr; + const CUkernelNodeAttrValue *value; +} cuGraphKernelNodeSetAttribute_params; + +typedef struct cuGraphDebugDotPrint_params_st { + CUgraph hGraph; + const char *path; + unsigned int flags; +} cuGraphDebugDotPrint_params; + +typedef struct cuUserObjectCreate_params_st { + CUuserObject *object_out; + void *ptr; + CUhostFn destroy; + unsigned int initialRefcount; + unsigned int flags; +} cuUserObjectCreate_params; + +typedef struct cuUserObjectRetain_params_st { + CUuserObject object; + unsigned int count; +} cuUserObjectRetain_params; + +typedef struct cuUserObjectRelease_params_st { + CUuserObject object; + unsigned int count; +} cuUserObjectRelease_params; + +typedef struct cuGraphRetainUserObject_params_st { + CUgraph graph; + CUuserObject object; + unsigned int count; + unsigned int flags; +} cuGraphRetainUserObject_params; + +typedef struct cuGraphReleaseUserObject_params_st { + CUgraph graph; + CUuserObject object; + unsigned int count; +} cuGraphReleaseUserObject_params; + +typedef struct cuGraphAddNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + CUgraphNodeParams *nodeParams; +} cuGraphAddNode_params; + +typedef struct cuGraphAddNode_v2_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + const CUgraphEdgeData *dependencyData; + size_t numDependencies; + CUgraphNodeParams *nodeParams; +} cuGraphAddNode_v2_params; + +typedef struct cuGraphNodeSetParams_params_st { + CUgraphNode hNode; + CUgraphNodeParams *nodeParams; +} cuGraphNodeSetParams_params; + +typedef struct cuGraphExecNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + CUgraphNodeParams *nodeParams; +} cuGraphExecNodeSetParams_params; + +typedef struct cuGraphConditionalHandleCreate_params_st { + CUgraphConditionalHandle *pHandle_out; + CUgraph hGraph; + CUcontext ctx; + unsigned int defaultLaunchValue; + unsigned int flags; +} cuGraphConditionalHandleCreate_params; + +typedef struct cuOccupancyMaxActiveBlocksPerMultiprocessor_params_st { + int *numBlocks; + CUfunction func; + int blockSize; + size_t dynamicSMemSize; +} cuOccupancyMaxActiveBlocksPerMultiprocessor_params; + +typedef struct cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params_st { + int *numBlocks; + CUfunction func; + int blockSize; + size_t dynamicSMemSize; + unsigned int flags; +} 
cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_params; + +typedef struct cuOccupancyMaxPotentialBlockSize_params_st { + int *minGridSize; + int *blockSize; + CUfunction func; + CUoccupancyB2DSize blockSizeToDynamicSMemSize; + size_t dynamicSMemSize; + int blockSizeLimit; +} cuOccupancyMaxPotentialBlockSize_params; + +typedef struct cuOccupancyMaxPotentialBlockSizeWithFlags_params_st { + int *minGridSize; + int *blockSize; + CUfunction func; + CUoccupancyB2DSize blockSizeToDynamicSMemSize; + size_t dynamicSMemSize; + int blockSizeLimit; + unsigned int flags; +} cuOccupancyMaxPotentialBlockSizeWithFlags_params; + +typedef struct cuOccupancyAvailableDynamicSMemPerBlock_params_st { + size_t *dynamicSmemSize; + CUfunction func; + int numBlocks; + int blockSize; +} cuOccupancyAvailableDynamicSMemPerBlock_params; + +typedef struct cuOccupancyMaxPotentialClusterSize_params_st { + int *clusterSize; + CUfunction func; + const CUlaunchConfig *config; +} cuOccupancyMaxPotentialClusterSize_params; + +typedef struct cuOccupancyMaxActiveClusters_params_st { + int *numClusters; + CUfunction func; + const CUlaunchConfig *config; +} cuOccupancyMaxActiveClusters_params; + +typedef struct cuTexRefSetArray_params_st { + CUtexref hTexRef; + CUarray hArray; + unsigned int Flags; +} cuTexRefSetArray_params; + +typedef struct cuTexRefSetMipmappedArray_params_st { + CUtexref hTexRef; + CUmipmappedArray hMipmappedArray; + unsigned int Flags; +} cuTexRefSetMipmappedArray_params; + +typedef struct cuTexRefSetAddress_v2_params_st { + size_t *ByteOffset; + CUtexref hTexRef; + CUdeviceptr dptr; + size_t bytes; +} cuTexRefSetAddress_v2_params; + +typedef struct cuTexRefSetAddress2D_v3_params_st { + CUtexref hTexRef; + const CUDA_ARRAY_DESCRIPTOR *desc; + CUdeviceptr dptr; + size_t Pitch; +} cuTexRefSetAddress2D_v3_params; + +typedef struct cuTexRefSetFormat_params_st { + CUtexref hTexRef; + CUarray_format fmt; + int NumPackedComponents; +} cuTexRefSetFormat_params; + +typedef struct cuTexRefSetAddressMode_params_st { + CUtexref hTexRef; + int dim; + CUaddress_mode am; +} cuTexRefSetAddressMode_params; + +typedef struct cuTexRefSetFilterMode_params_st { + CUtexref hTexRef; + CUfilter_mode fm; +} cuTexRefSetFilterMode_params; + +typedef struct cuTexRefSetMipmapFilterMode_params_st { + CUtexref hTexRef; + CUfilter_mode fm; +} cuTexRefSetMipmapFilterMode_params; + +typedef struct cuTexRefSetMipmapLevelBias_params_st { + CUtexref hTexRef; + float bias; +} cuTexRefSetMipmapLevelBias_params; + +typedef struct cuTexRefSetMipmapLevelClamp_params_st { + CUtexref hTexRef; + float minMipmapLevelClamp; + float maxMipmapLevelClamp; +} cuTexRefSetMipmapLevelClamp_params; + +typedef struct cuTexRefSetMaxAnisotropy_params_st { + CUtexref hTexRef; + unsigned int maxAniso; +} cuTexRefSetMaxAnisotropy_params; + +typedef struct cuTexRefSetBorderColor_params_st { + CUtexref hTexRef; + float *pBorderColor; +} cuTexRefSetBorderColor_params; + +typedef struct cuTexRefSetFlags_params_st { + CUtexref hTexRef; + unsigned int Flags; +} cuTexRefSetFlags_params; + +typedef struct cuTexRefGetAddress_v2_params_st { + CUdeviceptr *pdptr; + CUtexref hTexRef; +} cuTexRefGetAddress_v2_params; + +typedef struct cuTexRefGetArray_params_st { + CUarray *phArray; + CUtexref hTexRef; +} cuTexRefGetArray_params; + +typedef struct cuTexRefGetMipmappedArray_params_st { + CUmipmappedArray *phMipmappedArray; + CUtexref hTexRef; +} cuTexRefGetMipmappedArray_params; + +typedef struct cuTexRefGetAddressMode_params_st { + CUaddress_mode *pam; + CUtexref hTexRef; + 
int dim; +} cuTexRefGetAddressMode_params; + +typedef struct cuTexRefGetFilterMode_params_st { + CUfilter_mode *pfm; + CUtexref hTexRef; +} cuTexRefGetFilterMode_params; + +typedef struct cuTexRefGetFormat_params_st { + CUarray_format *pFormat; + int *pNumChannels; + CUtexref hTexRef; +} cuTexRefGetFormat_params; + +typedef struct cuTexRefGetMipmapFilterMode_params_st { + CUfilter_mode *pfm; + CUtexref hTexRef; +} cuTexRefGetMipmapFilterMode_params; + +typedef struct cuTexRefGetMipmapLevelBias_params_st { + float *pbias; + CUtexref hTexRef; +} cuTexRefGetMipmapLevelBias_params; + +typedef struct cuTexRefGetMipmapLevelClamp_params_st { + float *pminMipmapLevelClamp; + float *pmaxMipmapLevelClamp; + CUtexref hTexRef; +} cuTexRefGetMipmapLevelClamp_params; + +typedef struct cuTexRefGetMaxAnisotropy_params_st { + int *pmaxAniso; + CUtexref hTexRef; +} cuTexRefGetMaxAnisotropy_params; + +typedef struct cuTexRefGetBorderColor_params_st { + float *pBorderColor; + CUtexref hTexRef; +} cuTexRefGetBorderColor_params; + +typedef struct cuTexRefGetFlags_params_st { + unsigned int *pFlags; + CUtexref hTexRef; +} cuTexRefGetFlags_params; + +typedef struct cuTexRefCreate_params_st { + CUtexref *pTexRef; +} cuTexRefCreate_params; + +typedef struct cuTexRefDestroy_params_st { + CUtexref hTexRef; +} cuTexRefDestroy_params; + +typedef struct cuSurfRefSetArray_params_st { + CUsurfref hSurfRef; + CUarray hArray; + unsigned int Flags; +} cuSurfRefSetArray_params; + +typedef struct cuSurfRefGetArray_params_st { + CUarray *phArray; + CUsurfref hSurfRef; +} cuSurfRefGetArray_params; + +typedef struct cuTexObjectCreate_params_st { + CUtexObject *pTexObject; + const CUDA_RESOURCE_DESC *pResDesc; + const CUDA_TEXTURE_DESC *pTexDesc; + const CUDA_RESOURCE_VIEW_DESC *pResViewDesc; +} cuTexObjectCreate_params; + +typedef struct cuTexObjectDestroy_params_st { + CUtexObject texObject; +} cuTexObjectDestroy_params; + +typedef struct cuTexObjectGetResourceDesc_params_st { + CUDA_RESOURCE_DESC *pResDesc; + CUtexObject texObject; +} cuTexObjectGetResourceDesc_params; + +typedef struct cuTexObjectGetTextureDesc_params_st { + CUDA_TEXTURE_DESC *pTexDesc; + CUtexObject texObject; +} cuTexObjectGetTextureDesc_params; + +typedef struct cuTexObjectGetResourceViewDesc_params_st { + CUDA_RESOURCE_VIEW_DESC *pResViewDesc; + CUtexObject texObject; +} cuTexObjectGetResourceViewDesc_params; + +typedef struct cuSurfObjectCreate_params_st { + CUsurfObject *pSurfObject; + const CUDA_RESOURCE_DESC *pResDesc; +} cuSurfObjectCreate_params; + +typedef struct cuSurfObjectDestroy_params_st { + CUsurfObject surfObject; +} cuSurfObjectDestroy_params; + +typedef struct cuSurfObjectGetResourceDesc_params_st { + CUDA_RESOURCE_DESC *pResDesc; + CUsurfObject surfObject; +} cuSurfObjectGetResourceDesc_params; + +typedef struct cuTensorMapEncodeTiled_params_st { + CUtensorMap *tensorMap; + CUtensorMapDataType tensorDataType; + cuuint32_t tensorRank; + void *globalAddress; + const cuuint64_t *globalDim; + const cuuint64_t *globalStrides; + const cuuint32_t *boxDim; + const cuuint32_t *elementStrides; + CUtensorMapInterleave interleave; + CUtensorMapSwizzle swizzle; + CUtensorMapL2promotion l2Promotion; + CUtensorMapFloatOOBfill oobFill; +} cuTensorMapEncodeTiled_params; + +typedef struct cuTensorMapEncodeIm2col_params_st { + CUtensorMap *tensorMap; + CUtensorMapDataType tensorDataType; + cuuint32_t tensorRank; + void *globalAddress; + const cuuint64_t *globalDim; + const cuuint64_t *globalStrides; + const int *pixelBoxLowerCorner; + const int 
*pixelBoxUpperCorner; + cuuint32_t channelsPerPixel; + cuuint32_t pixelsPerColumn; + const cuuint32_t *elementStrides; + CUtensorMapInterleave interleave; + CUtensorMapSwizzle swizzle; + CUtensorMapL2promotion l2Promotion; + CUtensorMapFloatOOBfill oobFill; +} cuTensorMapEncodeIm2col_params; + +typedef struct cuTensorMapReplaceAddress_params_st { + CUtensorMap *tensorMap; + void *globalAddress; +} cuTensorMapReplaceAddress_params; + +typedef struct cuDeviceCanAccessPeer_params_st { + int *canAccessPeer; + CUdevice dev; + CUdevice peerDev; +} cuDeviceCanAccessPeer_params; + +typedef struct cuCtxEnablePeerAccess_params_st { + CUcontext peerContext; + unsigned int Flags; +} cuCtxEnablePeerAccess_params; + +typedef struct cuCtxDisablePeerAccess_params_st { + CUcontext peerContext; +} cuCtxDisablePeerAccess_params; + +typedef struct cuDeviceGetP2PAttribute_params_st { + int *value; + CUdevice_P2PAttribute attrib; + CUdevice srcDevice; + CUdevice dstDevice; +} cuDeviceGetP2PAttribute_params; + +typedef struct cuGraphicsUnregisterResource_params_st { + CUgraphicsResource resource; +} cuGraphicsUnregisterResource_params; + +typedef struct cuGraphicsSubResourceGetMappedArray_params_st { + CUarray *pArray; + CUgraphicsResource resource; + unsigned int arrayIndex; + unsigned int mipLevel; +} cuGraphicsSubResourceGetMappedArray_params; + +typedef struct cuGraphicsResourceGetMappedMipmappedArray_params_st { + CUmipmappedArray *pMipmappedArray; + CUgraphicsResource resource; +} cuGraphicsResourceGetMappedMipmappedArray_params; + +typedef struct cuGraphicsResourceGetMappedPointer_v2_params_st { + CUdeviceptr *pDevPtr; + size_t *pSize; + CUgraphicsResource resource; +} cuGraphicsResourceGetMappedPointer_v2_params; + +typedef struct cuGraphicsResourceSetMapFlags_v2_params_st { + CUgraphicsResource resource; + unsigned int flags; +} cuGraphicsResourceSetMapFlags_v2_params; + +typedef struct cuGraphicsMapResources_ptsz_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsMapResources_ptsz_params; + +typedef struct cuGraphicsUnmapResources_ptsz_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsUnmapResources_ptsz_params; + +typedef struct cuGetProcAddress_v2_params_st { + const char *symbol; + void **pfn; + int cudaVersion; + cuuint64_t flags; + CUdriverProcAddressQueryResult *symbolStatus; +} cuGetProcAddress_v2_params; + +typedef struct cuCoredumpGetAttribute_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpGetAttribute_params; + +typedef struct cuCoredumpGetAttributeGlobal_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpGetAttributeGlobal_params; + +typedef struct cuCoredumpSetAttribute_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpSetAttribute_params; + +typedef struct cuCoredumpSetAttributeGlobal_params_st { + CUcoredumpSettings attrib; + void *value; + size_t *size; +} cuCoredumpSetAttributeGlobal_params; + +typedef struct cuGetExportTable_params_st { + const void **ppExportTable; + const CUuuid *pExportTableId; +} cuGetExportTable_params; + +typedef struct cuGreenCtxCreate_params_st { + CUgreenCtx *phCtx; + CUdevResourceDesc desc; + CUdevice dev; + unsigned int flags; +} cuGreenCtxCreate_params; + +typedef struct cuGreenCtxDestroy_params_st { + CUgreenCtx hCtx; +} cuGreenCtxDestroy_params; + +typedef struct cuCtxFromGreenCtx_params_st { + CUcontext *pContext; + CUgreenCtx hCtx; +} 
cuCtxFromGreenCtx_params; + +typedef struct cuDeviceGetDevResource_params_st { + CUdevice device; + CUdevResource *resource; + CUdevResourceType type; +} cuDeviceGetDevResource_params; + +typedef struct cuCtxGetDevResource_params_st { + CUcontext hCtx; + CUdevResource *resource; + CUdevResourceType type; +} cuCtxGetDevResource_params; + +typedef struct cuGreenCtxGetDevResource_params_st { + CUgreenCtx hCtx; + CUdevResource *resource; + CUdevResourceType type; +} cuGreenCtxGetDevResource_params; + +typedef struct cuDevSmResourceSplitByCount_params_st { + CUdevResource *result; + unsigned int *nbGroups; + const CUdevResource *input; + CUdevResource *remaining; + unsigned int useFlags; + unsigned int minCount; +} cuDevSmResourceSplitByCount_params; + +typedef struct cuDevResourceGenerateDesc_params_st { + CUdevResourceDesc *phDesc; + CUdevResource *resources; + unsigned int nbResources; +} cuDevResourceGenerateDesc_params; + +typedef struct cuGreenCtxRecordEvent_params_st { + CUgreenCtx hCtx; + CUevent hEvent; +} cuGreenCtxRecordEvent_params; + +typedef struct cuGreenCtxWaitEvent_params_st { + CUgreenCtx hCtx; + CUevent hEvent; +} cuGreenCtxWaitEvent_params; + +typedef struct cuStreamGetGreenCtx_params_st { + CUstream hStream; + CUgreenCtx *phCtx; +} cuStreamGetGreenCtx_params; + +typedef struct cuGreenCtxStreamCreate_params_st { + CUstream *phStream; + CUgreenCtx greenCtx; + unsigned int flags; + int priority; +} cuGreenCtxStreamCreate_params; + +typedef struct cuMemHostRegister_params_st { + void *p; + size_t bytesize; + unsigned int Flags; +} cuMemHostRegister_params; + +typedef struct cuGraphicsResourceSetMapFlags_params_st { + CUgraphicsResource resource; + unsigned int flags; +} cuGraphicsResourceSetMapFlags_params; + +typedef struct cuLinkCreate_params_st { + unsigned int numOptions; + CUjit_option *options; + void **optionValues; + CUlinkState *stateOut; +} cuLinkCreate_params; + +typedef struct cuLinkAddData_params_st { + CUlinkState state; + CUjitInputType type; + void *data; + size_t size; + const char *name; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddData_params; + +typedef struct cuLinkAddFile_params_st { + CUlinkState state; + CUjitInputType type; + const char *path; + unsigned int numOptions; + CUjit_option *options; + void **optionValues; +} cuLinkAddFile_params; + +typedef struct cuTexRefSetAddress2D_v2_params_st { + CUtexref hTexRef; + const CUDA_ARRAY_DESCRIPTOR *desc; + CUdeviceptr dptr; + size_t Pitch; +} cuTexRefSetAddress2D_v2_params; + +typedef struct cuDeviceTotalMem_params_st { + unsigned int *bytes; + CUdevice dev; +} cuDeviceTotalMem_params; + +typedef struct cuCtxCreate_params_st { + CUcontext *pctx; + unsigned int flags; + CUdevice dev; +} cuCtxCreate_params; + +typedef struct cuModuleGetGlobal_params_st { + CUdeviceptr_v1 *dptr; + unsigned int *bytes; + CUmodule hmod; + const char *name; +} cuModuleGetGlobal_params; + +typedef struct cuMemGetInfo_params_st { + unsigned int *free; + unsigned int *total; +} cuMemGetInfo_params; + +typedef struct cuMemAlloc_params_st { + CUdeviceptr_v1 *dptr; + unsigned int bytesize; +} cuMemAlloc_params; + +typedef struct cuMemAllocPitch_params_st { + CUdeviceptr_v1 *dptr; + unsigned int *pPitch; + unsigned int WidthInBytes; + unsigned int Height; + unsigned int ElementSizeBytes; +} cuMemAllocPitch_params; + +typedef struct cuMemFree_params_st { + CUdeviceptr_v1 dptr; +} cuMemFree_params; + +typedef struct cuMemGetAddressRange_params_st { + CUdeviceptr_v1 *pbase; + unsigned int *psize; + 
CUdeviceptr_v1 dptr; +} cuMemGetAddressRange_params; + +typedef struct cuMemAllocHost_params_st { + void **pp; + unsigned int bytesize; +} cuMemAllocHost_params; + +typedef struct cuMemHostGetDevicePointer_params_st { + CUdeviceptr_v1 *pdptr; + void *p; + unsigned int Flags; +} cuMemHostGetDevicePointer_params; + +typedef struct cuMemcpyHtoD_params_st { + CUdeviceptr_v1 dstDevice; + const void *srcHost; + unsigned int ByteCount; +} cuMemcpyHtoD_params; + +typedef struct cuMemcpyDtoH_params_st { + void *dstHost; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; +} cuMemcpyDtoH_params; + +typedef struct cuMemcpyDtoD_params_st { + CUdeviceptr_v1 dstDevice; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; +} cuMemcpyDtoD_params; + +typedef struct cuMemcpyDtoA_params_st { + CUarray dstArray; + unsigned int dstOffset; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; +} cuMemcpyDtoA_params; + +typedef struct cuMemcpyAtoD_params_st { + CUdeviceptr_v1 dstDevice; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; +} cuMemcpyAtoD_params; + +typedef struct cuMemcpyHtoA_params_st { + CUarray dstArray; + unsigned int dstOffset; + const void *srcHost; + unsigned int ByteCount; +} cuMemcpyHtoA_params; + +typedef struct cuMemcpyAtoH_params_st { + void *dstHost; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; +} cuMemcpyAtoH_params; + +typedef struct cuMemcpyAtoA_params_st { + CUarray dstArray; + unsigned int dstOffset; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; +} cuMemcpyAtoA_params; + +typedef struct cuMemcpyHtoAAsync_params_st { + CUarray dstArray; + unsigned int dstOffset; + const void *srcHost; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyHtoAAsync_params; + +typedef struct cuMemcpyAtoHAsync_params_st { + void *dstHost; + CUarray srcArray; + unsigned int srcOffset; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyAtoHAsync_params; + +typedef struct cuMemcpy2D_params_st { + const CUDA_MEMCPY2D_v1 *pCopy; +} cuMemcpy2D_params; + +typedef struct cuMemcpy2DUnaligned_params_st { + const CUDA_MEMCPY2D_v1 *pCopy; +} cuMemcpy2DUnaligned_params; + +typedef struct cuMemcpy3D_params_st { + const CUDA_MEMCPY3D_v1 *pCopy; +} cuMemcpy3D_params; + +typedef struct cuMemcpyHtoDAsync_params_st { + CUdeviceptr_v1 dstDevice; + const void *srcHost; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyHtoDAsync_params; + +typedef struct cuMemcpyDtoHAsync_params_st { + void *dstHost; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyDtoHAsync_params; + +typedef struct cuMemcpyDtoDAsync_params_st { + CUdeviceptr_v1 dstDevice; + CUdeviceptr_v1 srcDevice; + unsigned int ByteCount; + CUstream hStream; +} cuMemcpyDtoDAsync_params; + +typedef struct cuMemcpy2DAsync_params_st { + const CUDA_MEMCPY2D_v1 *pCopy; + CUstream hStream; +} cuMemcpy2DAsync_params; + +typedef struct cuMemcpy3DAsync_params_st { + const CUDA_MEMCPY3D_v1 *pCopy; + CUstream hStream; +} cuMemcpy3DAsync_params; + +typedef struct cuMemsetD8_params_st { + CUdeviceptr_v1 dstDevice; + unsigned char uc; + unsigned int N; +} cuMemsetD8_params; + +typedef struct cuMemsetD16_params_st { + CUdeviceptr_v1 dstDevice; + unsigned short us; + unsigned int N; +} cuMemsetD16_params; + +typedef struct cuMemsetD32_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int ui; + unsigned int N; +} cuMemsetD32_params; + +typedef struct cuMemsetD2D8_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int dstPitch; + unsigned char uc; + 
unsigned int Width; + unsigned int Height; +} cuMemsetD2D8_params; + +typedef struct cuMemsetD2D16_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int dstPitch; + unsigned short us; + unsigned int Width; + unsigned int Height; +} cuMemsetD2D16_params; + +typedef struct cuMemsetD2D32_params_st { + CUdeviceptr_v1 dstDevice; + unsigned int dstPitch; + unsigned int ui; + unsigned int Width; + unsigned int Height; +} cuMemsetD2D32_params; + +typedef struct cuArrayCreate_params_st { + CUarray *pHandle; + const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray; +} cuArrayCreate_params; + +typedef struct cuArrayGetDescriptor_params_st { + CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor; + CUarray hArray; +} cuArrayGetDescriptor_params; + +typedef struct cuArray3DCreate_params_st { + CUarray *pHandle; + const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray; +} cuArray3DCreate_params; + +typedef struct cuArray3DGetDescriptor_params_st { + CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor; + CUarray hArray; +} cuArray3DGetDescriptor_params; + +typedef struct cuTexRefSetAddress_params_st { + unsigned int *ByteOffset; + CUtexref hTexRef; + CUdeviceptr_v1 dptr; + unsigned int bytes; +} cuTexRefSetAddress_params; + +typedef struct cuTexRefSetAddress2D_params_st { + CUtexref hTexRef; + const CUDA_ARRAY_DESCRIPTOR_v1 *desc; + CUdeviceptr_v1 dptr; + unsigned int Pitch; +} cuTexRefSetAddress2D_params; + +typedef struct cuTexRefGetAddress_params_st { + CUdeviceptr_v1 *pdptr; + CUtexref hTexRef; +} cuTexRefGetAddress_params; + +typedef struct cuGraphicsResourceGetMappedPointer_params_st { + CUdeviceptr_v1 *pDevPtr; + unsigned int *pSize; + CUgraphicsResource resource; +} cuGraphicsResourceGetMappedPointer_params; + +typedef struct cuCtxDestroy_params_st { + CUcontext ctx; +} cuCtxDestroy_params; + +typedef struct cuCtxPopCurrent_params_st { + CUcontext *pctx; +} cuCtxPopCurrent_params; + +typedef struct cuCtxPushCurrent_params_st { + CUcontext ctx; +} cuCtxPushCurrent_params; + +typedef struct cuStreamDestroy_params_st { + CUstream hStream; +} cuStreamDestroy_params; + +typedef struct cuEventDestroy_params_st { + CUevent hEvent; +} cuEventDestroy_params; + +typedef struct cuDevicePrimaryCtxRelease_params_st { + CUdevice dev; +} cuDevicePrimaryCtxRelease_params; + +typedef struct cuDevicePrimaryCtxReset_params_st { + CUdevice dev; +} cuDevicePrimaryCtxReset_params; + +typedef struct cuDevicePrimaryCtxSetFlags_params_st { + CUdevice dev; + unsigned int flags; +} cuDevicePrimaryCtxSetFlags_params; + +typedef struct cuMemcpyHtoD_v2_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoD_v2_params; + +typedef struct cuMemcpyDtoH_v2_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoH_v2_params; + +typedef struct cuMemcpyDtoD_v2_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoD_v2_params; + +typedef struct cuMemcpyDtoA_v2_params_st { + CUarray dstArray; + size_t dstOffset; + CUdeviceptr srcDevice; + size_t ByteCount; +} cuMemcpyDtoA_v2_params; + +typedef struct cuMemcpyAtoD_v2_params_st { + CUdeviceptr dstDevice; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoD_v2_params; + +typedef struct cuMemcpyHtoA_v2_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; +} cuMemcpyHtoA_v2_params; + +typedef struct cuMemcpyAtoH_v2_params_st { + void *dstHost; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoH_v2_params; + +typedef 
struct cuMemcpyAtoA_v2_params_st { + CUarray dstArray; + size_t dstOffset; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; +} cuMemcpyAtoA_v2_params; + +typedef struct cuMemcpyHtoAAsync_v2_params_st { + CUarray dstArray; + size_t dstOffset; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoAAsync_v2_params; + +typedef struct cuMemcpyAtoHAsync_v2_params_st { + void *dstHost; + CUarray srcArray; + size_t srcOffset; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAtoHAsync_v2_params; + +typedef struct cuMemcpy2D_v2_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2D_v2_params; + +typedef struct cuMemcpy2DUnaligned_v2_params_st { + const CUDA_MEMCPY2D *pCopy; +} cuMemcpy2DUnaligned_v2_params; + +typedef struct cuMemcpy3D_v2_params_st { + const CUDA_MEMCPY3D *pCopy; +} cuMemcpy3D_v2_params; + +typedef struct cuMemcpyHtoDAsync_v2_params_st { + CUdeviceptr dstDevice; + const void *srcHost; + size_t ByteCount; + CUstream hStream; +} cuMemcpyHtoDAsync_v2_params; + +typedef struct cuMemcpyDtoHAsync_v2_params_st { + void *dstHost; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoHAsync_v2_params; + +typedef struct cuMemcpyDtoDAsync_v2_params_st { + CUdeviceptr dstDevice; + CUdeviceptr srcDevice; + size_t ByteCount; + CUstream hStream; +} cuMemcpyDtoDAsync_v2_params; + +typedef struct cuMemcpy2DAsync_v2_params_st { + const CUDA_MEMCPY2D *pCopy; + CUstream hStream; +} cuMemcpy2DAsync_v2_params; + +typedef struct cuMemcpy3DAsync_v2_params_st { + const CUDA_MEMCPY3D *pCopy; + CUstream hStream; +} cuMemcpy3DAsync_v2_params; + +typedef struct cuMemsetD8_v2_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; +} cuMemsetD8_v2_params; + +typedef struct cuMemsetD16_v2_params_st { + CUdeviceptr dstDevice; + unsigned short us; + size_t N; +} cuMemsetD16_v2_params; + +typedef struct cuMemsetD32_v2_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; +} cuMemsetD32_v2_params; + +typedef struct cuMemsetD2D8_v2_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; +} cuMemsetD2D8_v2_params; + +typedef struct cuMemsetD2D16_v2_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; +} cuMemsetD2D16_v2_params; + +typedef struct cuMemsetD2D32_v2_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; +} cuMemsetD2D32_v2_params; + +typedef struct cuMemcpy_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; +} cuMemcpy_params; + +typedef struct cuMemcpyAsync_params_st { + CUdeviceptr dst; + CUdeviceptr src; + size_t ByteCount; + CUstream hStream; +} cuMemcpyAsync_params; + +typedef struct cuMemcpyPeer_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; +} cuMemcpyPeer_params; + +typedef struct cuMemcpyPeerAsync_params_st { + CUdeviceptr dstDevice; + CUcontext dstContext; + CUdeviceptr srcDevice; + CUcontext srcContext; + size_t ByteCount; + CUstream hStream; +} cuMemcpyPeerAsync_params; + +typedef struct cuMemcpy3DPeer_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; +} cuMemcpy3DPeer_params; + +typedef struct cuMemcpy3DPeerAsync_params_st { + const CUDA_MEMCPY3D_PEER *pCopy; + CUstream hStream; +} cuMemcpy3DPeerAsync_params; + +typedef struct cuMemsetD8Async_params_st { + CUdeviceptr dstDevice; + unsigned char uc; + size_t N; + CUstream hStream; +} 
cuMemsetD8Async_params; + +typedef struct cuMemsetD16Async_params_st { + CUdeviceptr dstDevice; + unsigned short us; + size_t N; + CUstream hStream; +} cuMemsetD16Async_params; + +typedef struct cuMemsetD32Async_params_st { + CUdeviceptr dstDevice; + unsigned int ui; + size_t N; + CUstream hStream; +} cuMemsetD32Async_params; + +typedef struct cuMemsetD2D8Async_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned char uc; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D8Async_params; + +typedef struct cuMemsetD2D16Async_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned short us; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D16Async_params; + +typedef struct cuMemsetD2D32Async_params_st { + CUdeviceptr dstDevice; + size_t dstPitch; + unsigned int ui; + size_t Width; + size_t Height; + CUstream hStream; +} cuMemsetD2D32Async_params; + +typedef struct cuStreamGetPriority_params_st { + CUstream hStream; + int *priority; +} cuStreamGetPriority_params; + +typedef struct cuStreamGetId_params_st { + CUstream hStream; + unsigned long long *streamId; +} cuStreamGetId_params; + +typedef struct cuStreamGetFlags_params_st { + CUstream hStream; + unsigned int *flags; +} cuStreamGetFlags_params; + +typedef struct cuStreamGetCtx_params_st { + CUstream hStream; + CUcontext *pctx; +} cuStreamGetCtx_params; + +typedef struct cuStreamGetCtx_v2_params_st { + CUstream hStream; + CUcontext *pCtx; + CUgreenCtx *pGreenCtx; +} cuStreamGetCtx_v2_params; + +typedef struct cuStreamWaitEvent_params_st { + CUstream hStream; + CUevent hEvent; + unsigned int Flags; +} cuStreamWaitEvent_params; + +typedef struct cuStreamAddCallback_params_st { + CUstream hStream; + CUstreamCallback callback; + void *userData; + unsigned int flags; +} cuStreamAddCallback_params; + +typedef struct cuStreamAttachMemAsync_params_st { + CUstream hStream; + CUdeviceptr dptr; + size_t length; + unsigned int flags; +} cuStreamAttachMemAsync_params; + +typedef struct cuStreamQuery_params_st { + CUstream hStream; +} cuStreamQuery_params; + +typedef struct cuStreamSynchronize_params_st { + CUstream hStream; +} cuStreamSynchronize_params; + +typedef struct cuEventRecord_params_st { + CUevent hEvent; + CUstream hStream; +} cuEventRecord_params; + +typedef struct cuEventRecordWithFlags_params_st { + CUevent hEvent; + CUstream hStream; + unsigned int flags; +} cuEventRecordWithFlags_params; + +typedef struct cuLaunchKernel_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; + void **extra; +} cuLaunchKernel_params; + +typedef struct cuLaunchKernelEx_params_st { + const CUlaunchConfig *config; + CUfunction f; + void **kernelParams; + void **extra; +} cuLaunchKernelEx_params; + +typedef struct cuLaunchHostFunc_params_st { + CUstream hStream; + CUhostFn fn; + void *userData; +} cuLaunchHostFunc_params; + +typedef struct cuGraphicsMapResources_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsMapResources_params; + +typedef struct cuGraphicsUnmapResources_params_st { + unsigned int count; + CUgraphicsResource *resources; + CUstream hStream; +} cuGraphicsUnmapResources_params; + +typedef struct cuStreamWriteValue32_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_params; + 
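Note that these structs carry only the call's arguments; everything else a tracer needs (entry/exit site, correlation id, function name, a per-call scratch slot) travels in the CUpti_CallbackData envelope, so a single handler can time any driver call without touching the per-function layouts. A sketch under the same subscription as the previous example; the handler name is illustrative, and cuptiEnableDomain(1, sub, CUPTI_CB_DOMAIN_DRIVER_API) would route every driver cbid to it.

// Sketch: pairing the ENTER and EXIT callbacks for one API call using the
// CUpti_CallbackData envelope rather than the generated argument structs.
#include <stdio.h>
#include <stdint.h>
#include <cupti.h>

static void CUPTIAPI timeDriverApi(void *userdata, CUpti_CallbackDomain domain,
                                   CUpti_CallbackId cbid,
                                   const CUpti_CallbackData *cbInfo)
{
    (void)userdata; (void)domain; (void)cbid;
    uint64_t ts;
    cuptiGetTimestamp(&ts);                 // CUPTI's nanosecond clock
    if (cbInfo->callbackSite == CUPTI_API_ENTER) {
        *cbInfo->correlationData = ts;      // scratch slot shared with the exit callback
    } else if (cbInfo->callbackSite == CUPTI_API_EXIT) {
        printf("%s: %llu ns (correlation %u)\n", cbInfo->functionName,
               (unsigned long long)(ts - *cbInfo->correlationData),
               cbInfo->correlationId);
    }
}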
+typedef struct cuStreamWaitValue32_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_params; + +typedef struct cuStreamWriteValue64_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_params; + +typedef struct cuStreamWaitValue64_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_params; + +typedef struct cuStreamBatchMemOp_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_params; + +typedef struct cuStreamWriteValue32_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_ptsz_params; + +typedef struct cuStreamWaitValue32_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_ptsz_params; + +typedef struct cuStreamWriteValue64_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_ptsz_params; + +typedef struct cuStreamWaitValue64_ptsz_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_ptsz_params; + +typedef struct cuStreamBatchMemOp_ptsz_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_ptsz_params; + +typedef struct cuStreamWriteValue32_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWriteValue32_v2_params; + +typedef struct cuStreamWaitValue32_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint32_t value; + unsigned int flags; +} cuStreamWaitValue32_v2_params; + +typedef struct cuStreamWriteValue64_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWriteValue64_v2_params; + +typedef struct cuStreamWaitValue64_v2_params_st { + CUstream stream; + CUdeviceptr addr; + cuuint64_t value; + unsigned int flags; +} cuStreamWaitValue64_v2_params; + +typedef struct cuStreamBatchMemOp_v2_params_st { + CUstream stream; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} cuStreamBatchMemOp_v2_params; + +typedef struct cuMemPrefetchAsync_params_st { + CUdeviceptr devPtr; + size_t count; + CUdevice dstDevice; + CUstream hStream; +} cuMemPrefetchAsync_params; + +typedef struct cuMemPrefetchAsync_v2_params_st { + CUdeviceptr devPtr; + size_t count; + CUmemLocation location; + unsigned int flags; + CUstream hStream; +} cuMemPrefetchAsync_v2_params; + +typedef struct cuLaunchCooperativeKernel_params_st { + CUfunction f; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + void **kernelParams; +} cuLaunchCooperativeKernel_params; + +typedef struct cuSignalExternalSemaphoresAsync_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream stream; +} cuSignalExternalSemaphoresAsync_params; + +typedef struct cuWaitExternalSemaphoresAsync_params_st { + const CUexternalSemaphore *extSemArray; + const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray; + unsigned int numExtSems; + CUstream 
stream; +} cuWaitExternalSemaphoresAsync_params; + +typedef struct cuStreamBeginCapture_params_st { + CUstream hStream; +} cuStreamBeginCapture_params; + +typedef struct cuStreamBeginCapture_ptsz_params_st { + CUstream hStream; +} cuStreamBeginCapture_ptsz_params; + +typedef struct cuStreamBeginCapture_v2_params_st { + CUstream hStream; + CUstreamCaptureMode mode; +} cuStreamBeginCapture_v2_params; + +typedef struct cuStreamBeginCaptureToGraph_params_st { + CUstream hStream; + CUgraph hGraph; + const CUgraphNode *dependencies; + const CUgraphEdgeData *dependencyData; + size_t numDependencies; + CUstreamCaptureMode mode; +} cuStreamBeginCaptureToGraph_params; + +typedef struct cuStreamEndCapture_params_st { + CUstream hStream; + CUgraph *phGraph; +} cuStreamEndCapture_params; + +typedef struct cuStreamIsCapturing_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus; +} cuStreamIsCapturing_params; + +typedef struct cuStreamGetCaptureInfo_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; +} cuStreamGetCaptureInfo_params; + +typedef struct cuStreamGetCaptureInfo_ptsz_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; +} cuStreamGetCaptureInfo_ptsz_params; + +typedef struct cuStreamGetCaptureInfo_v2_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; + CUgraph *graph_out; + const CUgraphNode **dependencies_out; + size_t *numDependencies_out; +} cuStreamGetCaptureInfo_v2_params; + +typedef struct cuStreamGetCaptureInfo_v3_params_st { + CUstream hStream; + CUstreamCaptureStatus *captureStatus_out; + cuuint64_t *id_out; + CUgraph *graph_out; + const CUgraphNode **dependencies_out; + const CUgraphEdgeData **edgeData_out; + size_t *numDependencies_out; +} cuStreamGetCaptureInfo_v3_params; + +typedef struct cuGraphAddKernelNode_params_st { + CUgraphNode *phGraphNode; + CUgraph hGraph; + const CUgraphNode *dependencies; + size_t numDependencies; + const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphAddKernelNode_params; + +typedef struct cuGraphKernelNodeGetParams_params_st { + CUgraphNode hNode; + CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphKernelNodeGetParams_params; + +typedef struct cuGraphKernelNodeSetParams_params_st { + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphKernelNodeSetParams_params; + +typedef struct cuGraphExecKernelNodeSetParams_params_st { + CUgraphExec hGraphExec; + CUgraphNode hNode; + const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams; +} cuGraphExecKernelNodeSetParams_params; + +typedef struct cuGraphInstantiateWithParams_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams; +} cuGraphInstantiateWithParams_params; + +typedef struct cuGraphExecUpdate_params_st { + CUgraphExec hGraphExec; + CUgraph hGraph; + CUgraphNode *hErrorNode_out; + CUgraphExecUpdateResult *updateResult_out; +} cuGraphExecUpdate_params; + +typedef struct cuGraphUpload_params_st { + CUgraphExec hGraph; + CUstream hStream; +} cuGraphUpload_params; + +typedef struct cuGraphLaunch_params_st { + CUgraphExec hGraph; + CUstream hStream; +} cuGraphLaunch_params; + +typedef struct cuStreamCopyAttributes_params_st { + CUstream dstStream; + CUstream srcStream; +} cuStreamCopyAttributes_params; + +typedef struct cuStreamGetAttribute_params_st { + CUstream hStream; + CUstreamAttrID attr; + CUstreamAttrValue *value; +} cuStreamGetAttribute_params; + +typedef struct 
cuStreamSetAttribute_params_st { + CUstream hStream; + CUstreamAttrID attr; + const CUstreamAttrValue *param; +} cuStreamSetAttribute_params; + +typedef struct cuIpcOpenMemHandle_params_st { + CUdeviceptr *pdptr; + CUipcMemHandle handle; + unsigned int Flags; +} cuIpcOpenMemHandle_params; + +typedef struct cuGraphInstantiate_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUgraphNode *phErrorNode; + char *logBuffer; + size_t bufferSize; +} cuGraphInstantiate_params; + +typedef struct cuGraphInstantiate_v2_params_st { + CUgraphExec *phGraphExec; + CUgraph hGraph; + CUgraphNode *phErrorNode; + char *logBuffer; + size_t bufferSize; +} cuGraphInstantiate_v2_params; + +typedef struct cuMemMapArrayAsync_params_st { + CUarrayMapInfo *mapInfoList; + unsigned int count; + CUstream hStream; +} cuMemMapArrayAsync_params; + +typedef struct cuMemFreeAsync_params_st { + CUdeviceptr dptr; + CUstream hStream; +} cuMemFreeAsync_params; + +typedef struct cuMemAllocAsync_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUstream hStream; +} cuMemAllocAsync_params; + +typedef struct cuMemAllocFromPoolAsync_params_st { + CUdeviceptr *dptr; + size_t bytesize; + CUmemoryPool pool; + CUstream hStream; +} cuMemAllocFromPoolAsync_params; + +typedef struct cuStreamUpdateCaptureDependencies_params_st { + CUstream hStream; + CUgraphNode *dependencies; + size_t numDependencies; + unsigned int flags; +} cuStreamUpdateCaptureDependencies_params; + +typedef struct cuStreamUpdateCaptureDependencies_v2_params_st { + CUstream hStream; + CUgraphNode *dependencies; + const CUgraphEdgeData *dependencyData; + size_t numDependencies; + unsigned int flags; +} cuStreamUpdateCaptureDependencies_v2_params; + +typedef struct cuGetProcAddress_params_st { + const char *symbol; + void **pfn; + int cudaVersion; + cuuint64_t flags; +} cuGetProcAddress_params; diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..fb28568677699e9396716c6f8d91e9beb0941b8b --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h @@ -0,0 +1,2316 @@ +// This file is generated. Any changes you make will be lost during the next clean build. 
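The second generated header, beginning here, plays the same role for the runtime API: one <name>_<version>_params struct per runtime entry point, consumed from the CUPTI_CB_DOMAIN_RUNTIME_API domain instead of the driver domain. A sketch against cudaSetDevice_v3020_params (defined just below); the exact cbid enumerator spelling is assumed from CUPTI's usual <name>_<version> scheme.

// Sketch: the runtime-API analogue of the earlier driver-API handler.
// The cbid enumerator is assumed from CUPTI's <name>_<version> naming.
#include <stdio.h>
#include <cupti.h>

static void CUPTIAPI onRuntimeApi(void *userdata, CUpti_CallbackDomain domain,
                                  CUpti_CallbackId cbid,
                                  const CUpti_CallbackData *cbInfo)
{
    (void)userdata; (void)domain;
    if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020 &&
        cbInfo->callbackSite == CUPTI_API_ENTER) {
        const cudaSetDevice_v3020_params *p =
            (const cudaSetDevice_v3020_params *)cbInfo->functionParams;
        printf("cudaSetDevice(%d)\n", p->device);
    }
}
/* Enable with:
   cuptiEnableCallback(1, sub, CUPTI_CB_DOMAIN_RUNTIME_API,
                       CUPTI_RUNTIME_TRACE_CBID_cudaSetDevice_v3020); */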
+ +// CUDA public interface, for type definitions and api function prototypes +#include "cuda_runtime_api.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaDeviceSetLimit_v3020_params_st { + enum cudaLimit limit; + size_t value; +} cudaDeviceSetLimit_v3020_params; + +typedef struct cudaDeviceGetLimit_v3020_params_st { + size_t *pValue; + enum cudaLimit limit; +} cudaDeviceGetLimit_v3020_params; + +typedef struct cudaDeviceGetTexture1DLinearMaxWidth_v11010_params_st { + size_t *maxWidthInElements; + const struct cudaChannelFormatDesc *fmtDesc; + int device; +} cudaDeviceGetTexture1DLinearMaxWidth_v11010_params; + +typedef struct cudaDeviceGetCacheConfig_v3020_params_st { + enum cudaFuncCache *pCacheConfig; +} cudaDeviceGetCacheConfig_v3020_params; + +typedef struct cudaDeviceGetStreamPriorityRange_v5050_params_st { + int *leastPriority; + int *greatestPriority; +} cudaDeviceGetStreamPriorityRange_v5050_params; + +typedef struct cudaDeviceSetCacheConfig_v3020_params_st { + enum cudaFuncCache cacheConfig; +} cudaDeviceSetCacheConfig_v3020_params; + +typedef struct cudaDeviceGetByPCIBusId_v4010_params_st { + int *device; + const char *pciBusId; +} cudaDeviceGetByPCIBusId_v4010_params; + +typedef struct cudaDeviceGetPCIBusId_v4010_params_st { + char *pciBusId; + int len; + int device; +} cudaDeviceGetPCIBusId_v4010_params; + +typedef struct cudaIpcGetEventHandle_v4010_params_st { + cudaIpcEventHandle_t *handle; + cudaEvent_t event; +} cudaIpcGetEventHandle_v4010_params; + +typedef struct cudaIpcOpenEventHandle_v4010_params_st { + cudaEvent_t *event; + cudaIpcEventHandle_t handle; +} cudaIpcOpenEventHandle_v4010_params; + +typedef struct cudaIpcGetMemHandle_v4010_params_st { + cudaIpcMemHandle_t *handle; + void *devPtr; +} cudaIpcGetMemHandle_v4010_params; + +typedef struct cudaIpcOpenMemHandle_v4010_params_st { + void **devPtr; + cudaIpcMemHandle_t handle; + unsigned int flags; +} cudaIpcOpenMemHandle_v4010_params; + +typedef struct cudaIpcCloseMemHandle_v4010_params_st { + void *devPtr; +} cudaIpcCloseMemHandle_v4010_params; + +typedef struct cudaDeviceFlushGPUDirectRDMAWrites_v11030_params_st { + enum cudaFlushGPUDirectRDMAWritesTarget target; + enum cudaFlushGPUDirectRDMAWritesScope scope; +} cudaDeviceFlushGPUDirectRDMAWrites_v11030_params; + +typedef struct cudaDeviceRegisterAsyncNotification_v12040_params_st { + int device; + cudaAsyncCallback callbackFunc; + void *userData; + cudaAsyncCallbackHandle_t *callback; +} cudaDeviceRegisterAsyncNotification_v12040_params; + +typedef struct cudaDeviceUnregisterAsyncNotification_v12040_params_st { + int device; + cudaAsyncCallbackHandle_t callback; +} cudaDeviceUnregisterAsyncNotification_v12040_params; + +typedef struct cudaDeviceGetSharedMemConfig_v4020_params_st { + enum cudaSharedMemConfig *pConfig; +} cudaDeviceGetSharedMemConfig_v4020_params; + +typedef struct cudaDeviceSetSharedMemConfig_v4020_params_st { + enum cudaSharedMemConfig config; +} cudaDeviceSetSharedMemConfig_v4020_params; + +typedef struct cudaGetErrorName_v6050_params_st { + cudaError_t error; +} cudaGetErrorName_v6050_params; + +typedef struct cudaGetErrorString_v3020_params_st { + cudaError_t error; +} cudaGetErrorString_v3020_params; + +typedef struct cudaGetDeviceCount_v3020_params_st { + int *count; +} 
cudaGetDeviceCount_v3020_params; + +typedef struct cudaGetDeviceProperties_v2_v12000_params_st { + struct cudaDeviceProp *prop; + int device; +} cudaGetDeviceProperties_v2_v12000_params; + +typedef struct cudaDeviceGetAttribute_v5000_params_st { + int *value; + enum cudaDeviceAttr attr; + int device; +} cudaDeviceGetAttribute_v5000_params; + +typedef struct cudaDeviceGetDefaultMemPool_v11020_params_st { + cudaMemPool_t *memPool; + int device; +} cudaDeviceGetDefaultMemPool_v11020_params; + +typedef struct cudaDeviceSetMemPool_v11020_params_st { + int device; + cudaMemPool_t memPool; +} cudaDeviceSetMemPool_v11020_params; + +typedef struct cudaDeviceGetMemPool_v11020_params_st { + cudaMemPool_t *memPool; + int device; +} cudaDeviceGetMemPool_v11020_params; + +typedef struct cudaDeviceGetNvSciSyncAttributes_v10020_params_st { + void *nvSciSyncAttrList; + int device; + int flags; +} cudaDeviceGetNvSciSyncAttributes_v10020_params; + +typedef struct cudaDeviceGetP2PAttribute_v8000_params_st { + int *value; + enum cudaDeviceP2PAttr attr; + int srcDevice; + int dstDevice; +} cudaDeviceGetP2PAttribute_v8000_params; + +typedef struct cudaChooseDevice_v3020_params_st { + int *device; + const struct cudaDeviceProp *prop; +} cudaChooseDevice_v3020_params; + +typedef struct cudaInitDevice_v12000_params_st { + int device; + unsigned int deviceFlags; + unsigned int flags; +} cudaInitDevice_v12000_params; + +typedef struct cudaSetDevice_v3020_params_st { + int device; +} cudaSetDevice_v3020_params; + +typedef struct cudaGetDevice_v3020_params_st { + int *device; +} cudaGetDevice_v3020_params; + +typedef struct cudaSetValidDevices_v3020_params_st { + int *device_arr; + int len; +} cudaSetValidDevices_v3020_params; + +typedef struct cudaSetDeviceFlags_v3020_params_st { + unsigned int flags; +} cudaSetDeviceFlags_v3020_params; + +typedef struct cudaGetDeviceFlags_v7000_params_st { + unsigned int *flags; +} cudaGetDeviceFlags_v7000_params; + +typedef struct cudaStreamCreate_v3020_params_st { + cudaStream_t *pStream; +} cudaStreamCreate_v3020_params; + +typedef struct cudaStreamCreateWithFlags_v5000_params_st { + cudaStream_t *pStream; + unsigned int flags; +} cudaStreamCreateWithFlags_v5000_params; + +typedef struct cudaStreamCreateWithPriority_v5050_params_st { + cudaStream_t *pStream; + unsigned int flags; + int priority; +} cudaStreamCreateWithPriority_v5050_params; + +typedef struct cudaStreamGetPriority_ptsz_v7000_params_st { + cudaStream_t hStream; + int *priority; +} cudaStreamGetPriority_ptsz_v7000_params; + +typedef struct cudaStreamGetFlags_ptsz_v7000_params_st { + cudaStream_t hStream; + unsigned int *flags; +} cudaStreamGetFlags_ptsz_v7000_params; + +typedef struct cudaStreamGetId_ptsz_v12000_params_st { + cudaStream_t hStream; + unsigned long long *streamId; +} cudaStreamGetId_ptsz_v12000_params; + +typedef struct cudaStreamCopyAttributes_ptsz_v11000_params_st { + cudaStream_t dst; + cudaStream_t src; +} cudaStreamCopyAttributes_ptsz_v11000_params; + +typedef struct cudaStreamGetAttribute_ptsz_v11000_params_st { + cudaStream_t hStream; + cudaStreamAttrID attr; + cudaStreamAttrValue *value_out; +} cudaStreamGetAttribute_ptsz_v11000_params; + +typedef struct cudaStreamSetAttribute_ptsz_v11000_params_st { + cudaStream_t hStream; + cudaStreamAttrID attr; + const cudaStreamAttrValue *value; +} cudaStreamSetAttribute_ptsz_v11000_params; + +typedef struct cudaStreamDestroy_v5050_params_st { + cudaStream_t stream; +} cudaStreamDestroy_v5050_params; + +typedef struct 
cudaStreamWaitEvent_ptsz_v7000_params_st { + cudaStream_t stream; + cudaEvent_t event; + unsigned int flags; +} cudaStreamWaitEvent_ptsz_v7000_params; + +typedef struct cudaStreamAddCallback_ptsz_v7000_params_st { + cudaStream_t stream; + cudaStreamCallback_t callback; + void *userData; + unsigned int flags; +} cudaStreamAddCallback_ptsz_v7000_params; + +typedef struct cudaStreamSynchronize_ptsz_v7000_params_st { + cudaStream_t stream; +} cudaStreamSynchronize_ptsz_v7000_params; + +typedef struct cudaStreamQuery_ptsz_v7000_params_st { + cudaStream_t stream; +} cudaStreamQuery_ptsz_v7000_params; + +typedef struct cudaStreamAttachMemAsync_ptsz_v7000_params_st { + cudaStream_t stream; + void *devPtr; + size_t length; + unsigned int flags; +} cudaStreamAttachMemAsync_ptsz_v7000_params; + +typedef struct cudaStreamBeginCapture_ptsz_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureMode mode; +} cudaStreamBeginCapture_ptsz_v10000_params; + +typedef struct cudaStreamBeginCaptureToGraph_ptsz_v12030_params_st { + cudaStream_t stream; + cudaGraph_t graph; + const cudaGraphNode_t *dependencies; + const cudaGraphEdgeData *dependencyData; + size_t numDependencies; + enum cudaStreamCaptureMode mode; +} cudaStreamBeginCaptureToGraph_ptsz_v12030_params; + +typedef struct cudaThreadExchangeStreamCaptureMode_v10010_params_st { + enum cudaStreamCaptureMode *mode; +} cudaThreadExchangeStreamCaptureMode_v10010_params; + +typedef struct cudaStreamEndCapture_ptsz_v10000_params_st { + cudaStream_t stream; + cudaGraph_t *pGraph; +} cudaStreamEndCapture_ptsz_v10000_params; + +typedef struct cudaStreamIsCapturing_ptsz_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *pCaptureStatus; +} cudaStreamIsCapturing_ptsz_v10000_params; + +typedef struct cudaStreamGetCaptureInfo_v2_ptsz_v11030_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; + cudaGraph_t *graph_out; + const cudaGraphNode_t **dependencies_out; + size_t *numDependencies_out; +} cudaStreamGetCaptureInfo_v2_ptsz_v11030_params; + +typedef struct cudaStreamGetCaptureInfo_v3_ptsz_v12030_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; + cudaGraph_t *graph_out; + const cudaGraphNode_t **dependencies_out; + const cudaGraphEdgeData **edgeData_out; + size_t *numDependencies_out; +} cudaStreamGetCaptureInfo_v3_ptsz_v12030_params; + +typedef struct cudaStreamUpdateCaptureDependencies_ptsz_v11030_params_st { + cudaStream_t stream; + cudaGraphNode_t *dependencies; + size_t numDependencies; + unsigned int flags; +} cudaStreamUpdateCaptureDependencies_ptsz_v11030_params; + +typedef struct cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params_st { + cudaStream_t stream; + cudaGraphNode_t *dependencies; + const cudaGraphEdgeData *dependencyData; + size_t numDependencies; + unsigned int flags; +} cudaStreamUpdateCaptureDependencies_v2_ptsz_v12030_params; + +typedef struct cudaEventCreate_v3020_params_st { + cudaEvent_t *event; +} cudaEventCreate_v3020_params; + +typedef struct cudaEventCreateWithFlags_v3020_params_st { + cudaEvent_t *event; + unsigned int flags; +} cudaEventCreateWithFlags_v3020_params; + +typedef struct cudaEventRecord_ptsz_v7000_params_st { + cudaEvent_t event; + cudaStream_t stream; +} cudaEventRecord_ptsz_v7000_params; + +typedef struct cudaEventRecordWithFlags_ptsz_v11010_params_st { + cudaEvent_t event; + cudaStream_t stream; + unsigned int flags; +} 
cudaEventRecordWithFlags_ptsz_v11010_params; + +typedef struct cudaEventQuery_v3020_params_st { + cudaEvent_t event; +} cudaEventQuery_v3020_params; + +typedef struct cudaEventSynchronize_v3020_params_st { + cudaEvent_t event; +} cudaEventSynchronize_v3020_params; + +typedef struct cudaEventDestroy_v3020_params_st { + cudaEvent_t event; +} cudaEventDestroy_v3020_params; + +typedef struct cudaEventElapsedTime_v3020_params_st { + float *ms; + cudaEvent_t start; + cudaEvent_t end; +} cudaEventElapsedTime_v3020_params; + +typedef struct cudaImportExternalMemory_v10000_params_st { + cudaExternalMemory_t *extMem_out; + const struct cudaExternalMemoryHandleDesc *memHandleDesc; +} cudaImportExternalMemory_v10000_params; + +typedef struct cudaExternalMemoryGetMappedBuffer_v10000_params_st { + void **devPtr; + cudaExternalMemory_t extMem; + const struct cudaExternalMemoryBufferDesc *bufferDesc; +} cudaExternalMemoryGetMappedBuffer_v10000_params; + +typedef struct cudaExternalMemoryGetMappedMipmappedArray_v10000_params_st { + cudaMipmappedArray_t *mipmap; + cudaExternalMemory_t extMem; + const struct cudaExternalMemoryMipmappedArrayDesc *mipmapDesc; +} cudaExternalMemoryGetMappedMipmappedArray_v10000_params; + +typedef struct cudaDestroyExternalMemory_v10000_params_st { + cudaExternalMemory_t extMem; +} cudaDestroyExternalMemory_v10000_params; + +typedef struct cudaImportExternalSemaphore_v10000_params_st { + cudaExternalSemaphore_t *extSem_out; + const struct cudaExternalSemaphoreHandleDesc *semHandleDesc; +} cudaImportExternalSemaphore_v10000_params; + +typedef struct cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_v2_ptsz_v11020_params; + +typedef struct cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_v2_ptsz_v11020_params; + +typedef struct cudaDestroyExternalSemaphore_v10000_params_st { + cudaExternalSemaphore_t extSem; +} cudaDestroyExternalSemaphore_v10000_params; + +typedef struct cudaLaunchKernel_ptsz_v7000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchKernel_ptsz_v7000_params; + +typedef struct cudaLaunchKernelExC_ptsz_v11060_params_st { + const cudaLaunchConfig_t *config; + const void *func; + void **args; +} cudaLaunchKernelExC_ptsz_v11060_params; + +typedef struct cudaLaunchCooperativeKernel_ptsz_v9000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchCooperativeKernel_ptsz_v9000_params; + +typedef struct cudaLaunchCooperativeKernelMultiDevice_v9000_params_st { + struct cudaLaunchParams *launchParamsList; + unsigned int numDevices; + unsigned int flags; +} cudaLaunchCooperativeKernelMultiDevice_v9000_params; + +typedef struct cudaFuncSetCacheConfig_v3020_params_st { + const void *func; + enum cudaFuncCache cacheConfig; +} cudaFuncSetCacheConfig_v3020_params; + +typedef struct cudaFuncGetAttributes_v3020_params_st { + struct cudaFuncAttributes *attr; + const void *func; +} cudaFuncGetAttributes_v3020_params; + +typedef struct cudaFuncSetAttribute_v9000_params_st { + const void *func; + enum cudaFuncAttribute 
attr; + int value; +} cudaFuncSetAttribute_v9000_params; + +typedef struct cudaFuncGetName_v12030_params_st { + const char **name; + const void *func; +} cudaFuncGetName_v12030_params; + +typedef struct cudaFuncGetParamInfo_v12040_params_st { + const void *func; + size_t paramIndex; + size_t *paramOffset; + size_t *paramSize; +} cudaFuncGetParamInfo_v12040_params; + +typedef struct cudaLaunchHostFunc_ptsz_v10000_params_st { + cudaStream_t stream; + cudaHostFn_t fn; + void *userData; +} cudaLaunchHostFunc_ptsz_v10000_params; + +typedef struct cudaFuncSetSharedMemConfig_v4020_params_st { + const void *func; + enum cudaSharedMemConfig config; +} cudaFuncSetSharedMemConfig_v4020_params; + +typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params_st { + int *numBlocks; + const void *func; + int blockSize; + size_t dynamicSMemSize; +} cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6050_params; + +typedef struct cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params_st { + size_t *dynamicSmemSize; + const void *func; + int numBlocks; + int blockSize; +} cudaOccupancyAvailableDynamicSMemPerBlock_v10200_params; + +typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params_st { + int *numBlocks; + const void *func; + int blockSize; + size_t dynamicSMemSize; + unsigned int flags; +} cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000_params; + +typedef struct cudaOccupancyMaxPotentialClusterSize_v11070_params_st { + int *clusterSize; + const void *func; + const cudaLaunchConfig_t *launchConfig; +} cudaOccupancyMaxPotentialClusterSize_v11070_params; + +typedef struct cudaOccupancyMaxActiveClusters_v11070_params_st { + int *numClusters; + const void *func; + const cudaLaunchConfig_t *launchConfig; +} cudaOccupancyMaxActiveClusters_v11070_params; + +typedef struct cudaMallocManaged_v6000_params_st { + void **devPtr; + size_t size; + unsigned int flags; +} cudaMallocManaged_v6000_params; + +typedef struct cudaMalloc_v3020_params_st { + void **devPtr; + size_t size; +} cudaMalloc_v3020_params; + +typedef struct cudaMallocHost_v3020_params_st { + void **ptr; + size_t size; +} cudaMallocHost_v3020_params; + +typedef struct cudaMallocPitch_v3020_params_st { + void **devPtr; + size_t *pitch; + size_t width; + size_t height; +} cudaMallocPitch_v3020_params; + +typedef struct cudaMallocArray_v3020_params_st { + cudaArray_t *array; + const struct cudaChannelFormatDesc *desc; + size_t width; + size_t height; + unsigned int flags; +} cudaMallocArray_v3020_params; + +typedef struct cudaFree_v3020_params_st { + void *devPtr; +} cudaFree_v3020_params; + +typedef struct cudaFreeHost_v3020_params_st { + void *ptr; +} cudaFreeHost_v3020_params; + +typedef struct cudaFreeArray_v3020_params_st { + cudaArray_t array; +} cudaFreeArray_v3020_params; + +typedef struct cudaFreeMipmappedArray_v5000_params_st { + cudaMipmappedArray_t mipmappedArray; +} cudaFreeMipmappedArray_v5000_params; + +typedef struct cudaHostAlloc_v3020_params_st { + void **pHost; + size_t size; + unsigned int flags; +} cudaHostAlloc_v3020_params; + +typedef struct cudaHostRegister_v4000_params_st { + void *ptr; + size_t size; + unsigned int flags; +} cudaHostRegister_v4000_params; + +typedef struct cudaHostUnregister_v4000_params_st { + void *ptr; +} cudaHostUnregister_v4000_params; + +typedef struct cudaHostGetDevicePointer_v3020_params_st { + void **pDevice; + void *pHost; + unsigned int flags; +} cudaHostGetDevicePointer_v3020_params; + +typedef struct cudaHostGetFlags_v3020_params_st { + unsigned int 
*pFlags; + void *pHost; +} cudaHostGetFlags_v3020_params; + +typedef struct cudaMalloc3D_v3020_params_st { + struct cudaPitchedPtr *pitchedDevPtr; + struct cudaExtent extent; +} cudaMalloc3D_v3020_params; + +typedef struct cudaMalloc3DArray_v3020_params_st { + cudaArray_t *array; + const struct cudaChannelFormatDesc *desc; + struct cudaExtent extent; + unsigned int flags; +} cudaMalloc3DArray_v3020_params; + +typedef struct cudaMallocMipmappedArray_v5000_params_st { + cudaMipmappedArray_t *mipmappedArray; + const struct cudaChannelFormatDesc *desc; + struct cudaExtent extent; + unsigned int numLevels; + unsigned int flags; +} cudaMallocMipmappedArray_v5000_params; + +typedef struct cudaGetMipmappedArrayLevel_v5000_params_st { + cudaArray_t *levelArray; + cudaMipmappedArray_const_t mipmappedArray; + unsigned int level; +} cudaGetMipmappedArrayLevel_v5000_params; + +typedef struct cudaMemcpy3D_ptds_v7000_params_st { + const struct cudaMemcpy3DParms *p; +} cudaMemcpy3D_ptds_v7000_params; + +typedef struct cudaMemcpy3DPeer_ptds_v7000_params_st { + const struct cudaMemcpy3DPeerParms *p; +} cudaMemcpy3DPeer_ptds_v7000_params; + +typedef struct cudaMemcpy3DAsync_ptsz_v7000_params_st { + const struct cudaMemcpy3DParms *p; + cudaStream_t stream; +} cudaMemcpy3DAsync_ptsz_v7000_params; + +typedef struct cudaMemcpy3DPeerAsync_ptsz_v7000_params_st { + const struct cudaMemcpy3DPeerParms *p; + cudaStream_t stream; +} cudaMemcpy3DPeerAsync_ptsz_v7000_params; + +typedef struct cudaMemGetInfo_v3020_params_st { + size_t *free; + size_t *total; +} cudaMemGetInfo_v3020_params; + +typedef struct cudaArrayGetInfo_v4010_params_st { + struct cudaChannelFormatDesc *desc; + struct cudaExtent *extent; + unsigned int *flags; + cudaArray_t array; +} cudaArrayGetInfo_v4010_params; + +typedef struct cudaArrayGetPlane_v11020_params_st { + cudaArray_t *pPlaneArray; + cudaArray_t hArray; + unsigned int planeIdx; +} cudaArrayGetPlane_v11020_params; + +typedef struct cudaArrayGetMemoryRequirements_v11060_params_st { + struct cudaArrayMemoryRequirements *memoryRequirements; + cudaArray_t array; + int device; +} cudaArrayGetMemoryRequirements_v11060_params; + +typedef struct cudaMipmappedArrayGetMemoryRequirements_v11060_params_st { + struct cudaArrayMemoryRequirements *memoryRequirements; + cudaMipmappedArray_t mipmap; + int device; +} cudaMipmappedArrayGetMemoryRequirements_v11060_params; + +typedef struct cudaArrayGetSparseProperties_v11010_params_st { + struct cudaArraySparseProperties *sparseProperties; + cudaArray_t array; +} cudaArrayGetSparseProperties_v11010_params; + +typedef struct cudaMipmappedArrayGetSparseProperties_v11010_params_st { + struct cudaArraySparseProperties *sparseProperties; + cudaMipmappedArray_t mipmap; +} cudaMipmappedArrayGetSparseProperties_v11010_params; + +typedef struct cudaMemcpy_ptds_v7000_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpy_ptds_v7000_params; + +typedef struct cudaMemcpyPeer_v4000_params_st { + void *dst; + int dstDevice; + const void *src; + int srcDevice; + size_t count; +} cudaMemcpyPeer_v4000_params; + +typedef struct cudaMemcpy2D_ptds_v7000_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2D_ptds_v7000_params; + +typedef struct cudaMemcpy2DToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; 
+} cudaMemcpy2DToArray_ptds_v7000_params; + +typedef struct cudaMemcpy2DFromArray_ptds_v7000_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DFromArray_ptds_v7000_params; + +typedef struct cudaMemcpy2DArrayToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DArrayToArray_ptds_v7000_params; + +typedef struct cudaMemcpyToSymbol_ptds_v7000_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyToSymbol_ptds_v7000_params; + +typedef struct cudaMemcpyFromSymbol_ptds_v7000_params_st { + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyFromSymbol_ptds_v7000_params; + +typedef struct cudaMemcpyAsync_ptsz_v7000_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyPeerAsync_v4000_params_st { + void *dst; + int dstDevice; + const void *src; + int srcDevice; + size_t count; + cudaStream_t stream; +} cudaMemcpyPeerAsync_v4000_params; + +typedef struct cudaMemcpy2DAsync_ptsz_v7000_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DAsync_ptsz_v7000_params; + +typedef struct cudaMemcpy2DToArrayAsync_ptsz_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DToArrayAsync_ptsz_v7000_params; + +typedef struct cudaMemcpy2DFromArrayAsync_ptsz_v7000_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DFromArrayAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyToSymbolAsync_ptsz_v7000_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToSymbolAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyFromSymbolAsync_ptsz_v7000_params_st { + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromSymbolAsync_ptsz_v7000_params; + +typedef struct cudaMemset_ptds_v7000_params_st { + void *devPtr; + int value; + size_t count; +} cudaMemset_ptds_v7000_params; + +typedef struct cudaMemset2D_ptds_v7000_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; +} cudaMemset2D_ptds_v7000_params; + +typedef struct cudaMemset3D_ptds_v7000_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; +} cudaMemset3D_ptds_v7000_params; + +typedef struct cudaMemsetAsync_ptsz_v7000_params_st { + void *devPtr; + int value; + size_t count; + cudaStream_t stream; +} cudaMemsetAsync_ptsz_v7000_params; + +typedef struct cudaMemset2DAsync_ptsz_v7000_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; + cudaStream_t stream; +} cudaMemset2DAsync_ptsz_v7000_params; + +typedef struct 
cudaMemset3DAsync_ptsz_v7000_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; + cudaStream_t stream; +} cudaMemset3DAsync_ptsz_v7000_params; + +typedef struct cudaGetSymbolAddress_v3020_params_st { + void **devPtr; + const void *symbol; +} cudaGetSymbolAddress_v3020_params; + +typedef struct cudaGetSymbolSize_v3020_params_st { + size_t *size; + const void *symbol; +} cudaGetSymbolSize_v3020_params; + +typedef struct cudaMemPrefetchAsync_ptsz_v8000_params_st { + const void *devPtr; + size_t count; + int dstDevice; + cudaStream_t stream; +} cudaMemPrefetchAsync_ptsz_v8000_params; + +typedef struct cudaMemPrefetchAsync_v2_ptsz_v12020_params_st { + const void *devPtr; + size_t count; + struct cudaMemLocation location; + unsigned int flags; + cudaStream_t stream; +} cudaMemPrefetchAsync_v2_ptsz_v12020_params; + +typedef struct cudaMemAdvise_v8000_params_st { + const void *devPtr; + size_t count; + enum cudaMemoryAdvise advice; + int device; +} cudaMemAdvise_v8000_params; + +typedef struct cudaMemAdvise_v2_v12020_params_st { + const void *devPtr; + size_t count; + enum cudaMemoryAdvise advice; + struct cudaMemLocation location; +} cudaMemAdvise_v2_v12020_params; + +typedef struct cudaMemRangeGetAttribute_v8000_params_st { + void *data; + size_t dataSize; + enum cudaMemRangeAttribute attribute; + const void *devPtr; + size_t count; +} cudaMemRangeGetAttribute_v8000_params; + +typedef struct cudaMemRangeGetAttributes_v8000_params_st { + void **data; + size_t *dataSizes; + enum cudaMemRangeAttribute *attributes; + size_t numAttributes; + const void *devPtr; + size_t count; +} cudaMemRangeGetAttributes_v8000_params; + +typedef struct cudaMemcpyToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyToArray_ptds_v7000_params; + +typedef struct cudaMemcpyFromArray_ptds_v7000_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyFromArray_ptds_v7000_params; + +typedef struct cudaMemcpyArrayToArray_ptds_v7000_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyArrayToArray_ptds_v7000_params; + +typedef struct cudaMemcpyToArrayAsync_ptsz_v7000_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToArrayAsync_ptsz_v7000_params; + +typedef struct cudaMemcpyFromArrayAsync_ptsz_v7000_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromArrayAsync_ptsz_v7000_params; + +typedef struct cudaMallocAsync_ptsz_v11020_params_st { + void **devPtr; + size_t size; + cudaStream_t hStream; +} cudaMallocAsync_ptsz_v11020_params; + +typedef struct cudaFreeAsync_ptsz_v11020_params_st { + void *devPtr; + cudaStream_t hStream; +} cudaFreeAsync_ptsz_v11020_params; + +typedef struct cudaMemPoolTrimTo_v11020_params_st { + cudaMemPool_t memPool; + size_t minBytesToKeep; +} cudaMemPoolTrimTo_v11020_params; + +typedef struct cudaMemPoolSetAttribute_v11020_params_st { + cudaMemPool_t memPool; + enum cudaMemPoolAttr attr; + void *value; +} cudaMemPoolSetAttribute_v11020_params; + +typedef struct 
cudaMemPoolGetAttribute_v11020_params_st { + cudaMemPool_t memPool; + enum cudaMemPoolAttr attr; + void *value; +} cudaMemPoolGetAttribute_v11020_params; + +typedef struct cudaMemPoolSetAccess_v11020_params_st { + cudaMemPool_t memPool; + const struct cudaMemAccessDesc *descList; + size_t count; +} cudaMemPoolSetAccess_v11020_params; + +typedef struct cudaMemPoolGetAccess_v11020_params_st { + enum cudaMemAccessFlags *flags; + cudaMemPool_t memPool; + struct cudaMemLocation *location; +} cudaMemPoolGetAccess_v11020_params; + +typedef struct cudaMemPoolCreate_v11020_params_st { + cudaMemPool_t *memPool; + const struct cudaMemPoolProps *poolProps; +} cudaMemPoolCreate_v11020_params; + +typedef struct cudaMemPoolDestroy_v11020_params_st { + cudaMemPool_t memPool; +} cudaMemPoolDestroy_v11020_params; + +typedef struct cudaMallocFromPoolAsync_ptsz_v11020_params_st { + void **ptr; + size_t size; + cudaMemPool_t memPool; + cudaStream_t stream; +} cudaMallocFromPoolAsync_ptsz_v11020_params; + +typedef struct cudaMemPoolExportToShareableHandle_v11020_params_st { + void *shareableHandle; + cudaMemPool_t memPool; + enum cudaMemAllocationHandleType handleType; + unsigned int flags; +} cudaMemPoolExportToShareableHandle_v11020_params; + +typedef struct cudaMemPoolImportFromShareableHandle_v11020_params_st { + cudaMemPool_t *memPool; + void *shareableHandle; + enum cudaMemAllocationHandleType handleType; + unsigned int flags; +} cudaMemPoolImportFromShareableHandle_v11020_params; + +typedef struct cudaMemPoolExportPointer_v11020_params_st { + struct cudaMemPoolPtrExportData *exportData; + void *ptr; +} cudaMemPoolExportPointer_v11020_params; + +typedef struct cudaMemPoolImportPointer_v11020_params_st { + void **ptr; + cudaMemPool_t memPool; + struct cudaMemPoolPtrExportData *exportData; +} cudaMemPoolImportPointer_v11020_params; + +typedef struct cudaPointerGetAttributes_v4000_params_st { + struct cudaPointerAttributes *attributes; + const void *ptr; +} cudaPointerGetAttributes_v4000_params; + +typedef struct cudaDeviceCanAccessPeer_v4000_params_st { + int *canAccessPeer; + int device; + int peerDevice; +} cudaDeviceCanAccessPeer_v4000_params; + +typedef struct cudaDeviceEnablePeerAccess_v4000_params_st { + int peerDevice; + unsigned int flags; +} cudaDeviceEnablePeerAccess_v4000_params; + +typedef struct cudaDeviceDisablePeerAccess_v4000_params_st { + int peerDevice; +} cudaDeviceDisablePeerAccess_v4000_params; + +typedef struct cudaGraphicsUnregisterResource_v3020_params_st { + cudaGraphicsResource_t resource; +} cudaGraphicsUnregisterResource_v3020_params; + +typedef struct cudaGraphicsResourceSetMapFlags_v3020_params_st { + cudaGraphicsResource_t resource; + unsigned int flags; +} cudaGraphicsResourceSetMapFlags_v3020_params; + +typedef struct cudaGraphicsMapResources_v3020_params_st { + int count; + cudaGraphicsResource_t *resources; + cudaStream_t stream; +} cudaGraphicsMapResources_v3020_params; + +typedef struct cudaGraphicsUnmapResources_v3020_params_st { + int count; + cudaGraphicsResource_t *resources; + cudaStream_t stream; +} cudaGraphicsUnmapResources_v3020_params; + +typedef struct cudaGraphicsResourceGetMappedPointer_v3020_params_st { + void **devPtr; + size_t *size; + cudaGraphicsResource_t resource; +} cudaGraphicsResourceGetMappedPointer_v3020_params; + +typedef struct cudaGraphicsSubResourceGetMappedArray_v3020_params_st { + cudaArray_t *array; + cudaGraphicsResource_t resource; + unsigned int arrayIndex; + unsigned int mipLevel; +} cudaGraphicsSubResourceGetMappedArray_v3020_params; 
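+
+// -------------------------------------------------------------------------
+// Usage sketch (illustrative, not emitted by the header generator): CUPTI
+// hands a pointer to one of the *_params structs above to a subscribed
+// callback through CUpti_CallbackData::functionParams, and the callback
+// casts it according to the callback id. The function name onRuntimeApi is
+// hypothetical; the cbid is assumed to follow the usual
+// CUPTI_RUNTIME_TRACE_CBID_<function>_<version> naming from
+// cupti_runtime_cbid.h.
+#include <stdio.h>
+#include <cupti.h>
+
+static void CUPTIAPI onRuntimeApi(void *userdata,
+                                  CUpti_CallbackDomain domain,
+                                  CUpti_CallbackId cbid,
+                                  const CUpti_CallbackData *cbInfo)
+{
+    // The cast is only valid for the matching domain/cbid pair;
+    // functionParams is populated at both CUPTI_API_ENTER and
+    // CUPTI_API_EXIT sites.
+    if (domain == CUPTI_CB_DOMAIN_RUNTIME_API &&
+        cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMalloc_v3020 &&
+        cbInfo->callbackSite == CUPTI_API_ENTER) {
+        const cudaMalloc_v3020_params *p =
+            (const cudaMalloc_v3020_params *)cbInfo->functionParams;
+        printf("cudaMalloc of %zu bytes\n", p->size);
+    }
+}
+// -------------------------------------------------------------------------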
+ +typedef struct cudaGraphicsResourceGetMappedMipmappedArray_v5000_params_st { + cudaMipmappedArray_t *mipmappedArray; + cudaGraphicsResource_t resource; +} cudaGraphicsResourceGetMappedMipmappedArray_v5000_params; + +typedef struct cudaGetChannelDesc_v3020_params_st { + struct cudaChannelFormatDesc *desc; + cudaArray_const_t array; +} cudaGetChannelDesc_v3020_params; + +typedef struct cudaCreateChannelDesc_v3020_params_st { + int x; + int y; + int z; + int w; + enum cudaChannelFormatKind f; +} cudaCreateChannelDesc_v3020_params; + +typedef struct cudaCreateTextureObject_v5000_params_st { + cudaTextureObject_t *pTexObject; + const struct cudaResourceDesc *pResDesc; + const struct cudaTextureDesc *pTexDesc; + const struct cudaResourceViewDesc *pResViewDesc; +} cudaCreateTextureObject_v5000_params; + +typedef struct cudaDestroyTextureObject_v5000_params_st { + cudaTextureObject_t texObject; +} cudaDestroyTextureObject_v5000_params; + +typedef struct cudaGetTextureObjectResourceDesc_v5000_params_st { + struct cudaResourceDesc *pResDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectResourceDesc_v5000_params; + +typedef struct cudaGetTextureObjectTextureDesc_v5000_params_st { + struct cudaTextureDesc *pTexDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectTextureDesc_v5000_params; + +typedef struct cudaGetTextureObjectResourceViewDesc_v5000_params_st { + struct cudaResourceViewDesc *pResViewDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectResourceViewDesc_v5000_params; + +typedef struct cudaCreateSurfaceObject_v5000_params_st { + cudaSurfaceObject_t *pSurfObject; + const struct cudaResourceDesc *pResDesc; +} cudaCreateSurfaceObject_v5000_params; + +typedef struct cudaDestroySurfaceObject_v5000_params_st { + cudaSurfaceObject_t surfObject; +} cudaDestroySurfaceObject_v5000_params; + +typedef struct cudaGetSurfaceObjectResourceDesc_v5000_params_st { + struct cudaResourceDesc *pResDesc; + cudaSurfaceObject_t surfObject; +} cudaGetSurfaceObjectResourceDesc_v5000_params; + +typedef struct cudaDriverGetVersion_v3020_params_st { + int *driverVersion; +} cudaDriverGetVersion_v3020_params; + +typedef struct cudaRuntimeGetVersion_v3020_params_st { + int *runtimeVersion; +} cudaRuntimeGetVersion_v3020_params; + +typedef struct cudaGraphCreate_v10000_params_st { + cudaGraph_t *pGraph; + unsigned int flags; +} cudaGraphCreate_v10000_params; + +typedef struct cudaGraphAddKernelNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaKernelNodeParams *pNodeParams; +} cudaGraphAddKernelNode_v10000_params; + +typedef struct cudaGraphKernelNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaKernelNodeParams *pNodeParams; +} cudaGraphKernelNodeGetParams_v10000_params; + +typedef struct cudaGraphKernelNodeSetParams_v10000_params_st { + cudaGraphNode_t node; + const struct cudaKernelNodeParams *pNodeParams; +} cudaGraphKernelNodeSetParams_v10000_params; + +typedef struct cudaGraphKernelNodeCopyAttributes_v11000_params_st { + cudaGraphNode_t hSrc; + cudaGraphNode_t hDst; +} cudaGraphKernelNodeCopyAttributes_v11000_params; + +typedef struct cudaGraphKernelNodeGetAttribute_v11000_params_st { + cudaGraphNode_t hNode; + cudaKernelNodeAttrID attr; + cudaKernelNodeAttrValue *value_out; +} cudaGraphKernelNodeGetAttribute_v11000_params; + +typedef struct cudaGraphKernelNodeSetAttribute_v11000_params_st { + cudaGraphNode_t hNode; + cudaKernelNodeAttrID attr; + const 
cudaKernelNodeAttrValue *value; +} cudaGraphKernelNodeSetAttribute_v11000_params; + +typedef struct cudaGraphAddMemcpyNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaMemcpy3DParms *pCopyParams; +} cudaGraphAddMemcpyNode_v10000_params; + +typedef struct cudaGraphAddMemcpyNodeToSymbol_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphAddMemcpyNodeToSymbol_v11010_params; + +typedef struct cudaGraphAddMemcpyNodeFromSymbol_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphAddMemcpyNodeFromSymbol_v11010_params; + +typedef struct cudaGraphAddMemcpyNode1D_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaGraphAddMemcpyNode1D_v11010_params; + +typedef struct cudaGraphMemcpyNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaMemcpy3DParms *pNodeParams; +} cudaGraphMemcpyNodeGetParams_v10000_params; + +typedef struct cudaGraphMemcpyNodeSetParams_v10000_params_st { + cudaGraphNode_t node; + const struct cudaMemcpy3DParms *pNodeParams; +} cudaGraphMemcpyNodeSetParams_v10000_params; + +typedef struct cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params_st { + cudaGraphNode_t node; + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphMemcpyNodeSetParamsToSymbol_v11010_params; + +typedef struct cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params_st { + cudaGraphNode_t node; + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphMemcpyNodeSetParamsFromSymbol_v11010_params; + +typedef struct cudaGraphMemcpyNodeSetParams1D_v11010_params_st { + cudaGraphNode_t node; + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaGraphMemcpyNodeSetParams1D_v11010_params; + +typedef struct cudaGraphAddMemsetNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaMemsetParams *pMemsetParams; +} cudaGraphAddMemsetNode_v10000_params; + +typedef struct cudaGraphMemsetNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaMemsetParams *pNodeParams; +} cudaGraphMemsetNodeGetParams_v10000_params; + +typedef struct cudaGraphMemsetNodeSetParams_v10000_params_st { + cudaGraphNode_t node; + const struct cudaMemsetParams *pNodeParams; +} cudaGraphMemsetNodeSetParams_v10000_params; + +typedef struct cudaGraphAddHostNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaHostNodeParams *pNodeParams; +} cudaGraphAddHostNode_v10000_params; + +typedef struct cudaGraphHostNodeGetParams_v10000_params_st { + cudaGraphNode_t node; + struct cudaHostNodeParams *pNodeParams; +} cudaGraphHostNodeGetParams_v10000_params; + +typedef struct cudaGraphHostNodeSetParams_v10000_params_st { + cudaGraphNode_t 
node; + const struct cudaHostNodeParams *pNodeParams; +} cudaGraphHostNodeSetParams_v10000_params; + +typedef struct cudaGraphAddChildGraphNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + cudaGraph_t childGraph; +} cudaGraphAddChildGraphNode_v10000_params; + +typedef struct cudaGraphChildGraphNodeGetGraph_v10000_params_st { + cudaGraphNode_t node; + cudaGraph_t *pGraph; +} cudaGraphChildGraphNodeGetGraph_v10000_params; + +typedef struct cudaGraphAddEmptyNode_v10000_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; +} cudaGraphAddEmptyNode_v10000_params; + +typedef struct cudaGraphAddEventRecordNode_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + cudaEvent_t event; +} cudaGraphAddEventRecordNode_v11010_params; + +typedef struct cudaGraphEventRecordNodeGetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t *event_out; +} cudaGraphEventRecordNodeGetEvent_v11010_params; + +typedef struct cudaGraphEventRecordNodeSetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t event; +} cudaGraphEventRecordNodeSetEvent_v11010_params; + +typedef struct cudaGraphAddEventWaitNode_v11010_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + cudaEvent_t event; +} cudaGraphAddEventWaitNode_v11010_params; + +typedef struct cudaGraphEventWaitNodeGetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t *event_out; +} cudaGraphEventWaitNodeGetEvent_v11010_params; + +typedef struct cudaGraphEventWaitNodeSetEvent_v11010_params_st { + cudaGraphNode_t node; + cudaEvent_t event; +} cudaGraphEventWaitNodeSetEvent_v11010_params; + +typedef struct cudaGraphAddExternalSemaphoresSignalNode_v11020_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaExternalSemaphoreSignalNodeParams *nodeParams; +} cudaGraphAddExternalSemaphoresSignalNode_v11020_params; + +typedef struct cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params_st { + cudaGraphNode_t hNode; + struct cudaExternalSemaphoreSignalNodeParams *params_out; +} cudaGraphExternalSemaphoresSignalNodeGetParams_v11020_params; + +typedef struct cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params_st { + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreSignalNodeParams *nodeParams; +} cudaGraphExternalSemaphoresSignalNodeSetParams_v11020_params; + +typedef struct cudaGraphAddExternalSemaphoresWaitNode_v11020_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + const struct cudaExternalSemaphoreWaitNodeParams *nodeParams; +} cudaGraphAddExternalSemaphoresWaitNode_v11020_params; + +typedef struct cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params_st { + cudaGraphNode_t hNode; + struct cudaExternalSemaphoreWaitNodeParams *params_out; +} cudaGraphExternalSemaphoresWaitNodeGetParams_v11020_params; + +typedef struct cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params_st { + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreWaitNodeParams *nodeParams; +} cudaGraphExternalSemaphoresWaitNodeSetParams_v11020_params; + +typedef struct cudaGraphAddMemAllocNode_v11040_params_st { + cudaGraphNode_t 
*pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + struct cudaMemAllocNodeParams *nodeParams; +} cudaGraphAddMemAllocNode_v11040_params; + +typedef struct cudaGraphMemAllocNodeGetParams_v11040_params_st { + cudaGraphNode_t node; + struct cudaMemAllocNodeParams *params_out; +} cudaGraphMemAllocNodeGetParams_v11040_params; + +typedef struct cudaGraphAddMemFreeNode_v11040_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + void *dptr; +} cudaGraphAddMemFreeNode_v11040_params; + +typedef struct cudaGraphMemFreeNodeGetParams_v11040_params_st { + cudaGraphNode_t node; + void *dptr_out; +} cudaGraphMemFreeNodeGetParams_v11040_params; + +typedef struct cudaDeviceGraphMemTrim_v11040_params_st { + int device; +} cudaDeviceGraphMemTrim_v11040_params; + +typedef struct cudaDeviceGetGraphMemAttribute_v11040_params_st { + int device; + enum cudaGraphMemAttributeType attr; + void *value; +} cudaDeviceGetGraphMemAttribute_v11040_params; + +typedef struct cudaDeviceSetGraphMemAttribute_v11040_params_st { + int device; + enum cudaGraphMemAttributeType attr; + void *value; +} cudaDeviceSetGraphMemAttribute_v11040_params; + +typedef struct cudaGraphClone_v10000_params_st { + cudaGraph_t *pGraphClone; + cudaGraph_t originalGraph; +} cudaGraphClone_v10000_params; + +typedef struct cudaGraphNodeFindInClone_v10000_params_st { + cudaGraphNode_t *pNode; + cudaGraphNode_t originalNode; + cudaGraph_t clonedGraph; +} cudaGraphNodeFindInClone_v10000_params; + +typedef struct cudaGraphNodeGetType_v10000_params_st { + cudaGraphNode_t node; + enum cudaGraphNodeType *pType; +} cudaGraphNodeGetType_v10000_params; + +typedef struct cudaGraphGetNodes_v10000_params_st { + cudaGraph_t graph; + cudaGraphNode_t *nodes; + size_t *numNodes; +} cudaGraphGetNodes_v10000_params; + +typedef struct cudaGraphGetRootNodes_v10000_params_st { + cudaGraph_t graph; + cudaGraphNode_t *pRootNodes; + size_t *pNumRootNodes; +} cudaGraphGetRootNodes_v10000_params; + +typedef struct cudaGraphGetEdges_v10000_params_st { + cudaGraph_t graph; + cudaGraphNode_t *from; + cudaGraphNode_t *to; + size_t *numEdges; +} cudaGraphGetEdges_v10000_params; + +typedef struct cudaGraphGetEdges_v2_v12030_params_st { + cudaGraph_t graph; + cudaGraphNode_t *from; + cudaGraphNode_t *to; + cudaGraphEdgeData *edgeData; + size_t *numEdges; +} cudaGraphGetEdges_v2_v12030_params; + +typedef struct cudaGraphNodeGetDependencies_v10000_params_st { + cudaGraphNode_t node; + cudaGraphNode_t *pDependencies; + size_t *pNumDependencies; +} cudaGraphNodeGetDependencies_v10000_params; + +typedef struct cudaGraphNodeGetDependencies_v2_v12030_params_st { + cudaGraphNode_t node; + cudaGraphNode_t *pDependencies; + cudaGraphEdgeData *edgeData; + size_t *pNumDependencies; +} cudaGraphNodeGetDependencies_v2_v12030_params; + +typedef struct cudaGraphNodeGetDependentNodes_v10000_params_st { + cudaGraphNode_t node; + cudaGraphNode_t *pDependentNodes; + size_t *pNumDependentNodes; +} cudaGraphNodeGetDependentNodes_v10000_params; + +typedef struct cudaGraphNodeGetDependentNodes_v2_v12030_params_st { + cudaGraphNode_t node; + cudaGraphNode_t *pDependentNodes; + cudaGraphEdgeData *edgeData; + size_t *pNumDependentNodes; +} cudaGraphNodeGetDependentNodes_v2_v12030_params; + +typedef struct cudaGraphAddDependencies_v10000_params_st { + cudaGraph_t graph; + const cudaGraphNode_t *from; + const cudaGraphNode_t *to; + size_t numDependencies; +} 
cudaGraphAddDependencies_v10000_params; + +typedef struct cudaGraphAddDependencies_v2_v12030_params_st { + cudaGraph_t graph; + const cudaGraphNode_t *from; + const cudaGraphNode_t *to; + const cudaGraphEdgeData *edgeData; + size_t numDependencies; +} cudaGraphAddDependencies_v2_v12030_params; + +typedef struct cudaGraphRemoveDependencies_v10000_params_st { + cudaGraph_t graph; + const cudaGraphNode_t *from; + const cudaGraphNode_t *to; + size_t numDependencies; +} cudaGraphRemoveDependencies_v10000_params; + +typedef struct cudaGraphRemoveDependencies_v2_v12030_params_st { + cudaGraph_t graph; + const cudaGraphNode_t *from; + const cudaGraphNode_t *to; + const cudaGraphEdgeData *edgeData; + size_t numDependencies; +} cudaGraphRemoveDependencies_v2_v12030_params; + +typedef struct cudaGraphDestroyNode_v10000_params_st { + cudaGraphNode_t node; +} cudaGraphDestroyNode_v10000_params; + +typedef struct cudaGraphInstantiate_v12000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + unsigned long long flags; +} cudaGraphInstantiate_v12000_params; + +typedef struct cudaGraphInstantiateWithFlags_v11040_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + unsigned long long flags; +} cudaGraphInstantiateWithFlags_v11040_params; + +typedef struct cudaGraphInstantiateWithParams_ptsz_v12000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + cudaGraphInstantiateParams *instantiateParams; +} cudaGraphInstantiateWithParams_ptsz_v12000_params; + +typedef struct cudaGraphExecGetFlags_v12000_params_st { + cudaGraphExec_t graphExec; + unsigned long long *flags; +} cudaGraphExecGetFlags_v12000_params; + +typedef struct cudaGraphExecKernelNodeSetParams_v10010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaKernelNodeParams *pNodeParams; +} cudaGraphExecKernelNodeSetParams_v10010_params; + +typedef struct cudaGraphExecMemcpyNodeSetParams_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaMemcpy3DParms *pNodeParams; +} cudaGraphExecMemcpyNodeSetParams_v10020_params; + +typedef struct cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphExecMemcpyNodeSetParamsToSymbol_v11010_params; + +typedef struct cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaGraphExecMemcpyNodeSetParamsFromSymbol_v11010_params; + +typedef struct cudaGraphExecMemcpyNodeSetParams1D_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaGraphExecMemcpyNodeSetParams1D_v11010_params; + +typedef struct cudaGraphExecMemsetNodeSetParams_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaMemsetParams *pNodeParams; +} cudaGraphExecMemsetNodeSetParams_v10020_params; + +typedef struct cudaGraphExecHostNodeSetParams_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + const struct cudaHostNodeParams *pNodeParams; +} cudaGraphExecHostNodeSetParams_v10020_params; + +typedef struct cudaGraphExecChildGraphNodeSetParams_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t node; + cudaGraph_t childGraph; +} 
cudaGraphExecChildGraphNodeSetParams_v11010_params; + +typedef struct cudaGraphExecEventRecordNodeSetEvent_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + cudaEvent_t event; +} cudaGraphExecEventRecordNodeSetEvent_v11010_params; + +typedef struct cudaGraphExecEventWaitNodeSetEvent_v11010_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + cudaEvent_t event; +} cudaGraphExecEventWaitNodeSetEvent_v11010_params; + +typedef struct cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreSignalNodeParams *nodeParams; +} cudaGraphExecExternalSemaphoresSignalNodeSetParams_v11020_params; + +typedef struct cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + const struct cudaExternalSemaphoreWaitNodeParams *nodeParams; +} cudaGraphExecExternalSemaphoresWaitNodeSetParams_v11020_params; + +typedef struct cudaGraphNodeSetEnabled_v11060_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + unsigned int isEnabled; +} cudaGraphNodeSetEnabled_v11060_params; + +typedef struct cudaGraphNodeGetEnabled_v11060_params_st { + cudaGraphExec_t hGraphExec; + cudaGraphNode_t hNode; + unsigned int *isEnabled; +} cudaGraphNodeGetEnabled_v11060_params; + +typedef struct cudaGraphExecUpdate_v10020_params_st { + cudaGraphExec_t hGraphExec; + cudaGraph_t hGraph; + cudaGraphExecUpdateResultInfo *resultInfo; +} cudaGraphExecUpdate_v10020_params; + +typedef struct cudaGraphUpload_ptsz_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphUpload_ptsz_v10000_params; + +typedef struct cudaGraphLaunch_ptsz_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphLaunch_ptsz_v10000_params; + +typedef struct cudaGraphExecDestroy_v10000_params_st { + cudaGraphExec_t graphExec; +} cudaGraphExecDestroy_v10000_params; + +typedef struct cudaGraphDestroy_v10000_params_st { + cudaGraph_t graph; +} cudaGraphDestroy_v10000_params; + +typedef struct cudaGraphDebugDotPrint_v11030_params_st { + cudaGraph_t graph; + const char *path; + unsigned int flags; +} cudaGraphDebugDotPrint_v11030_params; + +typedef struct cudaUserObjectCreate_v11030_params_st { + cudaUserObject_t *object_out; + void *ptr; + cudaHostFn_t destroy; + unsigned int initialRefcount; + unsigned int flags; +} cudaUserObjectCreate_v11030_params; + +typedef struct cudaUserObjectRetain_v11030_params_st { + cudaUserObject_t object; + unsigned int count; +} cudaUserObjectRetain_v11030_params; + +typedef struct cudaUserObjectRelease_v11030_params_st { + cudaUserObject_t object; + unsigned int count; +} cudaUserObjectRelease_v11030_params; + +typedef struct cudaGraphRetainUserObject_v11030_params_st { + cudaGraph_t graph; + cudaUserObject_t object; + unsigned int count; + unsigned int flags; +} cudaGraphRetainUserObject_v11030_params; + +typedef struct cudaGraphReleaseUserObject_v11030_params_st { + cudaGraph_t graph; + cudaUserObject_t object; + unsigned int count; +} cudaGraphReleaseUserObject_v11030_params; + +typedef struct cudaGraphAddNode_v12020_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t *pDependencies; + size_t numDependencies; + struct cudaGraphNodeParams *nodeParams; +} cudaGraphAddNode_v12020_params; + +typedef struct cudaGraphAddNode_v2_v12030_params_st { + cudaGraphNode_t *pGraphNode; + cudaGraph_t graph; + const cudaGraphNode_t 
*pDependencies; + const cudaGraphEdgeData *dependencyData; + size_t numDependencies; + struct cudaGraphNodeParams *nodeParams; +} cudaGraphAddNode_v2_v12030_params; + +typedef struct cudaGraphNodeSetParams_v12020_params_st { + cudaGraphNode_t node; + struct cudaGraphNodeParams *nodeParams; +} cudaGraphNodeSetParams_v12020_params; + +typedef struct cudaGraphExecNodeSetParams_v12020_params_st { + cudaGraphExec_t graphExec; + cudaGraphNode_t node; + struct cudaGraphNodeParams *nodeParams; +} cudaGraphExecNodeSetParams_v12020_params; + +typedef struct cudaGraphConditionalHandleCreate_v12030_params_st { + cudaGraphConditionalHandle *pHandle_out; + cudaGraph_t graph; + unsigned int defaultLaunchValue; + unsigned int flags; +} cudaGraphConditionalHandleCreate_v12030_params; + +typedef struct cudaGetDriverEntryPoint_ptsz_v11030_params_st { + const char *symbol; + void **funcPtr; + unsigned long long flags; + enum cudaDriverEntryPointQueryResult *driverStatus; +} cudaGetDriverEntryPoint_ptsz_v11030_params; + +typedef struct cudaGetDriverEntryPointByVersion_ptsz_v12050_params_st { + const char *symbol; + void **funcPtr; + unsigned int cudaVersion; + unsigned long long flags; + enum cudaDriverEntryPointQueryResult *driverStatus; +} cudaGetDriverEntryPointByVersion_ptsz_v12050_params; + +typedef struct cudaGetFuncBySymbol_v11000_params_st { + cudaFunction_t *functionPtr; + const void *symbolPtr; +} cudaGetFuncBySymbol_v11000_params; + +typedef struct cudaGetKernel_v12000_params_st { + cudaKernel_t *kernelPtr; + const void *entryFuncAddr; +} cudaGetKernel_v12000_params; + +typedef struct cudaMemcpy_v3020_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpy_v3020_params; + +typedef struct cudaMemcpyToSymbol_v3020_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyToSymbol_v3020_params; + +typedef struct cudaMemcpyFromSymbol_v3020_params_st { + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; +} cudaMemcpyFromSymbol_v3020_params; + +typedef struct cudaMemcpy2D_v3020_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2D_v3020_params; + +typedef struct cudaMemcpyToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyToArray_v3020_params; + +typedef struct cudaMemcpy2DToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DToArray_v3020_params; + +typedef struct cudaMemcpyFromArray_v3020_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyFromArray_v3020_params; + +typedef struct cudaMemcpy2DFromArray_v3020_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DFromArray_v3020_params; + +typedef struct cudaMemcpyArrayToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t count; + enum cudaMemcpyKind kind; +} cudaMemcpyArrayToArray_v3020_params; + +typedef struct 
cudaMemcpy2DArrayToArray_v3020_params_st { + cudaArray_t dst; + size_t wOffsetDst; + size_t hOffsetDst; + cudaArray_const_t src; + size_t wOffsetSrc; + size_t hOffsetSrc; + size_t width; + size_t height; + enum cudaMemcpyKind kind; +} cudaMemcpy2DArrayToArray_v3020_params; + +typedef struct cudaMemcpy3D_v3020_params_st { + const struct cudaMemcpy3DParms *p; +} cudaMemcpy3D_v3020_params; + +typedef struct cudaMemcpy3DPeer_v4000_params_st { + const struct cudaMemcpy3DPeerParms *p; +} cudaMemcpy3DPeer_v4000_params; + +typedef struct cudaMemset_v3020_params_st { + void *devPtr; + int value; + size_t count; +} cudaMemset_v3020_params; + +typedef struct cudaMemset2D_v3020_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; +} cudaMemset2D_v3020_params; + +typedef struct cudaMemset3D_v3020_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; +} cudaMemset3D_v3020_params; + +typedef struct cudaMemcpyAsync_v3020_params_st { + void *dst; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyAsync_v3020_params; + +typedef struct cudaMemcpyToSymbolAsync_v3020_params_st { + const void *symbol; + const void *src; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToSymbolAsync_v3020_params; + +typedef struct cudaMemcpyFromSymbolAsync_v3020_params_st { + void *dst; + const void *symbol; + size_t count; + size_t offset; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromSymbolAsync_v3020_params; + +typedef struct cudaMemcpy2DAsync_v3020_params_st { + void *dst; + size_t dpitch; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DAsync_v3020_params; + +typedef struct cudaMemcpyToArrayAsync_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyToArrayAsync_v3020_params; + +typedef struct cudaMemcpy2DToArrayAsync_v3020_params_st { + cudaArray_t dst; + size_t wOffset; + size_t hOffset; + const void *src; + size_t spitch; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DToArrayAsync_v3020_params; + +typedef struct cudaMemcpyFromArrayAsync_v3020_params_st { + void *dst; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t count; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpyFromArrayAsync_v3020_params; + +typedef struct cudaMemcpy2DFromArrayAsync_v3020_params_st { + void *dst; + size_t dpitch; + cudaArray_const_t src; + size_t wOffset; + size_t hOffset; + size_t width; + size_t height; + enum cudaMemcpyKind kind; + cudaStream_t stream; +} cudaMemcpy2DFromArrayAsync_v3020_params; + +typedef struct cudaMemcpy3DAsync_v3020_params_st { + const struct cudaMemcpy3DParms *p; + cudaStream_t stream; +} cudaMemcpy3DAsync_v3020_params; + +typedef struct cudaMemcpy3DPeerAsync_v4000_params_st { + const struct cudaMemcpy3DPeerParms *p; + cudaStream_t stream; +} cudaMemcpy3DPeerAsync_v4000_params; + +typedef struct cudaMemsetAsync_v3020_params_st { + void *devPtr; + int value; + size_t count; + cudaStream_t stream; +} cudaMemsetAsync_v3020_params; + +typedef struct cudaMemset2DAsync_v3020_params_st { + void *devPtr; + size_t pitch; + int value; + size_t width; + size_t height; + cudaStream_t stream; +} cudaMemset2DAsync_v3020_params; + +typedef struct 
cudaMemset3DAsync_v3020_params_st { + struct cudaPitchedPtr pitchedDevPtr; + int value; + struct cudaExtent extent; + cudaStream_t stream; +} cudaMemset3DAsync_v3020_params; + +typedef struct cudaStreamQuery_v3020_params_st { + cudaStream_t stream; +} cudaStreamQuery_v3020_params; + +typedef struct cudaStreamGetFlags_v5050_params_st { + cudaStream_t hStream; + unsigned int *flags; +} cudaStreamGetFlags_v5050_params; + +typedef struct cudaStreamGetId_v12000_params_st { + cudaStream_t hStream; + unsigned long long *streamId; +} cudaStreamGetId_v12000_params; + +typedef struct cudaStreamGetPriority_v5050_params_st { + cudaStream_t hStream; + int *priority; +} cudaStreamGetPriority_v5050_params; + +typedef struct cudaEventRecord_v3020_params_st { + cudaEvent_t event; + cudaStream_t stream; +} cudaEventRecord_v3020_params; + +typedef struct cudaEventRecordWithFlags_v11010_params_st { + cudaEvent_t event; + cudaStream_t stream; + unsigned int flags; +} cudaEventRecordWithFlags_v11010_params; + +typedef struct cudaStreamWaitEvent_v3020_params_st { + cudaStream_t stream; + cudaEvent_t event; + unsigned int flags; +} cudaStreamWaitEvent_v3020_params; + +typedef struct cudaStreamAddCallback_v5000_params_st { + cudaStream_t stream; + cudaStreamCallback_t callback; + void *userData; + unsigned int flags; +} cudaStreamAddCallback_v5000_params; + +typedef struct cudaStreamAttachMemAsync_v6000_params_st { + cudaStream_t stream; + void *devPtr; + size_t length; + unsigned int flags; +} cudaStreamAttachMemAsync_v6000_params; + +typedef struct cudaStreamSynchronize_v3020_params_st { + cudaStream_t stream; +} cudaStreamSynchronize_v3020_params; + +typedef struct cudaLaunchKernel_v7000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchKernel_v7000_params; + +typedef struct cudaLaunchKernelExC_v11060_params_st { + const cudaLaunchConfig_t *config; + const void *func; + void **args; +} cudaLaunchKernelExC_v11060_params; + +typedef struct cudaLaunchCooperativeKernel_v9000_params_st { + const void *func; + dim3 gridDim; + dim3 blockDim; + void **args; + size_t sharedMem; + cudaStream_t stream; +} cudaLaunchCooperativeKernel_v9000_params; + +typedef struct cudaLaunchHostFunc_v10000_params_st { + cudaStream_t stream; + cudaHostFn_t fn; + void *userData; +} cudaLaunchHostFunc_v10000_params; + +typedef struct cudaMemPrefetchAsync_v8000_params_st { + const void *devPtr; + size_t count; + int dstDevice; + cudaStream_t stream; +} cudaMemPrefetchAsync_v8000_params; + +typedef struct cudaMemPrefetchAsync_v2_v12020_params_st { + const void *devPtr; + size_t count; + struct cudaMemLocation location; + unsigned int flags; + cudaStream_t stream; +} cudaMemPrefetchAsync_v2_v12020_params; + +typedef struct cudaSignalExternalSemaphoresAsync_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_v10000_params; + +typedef struct cudaSignalExternalSemaphoresAsync_ptsz_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_ptsz_v10000_params; + +typedef struct cudaSignalExternalSemaphoresAsync_v2_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreSignalParams *paramsArray; + 
unsigned int numExtSems; + cudaStream_t stream; +} cudaSignalExternalSemaphoresAsync_v2_v11020_params; + +typedef struct cudaWaitExternalSemaphoresAsync_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_v10000_params; + +typedef struct cudaWaitExternalSemaphoresAsync_ptsz_v10000_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams_v1 *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_ptsz_v10000_params; + +typedef struct cudaWaitExternalSemaphoresAsync_v2_v11020_params_st { + const cudaExternalSemaphore_t *extSemArray; + const struct cudaExternalSemaphoreWaitParams *paramsArray; + unsigned int numExtSems; + cudaStream_t stream; +} cudaWaitExternalSemaphoresAsync_v2_v11020_params; + +typedef struct cudaGraphInstantiateWithParams_v12000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + cudaGraphInstantiateParams *instantiateParams; +} cudaGraphInstantiateWithParams_v12000_params; + +typedef struct cudaGraphUpload_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphUpload_v10000_params; + +typedef struct cudaGraphLaunch_v10000_params_st { + cudaGraphExec_t graphExec; + cudaStream_t stream; +} cudaGraphLaunch_v10000_params; + +typedef struct cudaStreamBeginCapture_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureMode mode; +} cudaStreamBeginCapture_v10000_params; + +typedef struct cudaStreamBeginCaptureToGraph_v12030_params_st { + cudaStream_t stream; + cudaGraph_t graph; + const cudaGraphNode_t *dependencies; + const cudaGraphEdgeData *dependencyData; + size_t numDependencies; + enum cudaStreamCaptureMode mode; +} cudaStreamBeginCaptureToGraph_v12030_params; + +typedef struct cudaStreamEndCapture_v10000_params_st { + cudaStream_t stream; + cudaGraph_t *pGraph; +} cudaStreamEndCapture_v10000_params; + +typedef struct cudaStreamIsCapturing_v10000_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *pCaptureStatus; +} cudaStreamIsCapturing_v10000_params; + +typedef struct cudaStreamGetCaptureInfo_v10010_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; +} cudaStreamGetCaptureInfo_v10010_params; + +typedef struct cudaStreamGetCaptureInfo_ptsz_v10010_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; +} cudaStreamGetCaptureInfo_ptsz_v10010_params; + +typedef struct cudaStreamGetCaptureInfo_v2_v11030_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; + cudaGraph_t *graph_out; + const cudaGraphNode_t **dependencies_out; + size_t *numDependencies_out; +} cudaStreamGetCaptureInfo_v2_v11030_params; + +typedef struct cudaStreamGetCaptureInfo_v3_v12030_params_st { + cudaStream_t stream; + enum cudaStreamCaptureStatus *captureStatus_out; + unsigned long long *id_out; + cudaGraph_t *graph_out; + const cudaGraphNode_t **dependencies_out; + const cudaGraphEdgeData **edgeData_out; + size_t *numDependencies_out; +} cudaStreamGetCaptureInfo_v3_v12030_params; + +typedef struct cudaStreamUpdateCaptureDependencies_v11030_params_st { + cudaStream_t stream; + cudaGraphNode_t *dependencies; + size_t numDependencies; + unsigned int flags; +} 
cudaStreamUpdateCaptureDependencies_v11030_params; + +typedef struct cudaStreamUpdateCaptureDependencies_v2_v12030_params_st { + cudaStream_t stream; + cudaGraphNode_t *dependencies; + const cudaGraphEdgeData *dependencyData; + size_t numDependencies; + unsigned int flags; +} cudaStreamUpdateCaptureDependencies_v2_v12030_params; + +typedef struct cudaStreamCopyAttributes_v11000_params_st { + cudaStream_t dstStream; + cudaStream_t srcStream; +} cudaStreamCopyAttributes_v11000_params; + +typedef struct cudaStreamGetAttribute_v11000_params_st { + cudaStream_t stream; + cudaStreamAttrID attr; + cudaStreamAttrValue *value; +} cudaStreamGetAttribute_v11000_params; + +typedef struct cudaStreamSetAttribute_v11000_params_st { + cudaStream_t stream; + cudaStreamAttrID attr; + const cudaStreamAttrValue *param; +} cudaStreamSetAttribute_v11000_params; + +typedef struct cudaMallocAsync_v11020_params_st { + void **devPtr; + size_t size; + cudaStream_t hStream; +} cudaMallocAsync_v11020_params; + +typedef struct cudaFreeAsync_v11020_params_st { + void *devPtr; + cudaStream_t hStream; +} cudaFreeAsync_v11020_params; + +typedef struct cudaMallocFromPoolAsync_v11020_params_st { + void **ptr; + size_t size; + cudaMemPool_t memPool; + cudaStream_t stream; +} cudaMallocFromPoolAsync_v11020_params; + +typedef struct cudaGetDriverEntryPoint_v11030_params_st { + const char *symbol; + void **funcPtr; + unsigned long long flags; + enum cudaDriverEntryPointQueryResult *driverStatus; +} cudaGetDriverEntryPoint_v11030_params; + +typedef struct cudaGetDriverEntryPointByVersion_v12050_params_st { + const char *symbol; + void **funcPtr; + unsigned int cudaVersion; + unsigned long long flags; + enum cudaDriverEntryPointQueryResult *driverStatus; +} cudaGetDriverEntryPointByVersion_v12050_params; + +typedef struct cudaGetDeviceProperties_v3020_params_st { + struct cudaDeviceProp *prop; + int device; +} cudaGetDeviceProperties_v3020_params; + +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..88e79d1957925c4bbacd381e9461d5072de88f24 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h @@ -0,0 +1,38 @@ +// This file is generated. Any changes you make will be lost during the next clean build. 
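+ +// Illustrative usage sketch (not from the generated header): inside a CUPTI +// runtime-API callback, CUpti_CallbackData::functionParams points at the *_params +// struct matching the intercepted call, so the structs below can be used to read +// the caller's arguments. The callback-ID enumerator here is an assumption that +// follows CUPTI's usual CUPTI_RUNTIME_TRACE_CBID_<api>_<version> naming. +// +// void CUPTIAPI onRuntimeApi(void* userdata, CUpti_CallbackDomain domain, +//                            CUpti_CallbackId cbid, const CUpti_CallbackData* cbInfo) +// { +//     if (domain == CUPTI_CB_DOMAIN_RUNTIME_API && +//         cbid == CUPTI_RUNTIME_TRACE_CBID_cudaGraphicsVDPAURegisterVideoSurface_v3020 && // assumed name +//         cbInfo->callbackSite == CUPTI_API_ENTER) { +//         const cudaGraphicsVDPAURegisterVideoSurface_v3020_params* p = +//             (const cudaGraphicsVDPAURegisterVideoSurface_v3020_params*)cbInfo->functionParams; +//         // p->vdpSurface and p->flags carry the intercepted call's arguments. +//     } +// }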
+ +// CUDA public interface, for type definitions and api function prototypes +#include "cuda_vdpau_interop.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaVDPAUGetDevice_v3020_params_st { + int *device; + VdpDevice vdpDevice; + VdpGetProcAddress *vdpGetProcAddress; +} cudaVDPAUGetDevice_v3020_params; + +typedef struct cudaVDPAUSetVDPAUDevice_v3020_params_st { + int device; + VdpDevice vdpDevice; + VdpGetProcAddress *vdpGetProcAddress; +} cudaVDPAUSetVDPAUDevice_v3020_params; + +typedef struct cudaGraphicsVDPAURegisterVideoSurface_v3020_params_st { + struct cudaGraphicsResource **resource; + VdpVideoSurface vdpSurface; + unsigned int flags; +} cudaGraphicsVDPAURegisterVideoSurface_v3020_params; + +typedef struct cudaGraphicsVDPAURegisterOutputSurface_v3020_params_st { + struct cudaGraphicsResource **resource; + VdpOutputSurface vdpSurface; + unsigned int flags; +} cudaGraphicsVDPAURegisterOutputSurface_v3020_params; + +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a0fc27a71bb3fc883db9fe7562eea3f28145430d --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_cudart_removed_meta.h @@ -0,0 +1,162 @@ +// This file is generated. Any changes you make will be lost during the next clean build. 
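+ +// Note on naming: the _vNNNN suffix on each struct encodes the CUDA toolkit +// version that introduced the corresponding entry point's signature (e.g. +// _v3020 = CUDA 3.2, _v10200 = CUDA 10.2), mirroring CUPTI's versioned +// callback-ID convention, so a tool can select the struct layout that matches +// the callback ID it receives.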
+ +// CUDA public interface, for type definitions and api function prototypes +#include "cudart_removed.h" + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +// Currently used parameter trace structures +typedef struct cudaStreamDestroy_v3020_params_st { + cudaStream_t stream; +} cudaStreamDestroy_v3020_params; + +typedef struct cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params_st { + int *numBlocks; + const void *func; + size_t numDynamicSmemBytes; +} cudaOccupancyMaxActiveBlocksPerMultiprocessor_v6000_params; + +typedef struct cudaConfigureCall_v3020_params_st { + dim3 gridDim; + dim3 blockDim; + size_t sharedMem __dv; + cudaStream_t stream __dv; +} cudaConfigureCall_v3020_params; + +typedef struct cudaSetupArgument_v3020_params_st { + const void *arg; + size_t size; + size_t offset; +} cudaSetupArgument_v3020_params; + +typedef struct cudaLaunch_v3020_params_st { + const void *func; +} cudaLaunch_v3020_params; + +typedef struct cudaLaunch_ptsz_v7000_params_st { + const void *func; +} cudaLaunch_ptsz_v7000_params; + +typedef struct cudaStreamSetFlags_v10200_params_st { + cudaStream_t hStream; + unsigned int flags; +} cudaStreamSetFlags_v10200_params; + +typedef struct cudaStreamSetFlags_ptsz_v10200_params_st { + cudaStream_t hStream; + unsigned int flags; +} cudaStreamSetFlags_ptsz_v10200_params; + +typedef struct cudaProfilerInitialize_v4000_params_st { + const char *configFile; + const char *outputFile; + cudaOutputMode_t outputMode; +} cudaProfilerInitialize_v4000_params; + +typedef struct cudaThreadSetLimit_v3020_params_st { + enum cudaLimit limit; + size_t value; +} cudaThreadSetLimit_v3020_params; + +typedef struct cudaThreadGetLimit_v3020_params_st { + size_t *pValue; + enum cudaLimit limit; +} cudaThreadGetLimit_v3020_params; + +typedef struct cudaThreadGetCacheConfig_v3020_params_st { + enum cudaFuncCache *pCacheConfig; +} cudaThreadGetCacheConfig_v3020_params; + +typedef struct cudaThreadSetCacheConfig_v3020_params_st { + enum cudaFuncCache cacheConfig; +} cudaThreadSetCacheConfig_v3020_params; + +typedef struct cudaSetDoubleForDevice_v3020_params_st { + double *d; +} cudaSetDoubleForDevice_v3020_params; + +typedef struct cudaSetDoubleForHost_v3020_params_st { + double *d; +} cudaSetDoubleForHost_v3020_params; + +typedef struct cudaCreateTextureObject_v2_v11080_params_st { + cudaTextureObject_t *pTexObject; + const struct cudaResourceDesc *pResDesc; + const struct cudaTextureDesc *pTexDesc; + const struct cudaResourceViewDesc *pResViewDesc; +} cudaCreateTextureObject_v2_v11080_params; + +typedef struct cudaGetTextureObjectTextureDesc_v2_v11080_params_st { + struct cudaTextureDesc *pTexDesc; + cudaTextureObject_t texObject; +} cudaGetTextureObjectTextureDesc_v2_v11080_params; + +typedef struct cudaBindTexture_v3020_params_st { + size_t *offset; + const struct textureReference *texref; + const void *devPtr; + const struct cudaChannelFormatDesc *desc; + size_t size __dv; +} cudaBindTexture_v3020_params; + +typedef struct cudaBindTexture2D_v3020_params_st { + size_t *offset; + const struct textureReference *texref; + const void *devPtr; + const struct cudaChannelFormatDesc *desc; + size_t width; + size_t height; + size_t pitch; +} cudaBindTexture2D_v3020_params; + +typedef struct cudaBindTextureToArray_v3020_params_st { + const struct textureReference *texref; + cudaArray_const_t array; + const struct 
cudaChannelFormatDesc *desc; +} cudaBindTextureToArray_v3020_params; + +typedef struct cudaBindTextureToMipmappedArray_v5000_params_st { + const struct textureReference *texref; + cudaMipmappedArray_const_t mipmappedArray; + const struct cudaChannelFormatDesc *desc; +} cudaBindTextureToMipmappedArray_v5000_params; + +typedef struct cudaUnbindTexture_v3020_params_st { + const struct textureReference *texref; +} cudaUnbindTexture_v3020_params; + +typedef struct cudaGetTextureAlignmentOffset_v3020_params_st { + size_t *offset; + const struct textureReference *texref; +} cudaGetTextureAlignmentOffset_v3020_params; + +typedef struct cudaGetTextureReference_v3020_params_st { + const struct textureReference **texref; + const void *symbol; +} cudaGetTextureReference_v3020_params; + +typedef struct cudaBindSurfaceToArray_v3020_params_st { + const struct surfaceReference *surfref; + cudaArray_const_t array; + const struct cudaChannelFormatDesc *desc; +} cudaBindSurfaceToArray_v3020_params; + +typedef struct cudaGetSurfaceReference_v3020_params_st { + const struct surfaceReference **surfref; + const void *symbol; +} cudaGetSurfaceReference_v3020_params; + +typedef struct cudaGraphInstantiate_v10000_params_st { + cudaGraphExec_t *pGraphExec; + cudaGraph_t graph; + cudaGraphNode_t *pErrorNode; + char *pLogBuffer; + size_t bufferSize; +} cudaGraphInstantiate_v10000_params; + +// Parameter trace structures for removed functions + + +// End of parameter trace structures diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..ed8877e21f0651fe1564151090850694eb495cfb --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/generated_nvtx_meta.h @@ -0,0 +1,247 @@ +/* + * Copyright 2013-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility push(default) +#endif + +// ************************************************************************* +// Definitions of structs to hold parameters for each function +// ************************************************************************* + +typedef struct nvtxMarkEx_params_st { + const nvtxEventAttributes_t* eventAttrib; +} nvtxMarkEx_params; + +typedef struct nvtxMarkA_params_st { + const char* message; +} nvtxMarkA_params; + +typedef struct nvtxMarkW_params_st { + const wchar_t* message; +} nvtxMarkW_params; + +typedef struct nvtxRangeStartEx_params_st { + const nvtxEventAttributes_t* eventAttrib; +} nvtxRangeStartEx_params; + +typedef struct nvtxRangeStartA_params_st { + const char* message; +} nvtxRangeStartA_params; + +typedef struct nvtxRangeStartW_params_st { + const wchar_t* message; +} nvtxRangeStartW_params; + +typedef struct nvtxRangeEnd_params_st { + nvtxRangeId_t id; +} nvtxRangeEnd_params; + +typedef struct nvtxRangePushEx_params_st { + const nvtxEventAttributes_t* eventAttrib; +} nvtxRangePushEx_params; + +typedef struct nvtxRangePushA_params_st { + const char* message; +} nvtxRangePushA_params; + +typedef struct nvtxRangePushW_params_st { + const wchar_t* message; +} nvtxRangePushW_params; + +typedef struct nvtxRangePop_params_st { + /* WAR: Windows compiler doesn't allow empty structs */ + /* This field shouldn't be used */ + void *dummy; +} nvtxRangePop_params; + +typedef struct nvtxNameCategoryA_params_st { + uint32_t category; + const char* name; +} nvtxNameCategoryA_params; + +typedef struct nvtxNameCategoryW_params_st { + uint32_t category; + const wchar_t* name; +} nvtxNameCategoryW_params; + +typedef struct nvtxNameOsThreadA_params_st { + uint32_t threadId; + const char* name; +} nvtxNameOsThreadA_params; + +typedef struct nvtxNameOsThreadW_params_st { + uint32_t threadId; + const wchar_t* name; +} nvtxNameOsThreadW_params; + +typedef struct nvtxNameCuDeviceA_params_st { + CUdevice device; + const char* name; +} nvtxNameCuDeviceA_params; + +typedef struct nvtxNameCuDeviceW_params_st { + CUdevice device; + const wchar_t* name; +} nvtxNameCuDeviceW_params; + +typedef struct nvtxNameCuContextA_params_st { + CUcontext context; + const char* name; +} 
nvtxNameCuContextA_params; + +typedef struct nvtxNameCuContextW_params_st { + CUcontext context; + const wchar_t* name; +} nvtxNameCuContextW_params; + +typedef struct nvtxNameCuStreamA_params_st { + CUstream stream; + const char* name; +} nvtxNameCuStreamA_params; + +typedef struct nvtxNameCuStreamW_params_st { + CUstream stream; + const wchar_t* name; +} nvtxNameCuStreamW_params; + +typedef struct nvtxNameCuEventA_params_st { + CUevent event; + const char* name; +} nvtxNameCuEventA_params; + +typedef struct nvtxNameCuEventW_params_st { + CUevent event; + const wchar_t* name; +} nvtxNameCuEventW_params; + +typedef struct nvtxNameCudaDeviceA_params_st { + int device; + const char* name; +} nvtxNameCudaDeviceA_params; + +typedef struct nvtxNameCudaDeviceW_params_st { + int device; + const wchar_t* name; +} nvtxNameCudaDeviceW_params; + +typedef struct nvtxNameCudaStreamA_params_st { + cudaStream_t stream; + const char* name; +} nvtxNameCudaStreamA_params; + +typedef struct nvtxNameCudaStreamW_params_st { + cudaStream_t stream; + const wchar_t* name; +} nvtxNameCudaStreamW_params; + +typedef struct nvtxNameCudaEventA_params_st { + cudaEvent_t event; + const char* name; +} nvtxNameCudaEventA_params; + +typedef struct nvtxNameCudaEventW_params_st { + cudaEvent_t event; + const wchar_t* name; +} nvtxNameCudaEventW_params; + +typedef struct nvtxDomainCreateA_params_st { + const char* name; +} nvtxDomainCreateA_params; + +typedef struct nvtxDomainDestroy_params_st { + nvtxDomainHandle_t domain; +} nvtxDomainDestroy_params; + +typedef struct nvtxDomainMarkEx_params_st { + nvtxDomainHandle_t domain; + nvtxMarkEx_params core; +} nvtxDomainMarkEx_params; + +typedef struct nvtxDomainRangeStartEx_params_st { + nvtxDomainHandle_t domain; + nvtxRangeStartEx_params core; +} nvtxDomainRangeStartEx_params; + +typedef struct nvtxDomainRangeEnd_params_st { + nvtxDomainHandle_t domain; + nvtxRangeEnd_params core; +} nvtxDomainRangeEnd_params; + +typedef struct nvtxDomainRangePushEx_params_st { + nvtxDomainHandle_t domain; + nvtxRangePushEx_params core; +} nvtxDomainRangePushEx_params; + +typedef struct nvtxDomainRangePop_params_st { + nvtxDomainHandle_t domain; +} nvtxDomainRangePop_params; + +typedef struct nvtxSyncUserCreate_params_st { + nvtxDomainHandle_t domain; + const nvtxSyncUserAttributes_t* attribs; +} nvtxSyncUserCreate_params; + +typedef struct nvtxSyncUserCommon_params_st { + nvtxSyncUser_t handle; +} nvtxSyncUserCommon_params; + +typedef struct nvtxDomainRegisterStringA_params_st { + nvtxDomainHandle_t domain; + const char* string; +} nvtxDomainRegisterStringA_params; + +typedef struct nvtxDomainRegisterStringW_params_st { + nvtxDomainHandle_t domain; + const wchar_t* string; +} nvtxDomainRegisterStringW_params; + +#if defined(__GNUC__) && defined(CUPTI_LIB) + #pragma GCC visibility pop +#endif diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_common.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_common.h new file mode 100644 index 0000000000000000000000000000000000000000..0ed01f7bc2851f43678e58efe34fc5579cca3a35 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_common.h @@ -0,0 +1,393 @@ +#ifndef NVPERF_COMMON_H +#define NVPERF_COMMON_H + +/* + * Copyright 2014-2024 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws.
+ * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. + */ + +#include <stddef.h> +#include <stdint.h> + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_common.h + */ + +#ifndef NVPERF_NVPA_STATUS_DEFINED +#define NVPERF_NVPA_STATUS_DEFINED + + /// Error codes. + typedef enum NVPA_Status + { + /// Success + NVPA_STATUS_SUCCESS = 0, + /// Generic error. + NVPA_STATUS_ERROR = 1, + /// Internal error. Please file a bug! + NVPA_STATUS_INTERNAL_ERROR = 2, + /// NVPW_InitializeTarget() or NVPW_InitializeHost() has not been called yet. + NVPA_STATUS_NOT_INITIALIZED = 3, + /// The NvPerf DLL/DSO could not be loaded during NVPW_Initialize*(). Please ensure they are placed in the + /// appropriate location that can be found by a dynamic linker. And on Linux systems, confirm that the + /// LD_LIBRARY_PATH environment variable is set correctly. Alternatively, you may utilize + /// NVPW_SetLibraryLoadPaths() to define additional library search paths. + NVPA_STATUS_NOT_LOADED = 4, + /// The function was not found in this version of the NvPerf DLL/DSO. Or if you are directly calling + /// NVPA_GetProcAddress(), please ensure the function name is spelled correctly. + NVPA_STATUS_FUNCTION_NOT_FOUND = 5, + /// The request was intentionally not supported. + NVPA_STATUS_NOT_SUPPORTED = 6, + /// The request was not implemented by this version. + NVPA_STATUS_NOT_IMPLEMENTED = 7, + /// Invalid argument. + NVPA_STATUS_INVALID_ARGUMENT = 8, + /// UNUSED + NVPA_STATUS_INVALID_METRIC_ID = 9, + /// No driver has been loaded via NVPW_*_LoadDriver(). + NVPA_STATUS_DRIVER_NOT_LOADED = 10, + /// Failed memory allocation.
+ NVPA_STATUS_OUT_OF_MEMORY = 11, + /// UNUSED + NVPA_STATUS_INVALID_THREAD_STATE = 12, + /// UNUSED + NVPA_STATUS_FAILED_CONTEXT_ALLOC = 13, + /// The specified GPU is not supported. It is recommended to call IsGpuSupported() for more information + NVPA_STATUS_UNSUPPORTED_GPU = 14, + /// The installed NVIDIA driver is too old. + NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION = 15, + /// UNUSED + NVPA_STATUS_OBJECT_NOT_REGISTERED = 16, + /// Profiling permission not granted; see https://developer.nvidia.com/nvidia-development-tools-solutions- + /// ERR_NVGPUCTRPERM-permission-issue-performance-counters + NVPA_STATUS_INSUFFICIENT_PRIVILEGE = 17, + /// UNUSED + NVPA_STATUS_INVALID_CONTEXT_STATE = 18, + /// UNUSED + NVPA_STATUS_INVALID_OBJECT_STATE = 19, + /// The request could not be fulfilled because a system resource is already in use. + NVPA_STATUS_RESOURCE_UNAVAILABLE = 20, + /// UNUSED + NVPA_STATUS_DRIVER_LOADED_TOO_LATE = 21, + /// The provided buffer is not large enough. + NVPA_STATUS_INSUFFICIENT_SPACE = 22, + /// UNUSED + NVPA_STATUS_OBJECT_MISMATCH = 23, + /// Virtualized GPU (vGPU) is not supported. + NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED = 24, + /// Profiling permission was not granted or the device was disabled. + NVPA_STATUS_PROFILING_NOT_ALLOWED = 25, + NVPA_STATUS__COUNT + } NVPA_Status; + + + inline void NVPW_NVPAStatusToString(NVPA_Status status, const char** ppStatusStr, const char** ppCommentStr) + { + switch (status) + { + case NVPA_STATUS_SUCCESS: + *ppStatusStr = "NVPA_STATUS_SUCCESS"; + *ppCommentStr = "Success"; + return; + case NVPA_STATUS_ERROR: + *ppStatusStr = "NVPA_STATUS_ERROR"; + *ppCommentStr = "Generic error."; + return; + case NVPA_STATUS_INTERNAL_ERROR: + *ppStatusStr = "NVPA_STATUS_INTERNAL_ERROR"; + *ppCommentStr = "Internal error. Please file a bug!"; + return; + case NVPA_STATUS_NOT_INITIALIZED: + *ppStatusStr = "NVPA_STATUS_NOT_INITIALIZED"; + *ppCommentStr = "NVPW_InitializeTarget() or NVPW_InitializeHost() has not been called yet."; + return; + case NVPA_STATUS_NOT_LOADED: + *ppStatusStr = "NVPA_STATUS_NOT_LOADED"; + *ppCommentStr = "The NvPerf DLL/DSO could not be loaded during NVPW_Initialize*(). Please ensure they are placed in the appropriate location that can be found by a dynamic linker. And on Linux systems, confirm that the LD_LIBRARY_PATH environment variable is set correctly. Alternatively, you may utilize NVPW_SetLibraryLoadPaths() to define additional library search paths."; + return; + case NVPA_STATUS_FUNCTION_NOT_FOUND: + *ppStatusStr = "NVPA_STATUS_FUNCTION_NOT_FOUND"; + *ppCommentStr = "The function was not found in this version of the NvPerf DLL/DSO. 
Or if you are directly calling NVPA_GetProcAddress(), please ensure the function name is spelled correctly."; + return; + case NVPA_STATUS_NOT_SUPPORTED: + *ppStatusStr = "NVPA_STATUS_NOT_SUPPORTED"; + *ppCommentStr = "The request was intentionally not supported."; + return; + case NVPA_STATUS_NOT_IMPLEMENTED: + *ppStatusStr = "NVPA_STATUS_NOT_IMPLEMENTED"; + *ppCommentStr = "The request was not implemented by this version."; + return; + case NVPA_STATUS_INVALID_ARGUMENT: + *ppStatusStr = "NVPA_STATUS_INVALID_ARGUMENT"; + *ppCommentStr = "Invalid argument."; + return; + case NVPA_STATUS_INVALID_METRIC_ID: + *ppStatusStr = "NVPA_STATUS_INVALID_METRIC_ID"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_DRIVER_NOT_LOADED: + *ppStatusStr = "NVPA_STATUS_DRIVER_NOT_LOADED"; + *ppCommentStr = "No driver has been loaded via NVPW_*_LoadDriver()."; + return; + case NVPA_STATUS_OUT_OF_MEMORY: + *ppStatusStr = "NVPA_STATUS_OUT_OF_MEMORY"; + *ppCommentStr = "Failed memory allocation."; + return; + case NVPA_STATUS_INVALID_THREAD_STATE: + *ppStatusStr = "NVPA_STATUS_INVALID_THREAD_STATE"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_FAILED_CONTEXT_ALLOC: + *ppStatusStr = "NVPA_STATUS_FAILED_CONTEXT_ALLOC"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_UNSUPPORTED_GPU: + *ppStatusStr = "NVPA_STATUS_UNSUPPORTED_GPU"; + *ppCommentStr = "The specified GPU is not supported. It is recommended to call IsGpuSupported() for more information"; + return; + case NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION: + *ppStatusStr = "NVPA_STATUS_INSUFFICIENT_DRIVER_VERSION"; + *ppCommentStr = "The installed NVIDIA driver is too old."; + return; + case NVPA_STATUS_OBJECT_NOT_REGISTERED: + *ppStatusStr = "NVPA_STATUS_OBJECT_NOT_REGISTERED"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_INSUFFICIENT_PRIVILEGE: + *ppStatusStr = "NVPA_STATUS_INSUFFICIENT_PRIVILEGE"; + *ppCommentStr = "Profiling permission not granted; see https://developer.nvidia.com/nvidia-development-tools-solutions-ERR_NVGPUCTRPERM-permission-issue-performance-counters"; + return; + case NVPA_STATUS_INVALID_CONTEXT_STATE: + *ppStatusStr = "NVPA_STATUS_INVALID_CONTEXT_STATE"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_INVALID_OBJECT_STATE: + *ppStatusStr = "NVPA_STATUS_INVALID_OBJECT_STATE"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_RESOURCE_UNAVAILABLE: + *ppStatusStr = "NVPA_STATUS_RESOURCE_UNAVAILABLE"; + *ppCommentStr = "The request could not be fulfilled because a system resource is already in use."; + return; + case NVPA_STATUS_DRIVER_LOADED_TOO_LATE: + *ppStatusStr = "NVPA_STATUS_DRIVER_LOADED_TOO_LATE"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_INSUFFICIENT_SPACE: + *ppStatusStr = "NVPA_STATUS_INSUFFICIENT_SPACE"; + *ppCommentStr = "The provided buffer is not large enough."; + return; + case NVPA_STATUS_OBJECT_MISMATCH: + *ppStatusStr = "NVPA_STATUS_OBJECT_MISMATCH"; + *ppCommentStr = "UNUSED"; + return; + case NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED: + *ppStatusStr = "NVPA_STATUS_VIRTUALIZED_DEVICE_NOT_SUPPORTED"; + *ppCommentStr = "Virtualized GPU (vGPU) is not supported."; + return; + case NVPA_STATUS_PROFILING_NOT_ALLOWED: + *ppStatusStr = "NVPA_STATUS_PROFILING_NOT_ALLOWED"; + *ppCommentStr = "Profiling permission was not granted or the device was disabled."; + return; + default: + *ppStatusStr = "Unrecognized status"; + *ppCommentStr = "This status is unrecognized. 
Is it coming from a newer version of NvPerf library?"; + return; + } + } + + +#endif // NVPERF_NVPA_STATUS_DEFINED + + +#ifndef NVPERF_NVPA_ACTIVITY_KIND_DEFINED +#define NVPERF_NVPA_ACTIVITY_KIND_DEFINED + + /// The configuration's activity-kind dictates which types of data may be collected. + typedef enum NVPA_ActivityKind + { + /// Invalid value. + NVPA_ACTIVITY_KIND_INVALID = 0, + /// A workload-centric activity for serialized and pipelined collection. + /// + /// Profiler is capable of collecting both serialized and pipelined metrics. The library introduces any + /// synchronization required to collect serialized metrics. + NVPA_ACTIVITY_KIND_PROFILER, + /// A realtime activity for sampling counters from the CPU or GPU. + NVPA_ACTIVITY_KIND_REALTIME_SAMPLED, + /// A realtime activity for profiling counters from the CPU or GPU without CPU/GPU synchronizations. + NVPA_ACTIVITY_KIND_REALTIME_PROFILER, + NVPA_ACTIVITY_KIND__COUNT + } NVPA_ActivityKind; + + +#endif // NVPERF_NVPA_ACTIVITY_KIND_DEFINED + + +#ifndef NVPERF_NVPA_BOOL_DEFINED +#define NVPERF_NVPA_BOOL_DEFINED + /// The type used for boolean values. + typedef uint8_t NVPA_Bool; +#endif // NVPERF_NVPA_BOOL_DEFINED + +#ifndef NVPA_STRUCT_SIZE +#define NVPA_STRUCT_SIZE(type_, lastfield_) (offsetof(type_, lastfield_) + sizeof(((type_*)0)->lastfield_)) +#endif // NVPA_STRUCT_SIZE + +#ifndef NVPW_FIELD_EXISTS +#define NVPW_FIELD_EXISTS(pParams_, name_) \ + ((pParams_)->structSize >= (size_t)((const uint8_t*)(&(pParams_)->name_) + sizeof((pParams_)->name_) - (const uint8_t*)(pParams_))) +#endif // NVPW_FIELD_EXISTS + + +#ifndef NVPERF_NVPA_GETPROCADDRESS_DEFINED +#define NVPERF_NVPA_GETPROCADDRESS_DEFINED + +typedef NVPA_Status (*NVPA_GenericFn)(void); + + + /// + /// Gets the address of an NvPerf API function. + /// + /// \return A function pointer to the function, or NULL if the function is not available. + /// + /// \param pFunctionName [in] Name of the function to retrieve. + NVPA_GenericFn NVPA_GetProcAddress(const char* pFunctionName); + +#endif + +#ifndef NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED +#define NVPERF_NVPW_SETLIBRARYLOADPATHS_DEFINED + + + typedef struct NVPW_SetLibraryLoadPaths_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] number of paths in ppPaths + size_t numPaths; + /// [in] array of null-terminated paths + const char** ppPaths; + } NVPW_SetLibraryLoadPaths_Params; +#define NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPaths_Params, ppPaths) + + /// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget(). + /// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO. This function sets + /// ordered paths that will be searched with the LoadLibrary() or dlopen() call. + /// If load paths are set by this function, the default set of load paths + /// will not be attempted. + /// Each path must point at a directory (not a file name). + /// This function is not thread-safe. 
+ /// Example Usage: + /// \code + /// const char* paths[] = { + /// "path1", "path2", etc + /// }; + /// NVPW_SetLibraryLoadPaths_Params params{NVPW_SetLibraryLoadPaths_Params_STRUCT_SIZE}; + /// params.numPaths = sizeof(paths)/sizeof(paths[0]); + /// params.ppPaths = paths; + /// NVPW_SetLibraryLoadPaths(&params); + /// NVPW_InitializeHost(); + /// params.numPaths = 0; + /// params.ppPaths = NULL; + /// NVPW_SetLibraryLoadPaths(&params); + /// \endcode + NVPA_Status NVPW_SetLibraryLoadPaths(NVPW_SetLibraryLoadPaths_Params* pParams); + + typedef struct NVPW_SetLibraryLoadPathsW_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] number of paths in ppwPaths + size_t numPaths; + /// [in] array of null-terminated paths + const wchar_t** ppwPaths; + } NVPW_SetLibraryLoadPathsW_Params; +#define NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_SetLibraryLoadPathsW_Params, ppwPaths) + + /// Sets library search path for \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget(). + /// \ref NVPW_InitializeHost() and \ref NVPW_InitializeTarget load the NvPerf DLL/DSO. This function sets + /// ordered paths that will be searched with the LoadLibrary() or dlopen() call. + /// If load paths are set by this function, the default set of load paths + /// will not be attempted. + /// Each path must point at a directory (not a file name). + /// This function is not thread-safe. + /// Example Usage: + /// \code + /// const wchar_t* wpaths[] = { + /// L"path1", L"path2", etc + /// }; + /// NVPW_SetLibraryLoadPathsW_Params params{NVPW_SetLibraryLoadPathsW_Params_STRUCT_SIZE}; + /// params.numPaths = sizeof(wpaths)/sizeof(wpaths[0]); + /// params.ppwPaths = wpaths; + /// NVPW_SetLibraryLoadPathsW(&params); + /// NVPW_InitializeHost(); + /// params.numPaths = 0; + /// params.ppwPaths = NULL; + /// NVPW_SetLibraryLoadPathsW(&params); + /// \endcode + NVPA_Status NVPW_SetLibraryLoadPathsW(NVPW_SetLibraryLoadPathsW_Params* pParams); + +#endif + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_COMMON_H diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h new file mode 100644 index 0000000000000000000000000000000000000000..9b4533b25148b7cd28e0ed30be022893514415a5 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_cuda_host.h @@ -0,0 +1,179 @@ +#ifndef NVPERF_CUDA_HOST_H +#define NVPERF_CUDA_HOST_H + +/* + * Copyright 2014-2024 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. + * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. + */ + +#include <stddef.h> +#include <stdint.h> +#include "nvperf_common.h" +#include "nvperf_host.h" + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_cuda_host.h + */ + + typedef struct NVPW_CUDA_RawMetricsConfig_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + NVPA_ActivityKind activityKind; + /// [in] + const char* pChipName; + /// [out] new NVPA_RawMetricsConfig object + struct NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_CUDA_RawMetricsConfig_Create_Params; +#define NVPW_CUDA_RawMetricsConfig_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_Params, pRawMetricsConfig) + + NVPA_Status NVPW_CUDA_RawMetricsConfig_Create(NVPW_CUDA_RawMetricsConfig_Create_Params* pParams); + + typedef struct NVPW_CUDA_RawMetricsConfig_Create_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + NVPA_ActivityKind activityKind; + /// [in] accepted for chips supported at the time-of-release. + const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [out] new NVPA_RawMetricsConfig object + struct NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_CUDA_RawMetricsConfig_Create_V2_Params; +#define NVPW_CUDA_RawMetricsConfig_Create_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_RawMetricsConfig_Create_V2_Params, pRawMetricsConfig) + + /// Use either 'pChipName' or 'pCounterAvailabilityImage'. + NVPA_Status NVPW_CUDA_RawMetricsConfig_Create_V2(NVPW_CUDA_RawMetricsConfig_Create_V2_Params* pParams); + + typedef struct NVPW_CUDA_CounterDataBuilder_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] accepted for chips supported at the time-of-release. 
+ const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [out] new NVPA_CounterDataBuilder object + struct NVPA_CounterDataBuilder* pCounterDataBuilder; + } NVPW_CUDA_CounterDataBuilder_Create_Params; +#define NVPW_CUDA_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_CounterDataBuilder_Create_Params, pCounterDataBuilder) + + /// Use either 'pChipName' or 'pCounterAvailabilityImage'. + NVPA_Status NVPW_CUDA_CounterDataBuilder_Create(NVPW_CUDA_CounterDataBuilder_Create_Params* pParams); + + typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator; + + typedef struct NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] accepted for chips supported at the time-of-release. + const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [out] + size_t scratchBufferSize; + } NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params; +#define NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params, scratchBufferSize) + + /// Use either 'pChipName' or 'pCounterAvailabilityImage'. + NVPA_Status NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize(NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params* pParams); + + typedef struct NVPW_CUDA_MetricsEvaluator_Initialize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + uint8_t* pScratchBuffer; + /// [in] the size of the 'pScratchBuffer' array, should be at least the size of the 'scratchBufferSize' returned + /// by 'NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize' + size_t scratchBufferSize; + /// [in] accepted for chips supported at the time-of-release. + const char* pChipName; + /// [in] buffer with counter availability image - required for future chip support + const uint8_t* pCounterAvailabilityImage; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] must be provided if 'pCounterDataImage' is not NULL + size_t counterDataImageSize; + /// [out] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + } NVPW_CUDA_MetricsEvaluator_Initialize_Params; +#define NVPW_CUDA_MetricsEvaluator_Initialize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CUDA_MetricsEvaluator_Initialize_Params, pMetricsEvaluator) + + /// Use one of 'pChipName', 'pCounterAvailabilityImage', or 'pCounterDataImage'. 'pChipName' or + /// 'pCounterAvailabilityImage' will create a metrics evaluator based on a virtual device while 'pCounterDataImage' + /// will create a metrics evaluator based on the actual device. 
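+ /// Example Usage: an illustrative sketch (status checks omitted; "GA100" is only a + /// placeholder chip name) of the two-step flow of sizing the scratch buffer before + /// initializing the evaluator: + /// \code + /// NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params calcParams = { NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize_Params_STRUCT_SIZE }; + /// calcParams.pChipName = "GA100"; + /// NVPW_CUDA_MetricsEvaluator_CalculateScratchBufferSize(&calcParams); + /// uint8_t* pScratch = (uint8_t*)malloc(calcParams.scratchBufferSize); + /// NVPW_CUDA_MetricsEvaluator_Initialize_Params initParams = { NVPW_CUDA_MetricsEvaluator_Initialize_Params_STRUCT_SIZE }; + /// initParams.pScratchBuffer = pScratch; + /// initParams.scratchBufferSize = calcParams.scratchBufferSize; + /// initParams.pChipName = "GA100"; + /// NVPW_CUDA_MetricsEvaluator_Initialize(&initParams); + /// // initParams.pMetricsEvaluator is now ready for use. + /// \endcode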
+ NVPA_Status NVPW_CUDA_MetricsEvaluator_Initialize(NVPW_CUDA_MetricsEvaluator_Initialize_Params* pParams); + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_CUDA_HOST_H diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_host.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_host.h new file mode 100644 index 0000000000000000000000000000000000000000..824fda055ebe65f5cf66b71d096f2ecdfa4b5b92 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_host.h @@ -0,0 +1,1152 @@ +#ifndef NVPERF_HOST_H +#define NVPERF_HOST_H + +/* + * Copyright 2014-2024 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. + * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. 
+ */ + +#include <stddef.h> +#include <stdint.h> +#include "nvperf_common.h" + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_host.h + */ + + +// Guard against multiple definition of NvPerf host types +#ifndef NVPERF_HOST_API_DEFINED +#define NVPERF_HOST_API_DEFINED + + +/***************************************************************************//** + * @name Host Configuration + * @{ + */ + + typedef struct NVPW_InitializeHost_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + } NVPW_InitializeHost_Params; +#define NVPW_InitializeHost_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_InitializeHost_Params, pPriv) + + /// Load the host library. + NVPA_Status NVPW_InitializeHost(NVPW_InitializeHost_Params* pParams); + + typedef struct NVPW_CounterData_CalculateCounterDataImageCopySize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// The CounterDataPrefix generated from e.g. nvperf2 initdata or + /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8). + const uint8_t* pCounterDataPrefix; + size_t counterDataPrefixSize; + /// max number of ranges that can be profiled + uint32_t maxNumRanges; + /// max number of RangeTree nodes; must be >= maxNumRanges + uint32_t maxNumRangeTreeNodes; + /// max string length of each RangeName, including the trailing NUL character + uint32_t maxRangeNameLength; + const uint8_t* pCounterDataSrc; + /// [out] required size of the copy buffer + size_t copyDataImageCounterSize; + } NVPW_CounterData_CalculateCounterDataImageCopySize_Params; +#define NVPW_CounterData_CalculateCounterDataImageCopySize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_CalculateCounterDataImageCopySize_Params, copyDataImageCounterSize) + + NVPA_Status NVPW_CounterData_CalculateCounterDataImageCopySize(NVPW_CounterData_CalculateCounterDataImageCopySize_Params* pParams); + + typedef struct NVPW_CounterData_InitializeCounterDataImageCopy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// The CounterDataPrefix generated from e.g. nvperf2 initdata or + /// NVPW_CounterDataBuilder_GetCounterDataPrefix(). Must be align(8). + const uint8_t* pCounterDataPrefix; + size_t counterDataPrefixSize; + /// max number of ranges that can be profiled + uint32_t maxNumRanges; + /// max number of RangeTree nodes; must be >= maxNumRanges + uint32_t maxNumRangeTreeNodes; + /// max string length of each RangeName, including the trailing NUL character + uint32_t maxRangeNameLength; + const uint8_t* pCounterDataSrc; + uint8_t* pCounterDataDst; + } NVPW_CounterData_InitializeCounterDataImageCopy_Params; +#define NVPW_CounterData_InitializeCounterDataImageCopy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_InitializeCounterDataImageCopy_Params, pCounterDataDst) + + NVPA_Status NVPW_CounterData_InitializeCounterDataImageCopy(NVPW_CounterData_InitializeCounterDataImageCopy_Params* pParams); + + typedef struct NVPW_CounterData_ExtractCounterDataPrefix_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// The source buffer to extract the prefix from. + const uint8_t* pCounterDataSrc; + size_t counterDataSrcSize; + /// [in] If not NULL, the prefix will be copied into this buffer. 
+ uint8_t* pCounterDataPrefix; + /// [inout] if 'pCounterDataPrefix' is NULL, size of counter data prefix will be returned; otherwise it should + /// be set to the size of buffer allocated for 'pCounterDataPrefix'. + size_t counterDataPrefixSize; + } NVPW_CounterData_ExtractCounterDataPrefix_Params; +#define NVPW_CounterData_ExtractCounterDataPrefix_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_ExtractCounterDataPrefix_Params, counterDataPrefixSize) + + NVPA_Status NVPW_CounterData_ExtractCounterDataPrefix(NVPW_CounterData_ExtractCounterDataPrefix_Params* pParams); + + typedef struct NVPA_CounterDataCombiner NVPA_CounterDataCombiner; + + typedef struct NVPW_CounterDataCombiner_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// The destination counter data into which the source datas will be combined + uint8_t* pCounterDataDst; + /// [out] The created counter data combiner + NVPA_CounterDataCombiner* pCounterDataCombiner; + } NVPW_CounterDataCombiner_Create_Params; +#define NVPW_CounterDataCombiner_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Create_Params, pCounterDataCombiner) + + NVPA_Status NVPW_CounterDataCombiner_Create(NVPW_CounterDataCombiner_Create_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + } NVPW_CounterDataCombiner_Destroy_Params; +#define NVPW_CounterDataCombiner_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_Destroy_Params, pCounterDataCombiner) + + NVPA_Status NVPW_CounterDataCombiner_Destroy(NVPW_CounterDataCombiner_Destroy_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_CreateRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t numDescriptions; + const char* const* ppDescriptions; + /// [out] + size_t rangeIndexDst; + } NVPW_CounterDataCombiner_CreateRange_Params; +#define NVPW_CounterDataCombiner_CreateRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CreateRange_Params, rangeIndexDst) + + NVPA_Status NVPW_CounterDataCombiner_CreateRange(NVPW_CounterDataCombiner_CreateRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_CopyIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + NVPA_CounterDataCombiner* pCounterDataCombiner; + /// [in] + size_t rangeIndexDst; + /// [in] + const uint8_t* pCounterDataSrc; + /// [in] + size_t rangeIndexSrc; + } NVPW_CounterDataCombiner_CopyIntoRange_Params; +#define NVPW_CounterDataCombiner_CopyIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_CopyIntoRange_Params, rangeIndexSrc) + + /// In order to use this API, the source counter data and the destination counter data must have identical counters + NVPA_Status NVPW_CounterDataCombiner_CopyIntoRange(NVPW_CounterDataCombiner_CopyIntoRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_AccumulateIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t rangeIndexDst; + uint32_t dstMultiplier; + const uint8_t* pCounterDataSrc; + size_t rangeIndexSrc; + uint32_t srcMultiplier; + } NVPW_CounterDataCombiner_AccumulateIntoRange_Params; +#define NVPW_CounterDataCombiner_AccumulateIntoRange_Params_STRUCT_SIZE 
NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_AccumulateIntoRange_Params, srcMultiplier) + + NVPA_Status NVPW_CounterDataCombiner_AccumulateIntoRange(NVPW_CounterDataCombiner_AccumulateIntoRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_SumIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t rangeIndexDst; + const uint8_t* pCounterDataSrc; + size_t rangeIndexSrc; + } NVPW_CounterDataCombiner_SumIntoRange_Params; +#define NVPW_CounterDataCombiner_SumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_SumIntoRange_Params, rangeIndexSrc) + + NVPA_Status NVPW_CounterDataCombiner_SumIntoRange(NVPW_CounterDataCombiner_SumIntoRange_Params* pParams); + + typedef struct NVPW_CounterDataCombiner_WeightedSumIntoRange_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataCombiner* pCounterDataCombiner; + size_t rangeIndexDst; + double dstMultiplier; + const uint8_t* pCounterDataSrc; + size_t rangeIndexSrc; + double srcMultiplier; + } NVPW_CounterDataCombiner_WeightedSumIntoRange_Params; +#define NVPW_CounterDataCombiner_WeightedSumIntoRange_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params, srcMultiplier) + + NVPA_Status NVPW_CounterDataCombiner_WeightedSumIntoRange(NVPW_CounterDataCombiner_WeightedSumIntoRange_Params* pParams); + +/** + * @} + ******************************************************************************/ + +/***************************************************************************//** + * @name Metrics Configuration + * @{ + */ + + typedef struct NVPA_RawMetricsConfig NVPA_RawMetricsConfig; + + typedef struct NVPA_RawMetricRequest + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// in + const char* pMetricName; + /// in + NVPA_Bool isolated; + /// in; ignored by AddMetric but observed by CounterData initialization + NVPA_Bool keepInstances; + } NVPA_RawMetricRequest; +#define NVPA_RAW_METRIC_REQUEST_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPA_RawMetricRequest, keepInstances) + + typedef struct NVPW_GetSupportedChipNames_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [out] + const char* const* ppChipNames; + /// [out] + size_t numChipNames; + } NVPW_GetSupportedChipNames_Params; +#define NVPW_GetSupportedChipNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_GetSupportedChipNames_Params, numChipNames) + + NVPA_Status NVPW_GetSupportedChipNames(NVPW_GetSupportedChipNames_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_RawMetricsConfig_Destroy_Params; +#define NVPW_RawMetricsConfig_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_Destroy_Params, pRawMetricsConfig) + + NVPA_Status NVPW_RawMetricsConfig_Destroy(NVPW_RawMetricsConfig_Destroy_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_SetCounterAvailability_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [in] buffer with counter availability image + const uint8_t* pCounterAvailabilityImage; + } NVPW_RawMetricsConfig_SetCounterAvailability_Params; +#define NVPW_RawMetricsConfig_SetCounterAvailability_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_SetCounterAvailability_Params, 
pCounterAvailabilityImage) + + NVPA_Status NVPW_RawMetricsConfig_SetCounterAvailability(NVPW_RawMetricsConfig_SetCounterAvailability_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_BeginPassGroup_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + size_t maxPassCount; + } NVPW_RawMetricsConfig_BeginPassGroup_Params; +#define NVPW_RawMetricsConfig_BeginPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_BeginPassGroup_Params, maxPassCount) + + NVPA_Status NVPW_RawMetricsConfig_BeginPassGroup(NVPW_RawMetricsConfig_BeginPassGroup_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_EndPassGroup_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + } NVPW_RawMetricsConfig_EndPassGroup_Params; +#define NVPW_RawMetricsConfig_EndPassGroup_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_EndPassGroup_Params, pRawMetricsConfig) + + NVPA_Status NVPW_RawMetricsConfig_EndPassGroup(NVPW_RawMetricsConfig_EndPassGroup_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetNumMetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [out] + size_t numMetrics; + } NVPW_RawMetricsConfig_GetNumMetrics_Params; +#define NVPW_RawMetricsConfig_GetNumMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumMetrics_Params, numMetrics) + + NVPA_Status NVPW_RawMetricsConfig_GetNumMetrics(NVPW_RawMetricsConfig_GetNumMetrics_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetMetricProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + size_t metricIndex; + /// [out] + const char* pMetricName; + /// [out] + NVPA_Bool supportsPipelined; + /// [out] + NVPA_Bool supportsIsolated; + } NVPW_RawMetricsConfig_GetMetricProperties_Params; +#define NVPW_RawMetricsConfig_GetMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_Params, supportsIsolated) + + NVPA_Status NVPW_RawMetricsConfig_GetMetricProperties(NVPW_RawMetricsConfig_GetMetricProperties_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetMetricProperties_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + size_t metricIndex; + /// [out] + const char* pMetricName; + } NVPW_RawMetricsConfig_GetMetricProperties_V2_Params; +#define NVPW_RawMetricsConfig_GetMetricProperties_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params, pMetricName) + + NVPA_Status NVPW_RawMetricsConfig_GetMetricProperties_V2(NVPW_RawMetricsConfig_GetMetricProperties_V2_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_AddMetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + const NVPA_RawMetricRequest* pRawMetricRequests; + size_t numMetricRequests; + } NVPW_RawMetricsConfig_AddMetrics_Params; +#define NVPW_RawMetricsConfig_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_AddMetrics_Params, numMetricRequests) + + NVPA_Status NVPW_RawMetricsConfig_AddMetrics(NVPW_RawMetricsConfig_AddMetrics_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_IsAddMetricsPossible_Params + { + /// [in] + size_t structSize; 
+ /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + const NVPA_RawMetricRequest* pRawMetricRequests; + size_t numMetricRequests; + /// [out] + NVPA_Bool isPossible; + } NVPW_RawMetricsConfig_IsAddMetricsPossible_Params; +#define NVPW_RawMetricsConfig_IsAddMetricsPossible_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params, isPossible) + + NVPA_Status NVPW_RawMetricsConfig_IsAddMetricsPossible(NVPW_RawMetricsConfig_IsAddMetricsPossible_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GenerateConfigImage_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [in] If true, all existing pass groups may be merged to reduce number of passes. + /// If merge was successful, distribution of counters in passes may be updated as a side-effect. The effects + /// will be persistent in pRawMetricsConfig. + NVPA_Bool mergeAllPassGroups; + } NVPW_RawMetricsConfig_GenerateConfigImage_Params; +#define NVPW_RawMetricsConfig_GenerateConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GenerateConfigImage_Params, mergeAllPassGroups) + + /// This API may fail if called inside a pass group with `mergeAllPassGroups` = true. + NVPA_Status NVPW_RawMetricsConfig_GenerateConfigImage(NVPW_RawMetricsConfig_GenerateConfigImage_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetConfigImage_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [in] Number of bytes allocated for pBuffer + size_t bytesAllocated; + /// [out] [optional] Buffer receiving the config image + uint8_t* pBuffer; + /// [out] Count of bytes that would be copied into pBuffer + size_t bytesCopied; + } NVPW_RawMetricsConfig_GetConfigImage_Params; +#define NVPW_RawMetricsConfig_GetConfigImage_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetConfigImage_Params, bytesCopied) + + NVPA_Status NVPW_RawMetricsConfig_GetConfigImage(NVPW_RawMetricsConfig_GetConfigImage_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetNumPasses_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [out] + size_t numPipelinedPasses; + /// [out] + size_t numIsolatedPasses; + } NVPW_RawMetricsConfig_GetNumPasses_Params; +#define NVPW_RawMetricsConfig_GetNumPasses_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_Params, numIsolatedPasses) + + /// Total num passes = numPipelinedPasses + numIsolatedPasses * numNestingLevels + NVPA_Status NVPW_RawMetricsConfig_GetNumPasses(NVPW_RawMetricsConfig_GetNumPasses_Params* pParams); + + typedef struct NVPW_RawMetricsConfig_GetNumPasses_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const NVPA_RawMetricsConfig* pRawMetricsConfig; + /// [out] + size_t numPasses; + } NVPW_RawMetricsConfig_GetNumPasses_V2_Params; +#define NVPW_RawMetricsConfig_GetNumPasses_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_RawMetricsConfig_GetNumPasses_V2_Params, numPasses) + + /// Total num passes = numPasses * numNestingLevels + NVPA_Status NVPW_RawMetricsConfig_GetNumPasses_V2(NVPW_RawMetricsConfig_GetNumPasses_V2_Params* pParams); + + typedef struct NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] Typically created 
by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8). + const uint8_t* pConfig; + /// [in] + size_t configSize; + /// [out] + size_t sampleSize; + } NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params; +#define NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params, sampleSize) + + /// Estimate per sample records size based on a virtual device + NVPA_Status NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetSocEstimatedSampleSize_Params* pParams); + + typedef struct NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] Typically created by e.g. NVPW_RawMetricsConfig_GetConfigImage(), must be align(8). + const uint8_t* pConfig; + /// [in] + size_t configSize; + /// [out] + size_t sampleSize; + } NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params; +#define NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params, sampleSize) + + /// Estimate per sample records size based on a virtual device + NVPA_Status NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize(NVPW_PeriodicSampler_Config_GetGpuEstimatedSampleSize_Params* pParams); + +/** + * @} + ******************************************************************************/ + + typedef struct NVPW_Config_GetRawCounterInfo_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pConfig; + /// [in] + size_t configSize; + /// [in] + const char* pRawCounterName; + /// [inout] array containing indices of passes the counter resides in. 'pPassIndices' is in, '*pPassIndices' is + /// out. + size_t* pPassIndices; + /// [inout] if 'pPassIndices' is NULL, the count of passes this counter resides in will be returned; otherwise + /// it should be set to the capacity of 'pPassIndices' array, and on return, it will be overwritten to reflect + /// the actual count filled into 'pPassIndices' + size_t numPassIndices; + } NVPW_Config_GetRawCounterInfo_Params; +#define NVPW_Config_GetRawCounterInfo_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetRawCounterInfo_Params, numPassIndices) + + NVPA_Status NVPW_Config_GetRawCounterInfo(NVPW_Config_GetRawCounterInfo_Params* pParams); + + typedef struct NVPW_Config_GetRawCounters_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pConfig; + /// [in] + size_t configSize; + /// [in] + size_t passIndex; + /// [inout] array containing raw counter names. 'ppRawCounterNames' is in, '*ppRawCounterNames' is out. 
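/*
 * [Editorial sketch, not part of the vendored header] 'pPassIndices' above and
 * 'ppRawCounterNames' below follow this header's two-call idiom: pass NULL to
 * query the required element count, then call again with allocated storage.
 * 'config'/'configSize' are assumed inputs and the counter name is
 * hypothetical; error handling omitted:
 *
 *   NVPW_Config_GetRawCounterInfo_Params info = { NVPW_Config_GetRawCounterInfo_Params_STRUCT_SIZE };
 *   info.pConfig = config;
 *   info.configSize = configSize;
 *   info.pRawCounterName = "some_raw_counter";          // hypothetical name
 *   info.pPassIndices = NULL;                           // 1st call: query count
 *   NVPW_Config_GetRawCounterInfo(&info);
 *   size_t* passIndices = (size_t*)malloc(info.numPassIndices * sizeof(size_t));
 *   info.pPassIndices = passIndices;                    // 2nd call: fill array
 *   NVPW_Config_GetRawCounterInfo(&info);               // capacity already in numPassIndices
 */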
+ const char** ppRawCounterNames; + /// [inout] if 'ppRawCounterNames' is NULL, the count of raw counters will be returned; otherwise it should be + /// set to the capacity of 'ppRawCounterNames' array, and on return, it will be overwritten to reflect the + /// actual count filled into 'ppRawCounterNames' + size_t numRawCounters; + } NVPW_Config_GetRawCounters_Params; +#define NVPW_Config_GetRawCounters_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetRawCounters_Params, numRawCounters) + + NVPA_Status NVPW_Config_GetRawCounters(NVPW_Config_GetRawCounters_Params* pParams); + +/***************************************************************************//** + * @name CounterData Creation + * @{ + */ + + typedef struct NVPA_CounterDataBuilder NVPA_CounterDataBuilder; + + typedef struct NVPW_CounterDataBuilder_Create_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [out] + NVPA_CounterDataBuilder* pCounterDataBuilder; + const char* pChipName; + } NVPW_CounterDataBuilder_Create_Params; +#define NVPW_CounterDataBuilder_Create_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Create_Params, pChipName) + + NVPA_Status NVPW_CounterDataBuilder_Create(NVPW_CounterDataBuilder_Create_Params* pParams); + + typedef struct NVPW_CounterDataBuilder_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataBuilder* pCounterDataBuilder; + } NVPW_CounterDataBuilder_Destroy_Params; +#define NVPW_CounterDataBuilder_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_Destroy_Params, pCounterDataBuilder) + + NVPA_Status NVPW_CounterDataBuilder_Destroy(NVPW_CounterDataBuilder_Destroy_Params* pParams); + + typedef struct NVPW_CounterDataBuilder_AddMetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataBuilder* pCounterDataBuilder; + const NVPA_RawMetricRequest* pRawMetricRequests; + size_t numMetricRequests; + } NVPW_CounterDataBuilder_AddMetrics_Params; +#define NVPW_CounterDataBuilder_AddMetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_AddMetrics_Params, numMetricRequests) + + NVPA_Status NVPW_CounterDataBuilder_AddMetrics(NVPW_CounterDataBuilder_AddMetrics_Params* pParams); + + typedef struct NVPW_CounterDataBuilder_GetCounterDataPrefix_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + NVPA_CounterDataBuilder* pCounterDataBuilder; + /// [in] Number of bytes allocated for pBuffer + size_t bytesAllocated; + /// [out] [optional] Buffer receiving the counter data prefix + uint8_t* pBuffer; + /// [out] Count of bytes that would be copied to pBuffer + size_t bytesCopied; + } NVPW_CounterDataBuilder_GetCounterDataPrefix_Params; +#define NVPW_CounterDataBuilder_GetCounterDataPrefix_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params, bytesCopied) + + NVPA_Status NVPW_CounterDataBuilder_GetCounterDataPrefix(NVPW_CounterDataBuilder_GetCounterDataPrefix_Params* pParams); + +/** + * @} + ******************************************************************************/ + +/***************************************************************************//** + * @name Metrics Evaluator + * @{ + */ + + typedef struct NVPW_MetricsEvaluator NVPW_MetricsEvaluator; + +#ifndef NVPW_DIM_UNIT_DEFINED +#define NVPW_DIM_UNIT_DEFINED + typedef enum NVPW_DimUnitName + { + NVPW_DIM_UNIT_INVALID = 3518299157, + NVPW_DIM_UNIT_UNITLESS = 2126137902, + NVPW_DIM_UNIT_ATTRIBUTES = 
3776338729, + NVPW_DIM_UNIT_BYTES = 3797850191, + NVPW_DIM_UNIT_CTAS = 1960564139, + NVPW_DIM_UNIT_CTC_CYCLES = 2224883873, + NVPW_DIM_UNIT_DRAM_CYCLES = 2650981327, + NVPW_DIM_UNIT_FBP_CYCLES = 1785238957, + NVPW_DIM_UNIT_FE_OPS = 2919159083, + NVPW_DIM_UNIT_GPC_CYCLES = 1222631184, + NVPW_DIM_UNIT_IDC_REQUESTS = 2012649669, + NVPW_DIM_UNIT_INSTRUCTIONS = 1418625543, + NVPW_DIM_UNIT_KILOBYTES = 1335980302, + NVPW_DIM_UNIT_L1DATA_BANK_ACCESSES = 1479493682, + NVPW_DIM_UNIT_L1DATA_BANK_CONFLICTS = 3433170787, + NVPW_DIM_UNIT_L1TEX_REQUESTS = 1306473767, + NVPW_DIM_UNIT_L1TEX_TAGS = 26573010, + NVPW_DIM_UNIT_L1TEX_WAVEFRONTS = 129373765, + NVPW_DIM_UNIT_L2_REQUESTS = 1143695106, + NVPW_DIM_UNIT_L2_SECTORS = 3424101564, + NVPW_DIM_UNIT_L2_TAGS = 3755612781, + NVPW_DIM_UNIT_MCC_CYCLES = 1826685787, + NVPW_DIM_UNIT_NANOSECONDS = 3047500672, + NVPW_DIM_UNIT_NVLRX_CYCLES = 4059934930, + NVPW_DIM_UNIT_NVLTX_CYCLES = 1814350488, + NVPW_DIM_UNIT_PCIE_CYCLES = 1230450943, + NVPW_DIM_UNIT_PERCENT = 1284354694, + NVPW_DIM_UNIT_PIXELS = 4227616663, + NVPW_DIM_UNIT_PIXEL_SHADER_BARRIERS = 3705502518, + NVPW_DIM_UNIT_PRIMITIVES = 2373084002, + NVPW_DIM_UNIT_QUADS = 1539753497, + NVPW_DIM_UNIT_REGISTERS = 2837260947, + NVPW_DIM_UNIT_SAMPLES = 746046551, + NVPW_DIM_UNIT_SECONDS = 1164825258, + NVPW_DIM_UNIT_SYS_CYCLES = 3310821688, + NVPW_DIM_UNIT_TEXELS = 1293214069, + NVPW_DIM_UNIT_THREADS = 164261907, + NVPW_DIM_UNIT_VERTICES = 1873662209, + NVPW_DIM_UNIT_WARPS = 97951949, + NVPW_DIM_UNIT_WORKLOADS = 1728142656 + } NVPW_DimUnitName; +#endif //NVPW_DIM_UNIT_DEFINED + +#ifndef NVPW_HW_UNIT_DEFINED +#define NVPW_HW_UNIT_DEFINED + typedef enum NVPW_HwUnit + { + NVPW_HW_UNIT_INVALID = 3498035701, + NVPW_HW_UNIT_CROP = 2872137846, + NVPW_HW_UNIT_DRAM = 1662616918, + NVPW_HW_UNIT_DRAMC = 1401232876, + NVPW_HW_UNIT_FBP = 2947194306, + NVPW_HW_UNIT_FBPA = 690045803, + NVPW_HW_UNIT_FE = 2204924321, + NVPW_HW_UNIT_GPC = 1911735839, + NVPW_HW_UNIT_GPU = 1014363534, + NVPW_HW_UNIT_GR = 2933618517, + NVPW_HW_UNIT_IDC = 842765289, + NVPW_HW_UNIT_L1TEX = 893940957, + NVPW_HW_UNIT_LTS = 2333266697, + NVPW_HW_UNIT_MCC = 3980130194, + NVPW_HW_UNIT_NVLRX = 3091684901, + NVPW_HW_UNIT_NVLTX = 869679659, + NVPW_HW_UNIT_PCIE = 3433264174, + NVPW_HW_UNIT_PDA = 345193251, + NVPW_HW_UNIT_PES = 804128425, + NVPW_HW_UNIT_PROP = 3339255507, + NVPW_HW_UNIT_RASTER = 187932504, + NVPW_HW_UNIT_SM = 724224710, + NVPW_HW_UNIT_SMSP = 2837616917, + NVPW_HW_UNIT_SYS = 768990063, + NVPW_HW_UNIT_TPC = 1889024613, + NVPW_HW_UNIT_VAF = 753670509, + NVPW_HW_UNIT_VPC = 275561583, + NVPW_HW_UNIT_ZROP = 979500456 + } NVPW_HwUnit; +#endif //NVPW_HW_UNIT_DEFINED + + typedef enum NVPW_RollupOp + { + NVPW_ROLLUP_OP_AVG = 0, + NVPW_ROLLUP_OP_MAX, + NVPW_ROLLUP_OP_MIN, + NVPW_ROLLUP_OP_SUM, + NVPW_ROLLUP_OP__COUNT + } NVPW_RollupOp; + + typedef enum NVPW_MetricType + { + NVPW_METRIC_TYPE_COUNTER = 0, + NVPW_METRIC_TYPE_RATIO, + NVPW_METRIC_TYPE_THROUGHPUT, + NVPW_METRIC_TYPE__COUNT + } NVPW_MetricType; + + typedef enum NVPW_Submetric + { + NVPW_SUBMETRIC_NONE = 0, + NVPW_SUBMETRIC_PEAK_SUSTAINED = 1, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE = 2, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ACTIVE_PER_SECOND = 3, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED = 4, + NVPW_SUBMETRIC_PEAK_SUSTAINED_ELAPSED_PER_SECOND = 5, + NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME = 6, + NVPW_SUBMETRIC_PEAK_SUSTAINED_FRAME_PER_SECOND = 7, + NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION = 8, + NVPW_SUBMETRIC_PEAK_SUSTAINED_REGION_PER_SECOND = 9, + NVPW_SUBMETRIC_PER_CYCLE_ACTIVE = 10, + 
NVPW_SUBMETRIC_PER_CYCLE_ELAPSED = 11, + NVPW_SUBMETRIC_PER_CYCLE_IN_FRAME = 12, + NVPW_SUBMETRIC_PER_CYCLE_IN_REGION = 13, + NVPW_SUBMETRIC_PER_SECOND = 14, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ACTIVE = 15, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_ELAPSED = 16, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_FRAME = 17, + NVPW_SUBMETRIC_PCT_OF_PEAK_SUSTAINED_REGION = 18, + NVPW_SUBMETRIC_MAX_RATE = 19, + NVPW_SUBMETRIC_PCT = 20, + NVPW_SUBMETRIC_RATIO = 21, + NVPW_SUBMETRIC__COUNT + } NVPW_Submetric; + + typedef struct NVPW_MetricEvalRequest + { + /// the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t metricIndex; + /// one of 'NVPW_MetricType' + uint8_t metricType; + /// one of 'NVPW_RollupOp', required for Counter and Throughput, doesn't apply to Ratio + uint8_t rollupOp; + /// one of 'NVPW_Submetric', required for Ratio and Throughput, optional for Counter + uint16_t submetric; + } NVPW_MetricEvalRequest; +#define NVPW_MetricEvalRequest_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricEvalRequest, submetric) + + typedef struct NVPW_DimUnitFactor + { + /// one of 'NVPW_DimUnitName' + uint32_t dimUnit; + int8_t exponent; + } NVPW_DimUnitFactor; +#define NVPW_DimUnitFactor_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_DimUnitFactor, exponent) + + typedef struct NVPW_MetricsEvaluator_Destroy_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + } NVPW_MetricsEvaluator_Destroy_Params; +#define NVPW_MetricsEvaluator_Destroy_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_Destroy_Params, pMetricsEvaluator) + + NVPA_Status NVPW_MetricsEvaluator_Destroy(NVPW_MetricsEvaluator_Destroy_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetMetricNames_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 'NVPW_MetricType' + uint8_t metricType; + /// [out] + const char* pMetricNames; + /// [out] + const size_t* pMetricNameBeginIndices; + /// [out] + size_t numMetrics; + } NVPW_MetricsEvaluator_GetMetricNames_Params; +#define NVPW_MetricsEvaluator_GetMetricNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricNames_Params, numMetrics) + + NVPA_Status NVPW_MetricsEvaluator_GetMetricNames(NVPW_MetricsEvaluator_GetMetricNames_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] can be either a base metric or a metric + const char* pMetricName; + /// [out] one of 'NVPW_MetricType' + uint8_t metricType; + /// [out] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t metricIndex; + } NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params; +#define NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params, metricIndex) + + NVPA_Status NVPW_MetricsEvaluator_GetMetricTypeAndIndex(NVPW_MetricsEvaluator_GetMetricTypeAndIndex_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const char* pMetricName; + /// [inout] 'pMetricEvalRequest' is in, '*pMetricEvalRequest' is out + struct NVPW_MetricEvalRequest* 
pMetricEvalRequest; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + } NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params; +#define NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params, metricEvalRequestStructSize) + + NVPA_Status NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest(NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_HwUnitToString_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 'NVPW_HwUnit' + uint32_t hwUnit; + /// [out] + const char* pHwUnitName; + } NVPW_MetricsEvaluator_HwUnitToString_Params; +#define NVPW_MetricsEvaluator_HwUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_HwUnitToString_Params, pHwUnitName) + + NVPA_Status NVPW_MetricsEvaluator_HwUnitToString(NVPW_MetricsEvaluator_HwUnitToString_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetCounterProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t counterIndex; + /// [out] + const char* pDescription; + /// [out] one of 'NVPW_HwUnit' + uint32_t hwUnit; + } NVPW_MetricsEvaluator_GetCounterProperties_Params; +#define NVPW_MetricsEvaluator_GetCounterProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetCounterProperties_Params, hwUnit) + + NVPA_Status NVPW_MetricsEvaluator_GetCounterProperties(NVPW_MetricsEvaluator_GetCounterProperties_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetRatioMetricProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t ratioMetricIndex; + /// [out] + const char* pDescription; + /// [out] + uint64_t hwUnit; + } NVPW_MetricsEvaluator_GetRatioMetricProperties_Params; +#define NVPW_MetricsEvaluator_GetRatioMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params, hwUnit) + + NVPA_Status NVPW_MetricsEvaluator_GetRatioMetricProperties(NVPW_MetricsEvaluator_GetRatioMetricProperties_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] the metric index as in 'NVPW_MetricsEvaluator_GetMetricNames' + size_t throughputMetricIndex; + /// [out] + const char* pDescription; + /// [out] + uint32_t hwUnit; + /// [out] number of constituent counters for the throughput metric + size_t numCounters; + /// [out] metric indices as in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numCounters' > 0, otherwise + /// returned as nullptr + const size_t* pCounterIndices; + /// [out] number of constituent sub-throughputs for the throughput metric + size_t numSubThroughputs; + /// [out] metric indices as in 'NVPW_MetricsEvaluator_GetMetricNames', valid if 'numSubThroughputs' > 0, + /// otherwise returned as nullptr + const size_t* pSubThroughputIndices; + } 
NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params; +#define NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params, pSubThroughputIndices) + + NVPA_Status NVPW_MetricsEvaluator_GetThroughputMetricProperties(NVPW_MetricsEvaluator_GetThroughputMetricProperties_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 'NVPW_MetricType' + uint8_t metricType; + /// [out] an array of 'NVPW_Submetric' + const uint16_t* pSupportedSubmetrics; + /// [out] + size_t numSupportedSubmetrics; + } NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params; +#define NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params, numSupportedSubmetrics) + + NVPA_Status NVPW_MetricsEvaluator_GetSupportedSubmetrics(NVPW_MetricsEvaluator_GetSupportedSubmetrics_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_GetMetricRawDependencies_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const struct NVPW_MetricEvalRequest* pMetricEvalRequests; + /// [in] + size_t numMetricEvalRequests; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + /// [in] set to sizeof('NVPW_MetricEvalRequest') + size_t metricEvalRequestStrideSize; + /// [inout] 'ppRawDependencies' is in, '*ppRawDependencies' is out + const char** ppRawDependencies; + /// [inout] if 'ppRawDependencies' is NULL, number of raw dependencies available will be returned; otherwise it + /// should be set to the number of elements allocated for 'ppRawDependencies', and on return, it will be + /// overwritten by number of elements copied to 'ppRawDependencies' + size_t numRawDependencies; + /// [inout] 'ppOptionalRawDependencies' is in, '*ppOptionalRawDependencies' is out + const char** ppOptionalRawDependencies; + /// [inout] if 'ppOptionalRawDependencies' is NULL, number of optional raw dependencies available will be + /// returned; otherwise it should be set to the number of elements allocated for 'ppOptionalRawDependencies', + /// and on return, it will be overwritten by number of elements copied to 'ppOptionalRawDependencies' + size_t numOptionalRawDependencies; + } NVPW_MetricsEvaluator_GetMetricRawDependencies_Params; +#define NVPW_MetricsEvaluator_GetMetricRawDependencies_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params, numOptionalRawDependencies) + + NVPA_Status NVPW_MetricsEvaluator_GetMetricRawDependencies(NVPW_MetricsEvaluator_GetMetricRawDependencies_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_DimUnitToString_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] one of 'NVPW_DimUnitName' + uint32_t dimUnit; + /// [out] + const char* pSingularName; + /// [out] + const char* pPluralName; + } NVPW_MetricsEvaluator_DimUnitToString_Params; +#define NVPW_MetricsEvaluator_DimUnitToString_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_DimUnitToString_Params, pPluralName) + + NVPA_Status NVPW_MetricsEvaluator_DimUnitToString(NVPW_MetricsEvaluator_DimUnitToString_Params* pParams); + + typedef 
struct NVPW_MetricsEvaluator_GetMetricDimUnits_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const struct NVPW_MetricEvalRequest* pMetricEvalRequest; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + /// [inout] 'pDimUnits' is in, '*pDimUnits' is out + NVPW_DimUnitFactor* pDimUnits; + /// [inout] if 'pDimUnits' is NULL, number of dim-units available will be returned; otherwise it should be set + /// to the number of elements allocated for 'pDimUnits', and on return, it will be overwritten by number of + /// elements copied to 'pDimUnits' + size_t numDimUnits; + /// [in] set to 'NVPW_DimUnitFactor_STRUCT_SIZE' + size_t dimUnitFactorStructSize; + } NVPW_MetricsEvaluator_GetMetricDimUnits_Params; +#define NVPW_MetricsEvaluator_GetMetricDimUnits_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_GetMetricDimUnits_Params, dimUnitFactorStructSize) + + NVPA_Status NVPW_MetricsEvaluator_GetMetricDimUnits(NVPW_MetricsEvaluator_GetMetricDimUnits_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_SetUserData_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] duration in ns of user defined frame + double frameDuration; + /// [in] duration in ns of user defined region + double regionDuration; + /// [in] + NVPA_Bool isolated; + } NVPW_MetricsEvaluator_SetUserData_Params; +#define NVPW_MetricsEvaluator_SetUserData_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetUserData_Params, isolated) + + NVPA_Status NVPW_MetricsEvaluator_SetUserData(NVPW_MetricsEvaluator_SetUserData_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_EvaluateToGpuValues_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const struct NVPW_MetricEvalRequest* pMetricEvalRequests; + /// [in] + size_t numMetricEvalRequests; + /// [in] set to 'NVPW_MetricEvalRequest_STRUCT_SIZE' + size_t metricEvalRequestStructSize; + /// [in] set to sizeof('NVPW_MetricEvalRequest') + size_t metricEvalRequestStrideSize; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [in] + size_t rangeIndex; + /// [in] + NVPA_Bool isolated; + /// [inout] 'pMetricValues' is in, '*pMetricValues' is out + double* pMetricValues; + } NVPW_MetricsEvaluator_EvaluateToGpuValues_Params; +#define NVPW_MetricsEvaluator_EvaluateToGpuValues_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params, pMetricValues) + + NVPA_Status NVPW_MetricsEvaluator_EvaluateToGpuValues(NVPW_MetricsEvaluator_EvaluateToGpuValues_Params* pParams); + + typedef struct NVPW_MetricsEvaluator_SetDeviceAttributes_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct NVPW_MetricsEvaluator* pMetricsEvaluator; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + } NVPW_MetricsEvaluator_SetDeviceAttributes_Params; +#define NVPW_MetricsEvaluator_SetDeviceAttributes_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_MetricsEvaluator_SetDeviceAttributes_Params, counterDataImageSize) + + NVPA_Status NVPW_MetricsEvaluator_SetDeviceAttributes(NVPW_MetricsEvaluator_SetDeviceAttributes_Params* pParams); + +/** + * @} + 
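 *
 * [Editorial sketch, not part of the vendored header] Putting this group
 * together: convert a metric name into an eval request, then evaluate it
 * against a collected counter data image. 'pMetricsEvaluator' is assumed to
 * come from an API-specific creation call not shown in this excerpt, the
 * metric name is hypothetical, and error handling is omitted:
 *
 *   NVPW_MetricEvalRequest request;
 *   NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params convert = { NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest_Params_STRUCT_SIZE };
 *   convert.pMetricsEvaluator = pMetricsEvaluator;
 *   convert.pMetricName = "some_metric.sum";            // hypothetical name
 *   convert.pMetricEvalRequest = &request;
 *   convert.metricEvalRequestStructSize = NVPW_MetricEvalRequest_STRUCT_SIZE;
 *   NVPW_MetricsEvaluator_ConvertMetricNameToMetricEvalRequest(&convert);
 *
 *   double value = 0.0;
 *   NVPW_MetricsEvaluator_EvaluateToGpuValues_Params eval = { NVPW_MetricsEvaluator_EvaluateToGpuValues_Params_STRUCT_SIZE };
 *   eval.pMetricsEvaluator = pMetricsEvaluator;
 *   eval.pMetricEvalRequests = &request;
 *   eval.numMetricEvalRequests = 1;
 *   eval.metricEvalRequestStructSize = NVPW_MetricEvalRequest_STRUCT_SIZE;
 *   eval.metricEvalRequestStrideSize = sizeof(NVPW_MetricEvalRequest);
 *   eval.pCounterDataImage = pCounterDataImage;
 *   eval.counterDataImageSize = counterDataImageSize;
 *   eval.rangeIndex = 0;
 *   eval.isolated = 1;                                  // assumption: isolated collection
 *   eval.pMetricValues = &value;
 *   NVPW_MetricsEvaluator_EvaluateToGpuValues(&eval);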
******************************************************************************/ + + +#endif // NVPERF_HOST_API_DEFINED + + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_HOST_H diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_target.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_target.h new file mode 100644 index 0000000000000000000000000000000000000000..a8ee051da76a97013be9815f84f1b98cb359977b --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/include/nvperf_target.h @@ -0,0 +1,595 @@ +#ifndef NVPERF_TARGET_H +#define NVPERF_TARGET_H + +/* + * Copyright 2014-2024 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. + * + * This software and the information contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions + * of a form of NVIDIA software license agreement. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. 
+ */ + +#include <stddef.h> +#include <stdint.h> +#include "nvperf_common.h" + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility push(default) + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL __attribute__ ((visibility ("hidden"))) + #endif +#else + #if !defined(NVPW_LOCAL) + #define NVPW_LOCAL + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file nvperf_target.h + */ + +#ifndef NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED +#define NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED + /// GPU architecture support level + typedef enum NVPW_GpuArchitectureSupportLevel + { + NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_UNSUPPORTED, + NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_SUPPORTED + } NVPW_GpuArchitectureSupportLevel; +#endif //NVPW_GPU_ARCHITECTURE_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_SLI_SUPPORT_LEVEL_DEFINED +#define NVPW_SLI_SUPPORT_LEVEL_DEFINED + /// SLI configuration support level + typedef enum NVPW_SliSupportLevel + { + NVPW_SLI_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_SLI_SUPPORT_LEVEL_UNSUPPORTED, + /// Only Non-SLI configurations are supported. + NVPW_SLI_SUPPORT_LEVEL_SUPPORTED_NON_SLI_CONFIGURATION + } NVPW_SliSupportLevel; +#endif //NVPW_SLI_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_VGPU_SUPPORT_LEVEL_DEFINED +#define NVPW_VGPU_SUPPORT_LEVEL_DEFINED + /// Virtualized GPU configuration support level + typedef enum NVPW_VGpuSupportLevel + { + NVPW_VGPU_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_VGPU_SUPPORT_LEVEL_UNSUPPORTED, + /// Supported but not allowed by system admin. + NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_DISALLOWED, + NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_ALLOWED, + NVPW_VGPU_SUPPORT_LEVEL_SUPPORTED_NON_VGPU_CONFIGURATION + } NVPW_VGpuSupportLevel; +#endif //NVPW_VGPU_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED +#define NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED + /// Confidential Compute mode support level + typedef enum NVPW_ConfidentialComputeSupportLevel + { + NVPW_CONF_COMPUTE_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_CONF_COMPUTE_SUPPORT_LEVEL_UNSUPPORTED, + NVPW_CONF_COMPUTE_SUPPORT_LEVEL_SUPPORTED_NON_CONF_COMPUTE_CONFIGURATION, + NVPW_CONF_COMPUTE_SUPPORT_LEVEL_SUPPORTED_CONF_COMPUTE_DEVTOOLS_MODE + } NVPW_ConfidentialComputeSupportLevel; +#endif //NVPW_CONF_COMPUTE_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_CMP_SUPPORT_LEVEL_DEFINED +#define NVPW_CMP_SUPPORT_LEVEL_DEFINED + /// CMP support level + typedef enum NVPW_CmpSupportLevel + { + NVPW_CMP_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_CMP_SUPPORT_LEVEL_UNSUPPORTED, + NVPW_CMP_SUPPORT_LEVEL_SUPPORTED_NON_CMP_CONFIGURATON + } NVPW_CmpSupportLevel; +#endif //NVPW_CMP_SUPPORT_LEVEL_DEFINED + +#ifndef NVPW_WSL_SUPPORT_LEVEL_DEFINED +#define NVPW_WSL_SUPPORT_LEVEL_DEFINED + /// WSL support level + typedef enum NVPW_WslSupportLevel + { + NVPW_WSL_SUPPORT_LEVEL_UNKNOWN = 0, + NVPW_WSL_SUPPORT_LEVEL_UNSUPPORTED_INSUFFICIENT_DRIVER_VERSION, + NVPW_WSL_SUPPORT_LEVEL_SUPPORTED, + NVPW_WSL_SUPPORT_LEVEL_SUPPORTED_NON_WSL_CONFIGURATION + } NVPW_WslSupportLevel; +#endif //NVPW_WSL_SUPPORT_LEVEL_DEFINED + + typedef struct NVPW_InitializeTarget_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + } NVPW_InitializeTarget_Params; +#define NVPW_InitializeTarget_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_InitializeTarget_Params, pPriv) + + /// Load the target library.
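/*
 * [Editorial sketch, not part of the vendored header] Every entry point in
 * this header takes a single params struct whose leading 'structSize' member
 * must be set to the matching *_STRUCT_SIZE macro, with 'pPriv' left NULL;
 * e.g. for the declaration below:
 *
 *   NVPW_InitializeTarget_Params params = { NVPW_InitializeTarget_Params_STRUCT_SIZE };
 *   NVPA_Status status = NVPW_InitializeTarget(&params);
 *   if (status != NVPA_STATUS_SUCCESS) { ... handle error ... }
 */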
+ NVPA_Status NVPW_InitializeTarget(NVPW_InitializeTarget_Params* pParams); + + typedef struct NVPW_GetDeviceCount_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t numDevices; + } NVPW_GetDeviceCount_Params; +#define NVPW_GetDeviceCount_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_GetDeviceCount_Params, numDevices) + + NVPA_Status NVPW_GetDeviceCount(NVPW_GetDeviceCount_Params* pParams); + + typedef struct NVPW_Device_GetNames_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t deviceIndex; + const char* pDeviceName; + const char* pChipName; + } NVPW_Device_GetNames_Params; +#define NVPW_Device_GetNames_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetNames_Params, pChipName) + + NVPA_Status NVPW_Device_GetNames(NVPW_Device_GetNames_Params* pParams); + + typedef struct NVPW_PciBusId + { + /// The PCI domain on which the device bus resides. + uint32_t domain; + /// The bus on which the device resides. + uint16_t bus; + /// device ID. + uint16_t device; + } NVPW_PciBusId; +#define NVPW_PciBusId_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PciBusId, device) + + typedef struct NVPW_Device_GetPciBusIds_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] caller-allocated array of NVPW_PciBusId, indexed by NVPW deviceIndex + NVPW_PciBusId* pBusIds; + /// [in] size of the pBusIDs array; use result from NVPW_GetDeviceCount + size_t numDevices; + } NVPW_Device_GetPciBusIds_Params; +#define NVPW_Device_GetPciBusIds_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetPciBusIds_Params, numDevices) + + NVPA_Status NVPW_Device_GetPciBusIds(NVPW_Device_GetPciBusIds_Params* pParams); + + +#define NVPW_DEVICE_MIG_GPU_INSTANCE_ID_INVALID 0xFFFFFFFFu +#define NVPW_DEVICE_MIG_GPU_INSTANCE_ID_FULLCHIP 0xFFFFFFFEu + + + typedef struct NVPW_Device_GetMigAttributes_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + size_t deviceIndex; + /// [out] + NVPA_Bool isMigPartition; + /// [out] + uint32_t gpuInstanceId; + /// [out] + uint32_t computeInstanceId; + } NVPW_Device_GetMigAttributes_Params; +#define NVPW_Device_GetMigAttributes_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetMigAttributes_Params, computeInstanceId) + + NVPA_Status NVPW_Device_GetMigAttributes(NVPW_Device_GetMigAttributes_Params* pParams); + + typedef struct NVPW_Adapter_GetDeviceIndex_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + struct IDXGIAdapter* pAdapter; + /// [in] + size_t sliIndex; + /// [out] + size_t deviceIndex; + } NVPW_Adapter_GetDeviceIndex_Params; +#define NVPW_Adapter_GetDeviceIndex_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Adapter_GetDeviceIndex_Params, deviceIndex) + + NVPA_Status NVPW_Adapter_GetDeviceIndex(NVPW_Adapter_GetDeviceIndex_Params* pParams); + + typedef struct NVPW_CounterData_GetNumRanges_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const uint8_t* pCounterDataImage; + size_t numRanges; + } NVPW_CounterData_GetNumRanges_Params; +#define NVPW_CounterData_GetNumRanges_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetNumRanges_Params, numRanges) + + NVPA_Status NVPW_CounterData_GetNumRanges(NVPW_CounterData_GetNumRanges_Params* pParams); + + typedef struct NVPW_CounterData_GetChipName_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// 
[out] + const char* pChipName; + } NVPW_CounterData_GetChipName_Params; +#define NVPW_CounterData_GetChipName_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetChipName_Params, pChipName) + + NVPA_Status NVPW_CounterData_GetChipName(NVPW_CounterData_GetChipName_Params* pParams); + + typedef struct NVPW_Config_GetNumPasses_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pConfig; + /// [out] + size_t numPipelinedPasses; + /// [out] + size_t numIsolatedPasses; + } NVPW_Config_GetNumPasses_Params; +#define NVPW_Config_GetNumPasses_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetNumPasses_Params, numIsolatedPasses) + + /// Total num passes = numPipelinedPasses + numIsolatedPasses * numNestingLevels + NVPA_Status NVPW_Config_GetNumPasses(NVPW_Config_GetNumPasses_Params* pParams); + + typedef struct NVPW_Config_GetNumPasses_V2_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pConfig; + /// [out] + size_t numPasses; + } NVPW_Config_GetNumPasses_V2_Params; +#define NVPW_Config_GetNumPasses_V2_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Config_GetNumPasses_V2_Params, numPasses) + + /// Total num passes = numPasses * numNestingLevels + NVPA_Status NVPW_Config_GetNumPasses_V2(NVPW_Config_GetNumPasses_V2_Params* pParams); + +#define NVPW_API_SET_CUDA_PROFILER 0x18209d0775b2f89dULL + +#define NVPW_API_SET_D3D11_PROFILER 0xca55c6738445db2bULL + +#define NVPW_API_SET_D3D12_PROFILER 0xc0c2d46dd7c7ad78ULL + +#define NVPW_API_SET_EGL_PROFILER 0x3c3747dae1f9565cULL + +#define NVPW_API_SET_GPU_PERIODICSAMPLER 0x9f4c2571fc0b2e8aULL + +#define NVPW_API_SET_METRICSEVALUATOR 0x0368a8768d811af9ULL + +#define NVPW_API_SET_METRICS_AD10X_COMP 0xbe57278e12cb5288ULL + +#define NVPW_API_SET_METRICS_AD10X_GRFX 0x5cbf0774f81bf491ULL + +#define NVPW_API_SET_METRICS_GA100_COMP 0x16b7d8c20d8b4915ULL + +#define NVPW_API_SET_METRICS_GA100_GRFX 0xc94eaabec04a94faULL + +#define NVPW_API_SET_METRICS_GA10X_COMP 0xb5d6391c2e299ab5ULL + +#define NVPW_API_SET_METRICS_GA10X_GRFX 0x6ebc121178b5ce0bULL + +#define NVPW_API_SET_METRICS_GV100_COMP 0x863705cc57919f72ULL + +#define NVPW_API_SET_METRICS_GV100_GRFX 0x9900da75d164fecfULL + +#define NVPW_API_SET_METRICS_GV11B_COMP 0xd3f79a859235848fULL + +#define NVPW_API_SET_METRICS_GV11B_GRFX 0xeb8e26220106e227ULL + +#define NVPW_API_SET_METRICS_TU10X_COMP 0x70f40be0afd35da8ULL + +#define NVPW_API_SET_METRICS_TU10X_GRFX 0xdf219cb838db6968ULL + +#define NVPW_API_SET_METRICS_TU11X_COMP 0xeb0069d7d0956678ULL + +#define NVPW_API_SET_METRICS_TU11X_GRFX 0x0977d9342bd62743ULL + +#define NVPW_API_SET_OPENGL_PROFILER 0xe4cd9ea40f2ee777ULL + +#define NVPW_API_SET_VULKAN_PROFILER 0x8c56b6a03d779689ULL + +#define NVPW_SDK_VERSION 0x1e128b6f001423fcULL + + typedef struct NVPW_QueryVersionNumber_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + uint64_t apiSet; + /// [out] + uint32_t major; + /// [out] + uint32_t minor; + /// [out] + uint32_t patch; + /// [out] + uint32_t relMajor; + /// [out] + uint32_t relMinor; + /// [out] + uint32_t relPatch; + } NVPW_QueryVersionNumber_Params; +#define NVPW_QueryVersionNumber_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_QueryVersionNumber_Params, relPatch) + + /// Query version number of an API set + NVPA_Status NVPW_QueryVersionNumber(NVPW_QueryVersionNumber_Params* pParams); + + typedef enum NVPW_Device_ClockStatus + { + /// clock status is unknown + NVPW_DEVICE_CLOCK_STATUS_UNKNOWN, + /// 
clocks are locked to rated tdp values + NVPW_DEVICE_CLOCK_STATUS_LOCKED_TO_RATED_TDP, + /// clocks are not locked and can boost above rated tdp + NVPW_DEVICE_CLOCK_STATUS_BOOST_ENABLED, + /// clocks are not locked and will not go above rated tdp + NVPW_DEVICE_CLOCK_STATUS_BOOST_DISABLED, + NVPW_DEVICE_CLOCK_STATUS__COUNT + } NVPW_Device_ClockStatus; + + typedef struct NVPW_Device_GetClockStatus_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t deviceIndex; + /// [in] + NVPW_Device_ClockStatus clockStatus; + } NVPW_Device_GetClockStatus_Params; +#define NVPW_Device_GetClockStatus_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_GetClockStatus_Params, clockStatus) + + NVPA_Status NVPW_Device_GetClockStatus(NVPW_Device_GetClockStatus_Params* pParams); + + typedef enum NVPW_Device_ClockSetting + { + /// invalid op, specify valid clocks operation during profiling + NVPW_DEVICE_CLOCK_SETTING_INVALID, + /// default to driver/application config (normally unlocked and not boosted, but could be unlocked boosted, or + /// locked to rated TDP) + NVPW_DEVICE_CLOCK_SETTING_DEFAULT, + /// lock clocks at rated tdp base values + NVPW_DEVICE_CLOCK_SETTING_LOCK_TO_RATED_TDP, + NVPW_DEVICE_CLOCK_SETTING__COUNT + } NVPW_Device_ClockSetting; + + typedef struct NVPW_Device_SetClockSetting_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + size_t deviceIndex; + /// [in] + NVPW_Device_ClockSetting clockSetting; + } NVPW_Device_SetClockSetting_Params; +#define NVPW_Device_SetClockSetting_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Device_SetClockSetting_Params, clockSetting) + + NVPA_Status NVPW_Device_SetClockSetting(NVPW_Device_SetClockSetting_Params* pParams); + + typedef struct NVPW_CounterData_GetRangeDescriptions_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const uint8_t* pCounterDataImage; + size_t rangeIndex; + /// [inout] Number of descriptions allocated in ppDescriptions + size_t numDescriptions; + const char** ppDescriptions; + } NVPW_CounterData_GetRangeDescriptions_Params; +#define NVPW_CounterData_GetRangeDescriptions_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_CounterData_GetRangeDescriptions_Params, ppDescriptions) + + NVPA_Status NVPW_CounterData_GetRangeDescriptions(NVPW_CounterData_GetRangeDescriptions_Params* pParams); + + typedef struct NVPW_Profiler_CounterData_GetRangeDescriptions_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + const uint8_t* pCounterDataImage; + size_t rangeIndex; + /// [inout] Number of descriptions allocated in ppDescriptions + size_t numDescriptions; + const char** ppDescriptions; + } NVPW_Profiler_CounterData_GetRangeDescriptions_Params; +#define NVPW_Profiler_CounterData_GetRangeDescriptions_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_Profiler_CounterData_GetRangeDescriptions_Params, ppDescriptions) + + NVPA_Status NVPW_Profiler_CounterData_GetRangeDescriptions(NVPW_Profiler_CounterData_GetRangeDescriptions_Params* pParams); + +#ifndef NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED +#define NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED + typedef enum NVPW_PeriodicSampler_CounterData_AppendMode + { + NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_LINEAR = 0, + NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_CIRCULAR = 1, + NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE__COUNT + } NVPW_PeriodicSampler_CounterData_AppendMode; +#endif //NVPW_PERIODIC_SAMPLER_COUNTER_DATA_APPEND_MODE_DEFINED + + typedef struct 
NVPW_PeriodicSampler_CounterData_GetSampleTime_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t rangeIndex; + /// [out] + uint64_t timestampStart; + /// [out] + uint64_t timestampEnd; + } NVPW_PeriodicSampler_CounterData_GetSampleTime_Params; +#define NVPW_PeriodicSampler_CounterData_GetSampleTime_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetSampleTime_Params, timestampEnd) + + NVPA_Status NVPW_PeriodicSampler_CounterData_GetSampleTime(NVPW_PeriodicSampler_CounterData_GetSampleTime_Params* pParams); + + typedef struct NVPW_PeriodicSampler_CounterData_TrimInPlace_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [out] + size_t counterDataImageTrimmedSize; + } NVPW_PeriodicSampler_CounterData_TrimInPlace_Params; +#define NVPW_PeriodicSampler_CounterData_TrimInPlace_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_TrimInPlace_Params, counterDataImageTrimmedSize) + + NVPA_Status NVPW_PeriodicSampler_CounterData_TrimInPlace(NVPW_PeriodicSampler_CounterData_TrimInPlace_Params* pParams); + + typedef struct NVPW_PeriodicSampler_CounterData_GetInfo_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [out] total number of ranges in the counter data + size_t numTotalRanges; + /// [out] if in "linear" mode, this API returns the number of "populated" ranges; if it's in "circular" mode, + /// then it returns the last "populated" range index + 1, when there is no such range, it returns 0. + size_t numPopulatedRanges; + /// [out] if in "linear" mode, this API returns the number of "completed" ranges; if it's in "circular" mode, + /// then it returns the last "completed" range index + 1, when there is no such range, it returns 0. + size_t numCompletedRanges; + } NVPW_PeriodicSampler_CounterData_GetInfo_Params; +#define NVPW_PeriodicSampler_CounterData_GetInfo_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetInfo_Params, numCompletedRanges) + + /// In periodic sampler, a range in counter data stores exactly one sample's data. For better performance, periodic + /// sampler may operate in an out-of-order fashion when populating sample data, i.e. it may not fully populate all + /// counters of a sample/range before starting to populate the next sample/range. As a result, we have two concepts + /// here, "populated" & "completed": a range is considered "populated" even if only partial counters have been + /// written; on the other hand, a range is only considered "completed" if all the collecting counters have been + /// written. 
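/*
 * [Editorial sketch, not part of the vendored header] Given the
 * populated/completed distinction described above, a consumer that only wants
 * fully written samples can bound its loop by 'numCompletedRanges' (in linear
 * append mode, where it is a plain count). 'image'/'imageSize' are assumed to
 * hold a sampler counter data image; error handling omitted:
 *
 *   NVPW_PeriodicSampler_CounterData_GetInfo_Params info = { NVPW_PeriodicSampler_CounterData_GetInfo_Params_STRUCT_SIZE };
 *   info.pCounterDataImage = image;
 *   info.counterDataImageSize = imageSize;
 *   NVPW_PeriodicSampler_CounterData_GetInfo(&info);
 *   for (size_t i = 0; i < info.numCompletedRanges; ++i) {
 *       // decode sample i, e.g. its timestamps via
 *       // NVPW_PeriodicSampler_CounterData_GetSampleTime
 *   }
 */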
+ NVPA_Status NVPW_PeriodicSampler_CounterData_GetInfo(NVPW_PeriodicSampler_CounterData_GetInfo_Params* pParams); + + typedef struct NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [in] + size_t rangeIndex; + /// [out] + uint32_t triggerCount; + } NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params; +#define NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params, triggerCount) + + NVPA_Status NVPW_PeriodicSampler_CounterData_GetTriggerCount(NVPW_PeriodicSampler_CounterData_GetTriggerCount_Params* pParams); + + typedef struct NVPW_PeriodicSampler_CounterData_IsDataComplete_Params + { + /// [in] + size_t structSize; + /// [in] assign to NULL + void* pPriv; + /// [in] + const uint8_t* pCounterDataImage; + /// [in] + size_t counterDataImageSize; + /// [in] + size_t rangeIndex; + /// [out] + NVPA_Bool isComplete; + } NVPW_PeriodicSampler_CounterData_IsDataComplete_Params; +#define NVPW_PeriodicSampler_CounterData_IsDataComplete_Params_STRUCT_SIZE NVPA_STRUCT_SIZE(NVPW_PeriodicSampler_CounterData_IsDataComplete_Params, isComplete) + + /// Checks whether a given sample's data is complete. See also 'NVPW_PeriodicSampler_CounterData_GetInfo' + NVPA_Status NVPW_PeriodicSampler_CounterData_IsDataComplete(NVPW_PeriodicSampler_CounterData_IsDataComplete_Params* pParams); + + + typedef struct NVPW_TimestampReport + { + uint32_t payload; + uint8_t reserved0004[4]; + uint64_t timestamp; + } NVPW_TimestampReport; + + + + +#ifdef __cplusplus +} // extern "C" +#endif + +#if defined(__GNUC__) && defined(NVPA_SHARED_LIB) + #pragma GCC visibility pop +#endif + +#endif // NVPERF_TARGET_H diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b968077b7cbbce8c9d80af3a4be6ee262be94816 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so new file mode 100644 index 0000000000000000000000000000000000000000..d007bdbcaadf10e4354d1c0b86abb9cabcf224d5 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3d830ed3cdddee9686033e4c185801704770fa82b78e23923d56bc044006ee2 +size 1571144 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..d64aaca3caf1097590b1b3aabfdf167e5b2fcf81 --- /dev/null +++ 
b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76dcad94420820ea9aee9094c346cc34c6cb1ff630cef9b14e04db298a86305d +size 8067920 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so new file mode 100644 index 0000000000000000000000000000000000000000..f82ed9116893c460c084f93c6426aafed71e04aa --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a19ab37d5bea3a778b9c94b4535b6c656c910131fce19be98395827cf686713a +size 18842864 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so new file mode 100644 index 0000000000000000000000000000000000000000..a2feb9cf3c3c6d74c71ac649b86f96d164ee049f --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:401c2c8f61bda1a6c8e116e1d20e29ccc7789cd5498f6abc9c1080ca0f954437 +size 4231664 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so new file mode 100644 index 0000000000000000000000000000000000000000..4dcf914441ef17f77fc607d8d6dab7e5219e9d35 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6fa2783077974fb2765bfa9cbaebf980736df74eebf994c59c7596988aec83b +size 970064 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2ecb2644c302e92e11b5dc1d9727fe2e738a915 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfc3cda731605bc2f749e00f0ad185a4263ac42f Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn.h new file mode 100644 index 0000000000000000000000000000000000000000..7e08847c95f1294bc99e96e737a53cc6ebb7a458 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn.h @@ -0,0 +1,68 @@ +/* + * Copyright 2014-2023 
NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn : Neural Networks Library */ + +#if !defined(CUDNN_H_) +#define CUDNN_H_ +#if defined(__cplusplus) +extern "C" { +#endif + +#include <cuda_runtime.h> +#include "cudnn_version.h" +#include "cudnn_graph.h" +#include "cudnn_ops.h" +#include "cudnn_adv.h" +#include "cudnn_cnn.h" + +#if defined(__cplusplus) +} +#endif +#endif /* CUDNN_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_adv.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_adv.h new file mode 100644 index 0000000000000000000000000000000000000000..350522a8f8545725e23114378c1ffd43a559c672 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_adv.h @@ -0,0 +1,669 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws.
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn_adv : cuDNN's advanced and experimental features. + +*/ + +#if !defined(CUDNN_ADV_H_) +#define CUDNN_ADV_H_ + +#include <stdint.h> + +#include "cudnn_version.h" +#include "cudnn_ops.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_ADV_MAJOR 9 +#define CUDNN_ADV_MINOR 5 +#define CUDNN_ADV_PATCH 1 + +#if (CUDNN_ADV_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_MINOR != CUDNN_MINOR) || (CUDNN_ADV_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN ADV INFER!!!
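[Editor's note] The autogenerated CUDNN_ADV_MAJOR/MINOR/PATCH defines and the `#error` guard above enforce, at compile time, that this sub-library header matches the cudnn_version.h it shipped with. As a hedged illustration (not part of the vendored header), the same consistency can be verified at run time by comparing the CUDNN_VERSION macro against cudnnGetVersion(), both long-standing parts of the cuDNN API:

```c
/* Minimal sketch: detect a header/library version skew at run time.
 * CUDNN_VERSION comes from cudnn_version.h; cudnnGetVersion() reports
 * the version of the libcudnn that was actually loaded. */
#include <stdio.h>
#include <cudnn.h>

int main(void) {
    size_t runtime = cudnnGetVersion();
    if (runtime != (size_t)CUDNN_VERSION) {
        fprintf(stderr, "cuDNN mismatch: compiled against %ld, loaded %zu\n",
                (long)CUDNN_VERSION, runtime);
        return 1;
    }
    printf("cuDNN %zu consistent\n", runtime);
    return 0;
}
```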
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* BASIC RNN API */ + +typedef enum { + CUDNN_RNN_ALGO_STANDARD = 0, + CUDNN_RNN_ALGO_PERSIST_STATIC = 1, + CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2, + CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3, + CUDNN_RNN_ALGO_COUNT = 4, +} cudnnRNNAlgo_t; + +typedef enum { + CUDNN_FWD_MODE_INFERENCE = 0, + CUDNN_FWD_MODE_TRAINING = 1, +} cudnnForwardMode_t; + +typedef enum { + CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */ + CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */ + CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */ + CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */ +} cudnnRNNMode_t; + +typedef enum { + CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */ + CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */ + CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */ + CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */ +} cudnnRNNBiasMode_t; + +typedef enum { + CUDNN_UNIDIRECTIONAL = 0, /* single direction network */ + CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */ +} cudnnDirectionMode_t; + +typedef enum { + CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */ + CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */ +} cudnnRNNInputMode_t; + +typedef enum { + CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */ + CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */ +} cudnnRNNClipMode_t; + +typedef enum { + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */ + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */ + CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */ +} cudnnRNNDataLayout_t; + +/* For auxFlags in cudnnSetRNNDescriptor_v8() */ +#define CUDNN_RNN_PADDED_IO_DISABLED 0 +#define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0) + +struct cudnnRNNStruct; +typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t; + +struct cudnnRNNDataStruct; +typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc); + +/* + * mathPrec in cudnnSetRNNDescriptor_v8() specifies compute precision. + * Compute precision is further modified by mathType that sets the + * preferred option for using NVIDIA Tensor Cores. dataType specify + * input/output data type and weight/bias type. 
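[Editor's note] A minimal, hedged sketch (not part of the diff) of the v8 descriptor workflow declared just below: create the descriptor, configure cell type, precision, and sizes with cudnnSetRNNDescriptor_v8(), then query the weight-space size. Setting projSize equal to hiddenSize disables the optional LSTM recurrent projection; the zero-rate dropout descriptor with a NULL states buffer is an assumption made for brevity, and error handling is collapsed into a macro.

```c
#include <stdio.h>
#include <stdlib.h>
#include <cudnn.h>

#define CHECK(call)                                           \
    do {                                                      \
        cudnnStatus_t s_ = (call);                            \
        if (s_ != CUDNN_STATUS_SUCCESS) {                     \
            fprintf(stderr, "%s\n", cudnnGetErrorString(s_)); \
            exit(1);                                          \
        }                                                     \
    } while (0)

int main(void) {
    cudnnHandle_t handle;
    cudnnRNNDescriptor_t rnn;
    cudnnDropoutDescriptor_t drop;
    CHECK(cudnnCreate(&handle));
    CHECK(cudnnCreateDropoutDescriptor(&drop));
    /* dropout rate 0.0 with a NULL states buffer: assumed acceptable
     * here since no dropout is actually applied */
    CHECK(cudnnSetDropoutDescriptor(drop, handle, 0.0f, NULL, 0, 0));
    CHECK(cudnnCreateRNNDescriptor(&rnn));
    CHECK(cudnnSetRNNDescriptor_v8(rnn,
                                   CUDNN_RNN_ALGO_STANDARD,
                                   CUDNN_LSTM,
                                   CUDNN_RNN_DOUBLE_BIAS,
                                   CUDNN_UNIDIRECTIONAL,
                                   CUDNN_LINEAR_INPUT,
                                   CUDNN_DATA_FLOAT,   /* dataType: I/O and weights */
                                   CUDNN_DATA_FLOAT,   /* mathPrec: compute precision */
                                   CUDNN_DEFAULT_MATH, /* mathType: Tensor Core policy */
                                   512,                /* inputSize */
                                   512,                /* hiddenSize */
                                   512,                /* projSize == hiddenSize: no projection */
                                   2,                  /* numLayers */
                                   drop,
                                   CUDNN_RNN_PADDED_IO_ENABLED));
    size_t weightBytes = 0;
    CHECK(cudnnGetRNNWeightSpaceSize(handle, rnn, &weightBytes));
    printf("weight space: %zu bytes\n", weightBytes);
    CHECK(cudnnDestroyRNNDescriptor(rnn));
    CHECK(cudnnDestroyDropoutDescriptor(drop));
    CHECK(cudnnDestroy(handle));
    return 0;
}
```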
+ */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t algo, + cudnnRNNMode_t cellMode, + cudnnRNNBiasMode_t biasMode, + cudnnDirectionMode_t dirMode, + cudnnRNNInputMode_t inputMode, + cudnnDataType_t dataType, + cudnnDataType_t mathPrec, + cudnnMathType_t mathType, + int32_t inputSize, + int32_t hiddenSize, + int32_t projSize, + int32_t numLayers, + cudnnDropoutDescriptor_t dropoutDesc, + uint32_t auxFlags); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t *algo, + cudnnRNNMode_t *cellMode, + cudnnRNNBiasMode_t *biasMode, + cudnnDirectionMode_t *dirMode, + cudnnRNNInputMode_t *inputMode, + cudnnDataType_t *dataType, + cudnnDataType_t *mathPrec, + cudnnMathType_t *mathType, + int32_t *inputSize, + int32_t *hiddenSize, + int32_t *projSize, + int32_t *numLayers, + cudnnDropoutDescriptor_t *dropoutDesc, + uint32_t *auxFlags); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t clipMode, + cudnnNanPropagation_t clipNanOpt, + double lclip, + double rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip_v9(cudnnRNNDescriptor_t rnnDesc, cudnnRNNClipMode_t clipMode, double lclip, double rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t *clipMode, + cudnnNanPropagation_t *clipNanOpt, + double *lclip, + double *rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip_v9(cudnnRNNDescriptor_t rnnDesc, cudnnRNNClipMode_t *clipMode, double *lclip, double *rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + cudnnRNNDataDescriptor_t xDesc, + size_t *workSpaceSize, + size_t *reserveSpaceSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightParams(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + int32_t pseudoLayer, + size_t weightSpaceSize, + const void *weightSpace, + int32_t linLayerID, + cudnnTensorDescriptor_t mDesc, + void **mAddr, + cudnnTensorDescriptor_t bDesc, + void **bAddr); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t dataType, + cudnnRNNDataLayout_t layout, + int maxSeqLength, + int batchSize, + int vectorSize, + const int seqLengthArray[], /* length of each sequence in the batch */ + void *paddingFill); /* symbol for filling padding position in output */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t *dataType, + cudnnRNNDataLayout_t *layout, + int *maxSeqLength, + int *batchSize, + int *vectorSize, + int arrayLengthRequested, + int seqLengthArray[], + void *paddingFill); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNForward(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnRNNDataDescriptor_t yDesc, + void *y, + cudnnTensorDescriptor_t hDesc, + const void *hx, + 
void *hy, + cudnnTensorDescriptor_t cDesc, + const void *cx, + void *cy, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +/* Sequence data descriptor */ + +typedef enum { + CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */ + CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */ + CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */ + CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */ +} cudnnSeqDataAxis_t; + +struct cudnnSeqDataStruct; +typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t CUDNN_DEPRECATED; + +#define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const cudnnSeqDataAxis_t axes[], + size_t seqLengthArraySize, + const int seqLengthArray[], + void *paddingFill); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t *dataType, + int *nbDims, + int nbDimsRequested, + int dimA[], + cudnnSeqDataAxis_t axes[], + size_t *seqLengthArraySize, + size_t seqLengthSizeRequested, + int seqLengthArray[], + void *paddingFill); + +/* Multihead Attention */ + +/* + * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor(). + * Use the bitwise OR operator to combine several settings listed below. Additional + * minor options can be added here w/o changing or introducing new API functions. 
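[Editor's note] A short hedged sketch (not part of the header) of how the bitwise-OR flag combination described above feeds cudnnSetAttnDescriptor(), whose full parameter list appears just below. The header marks this legacy multi-head attention API CUDNN_DEPRECATED; all sizes here are illustrative placeholders, and the wrapper function is hypothetical.

```c
#include <cudnn.h>

/* Hypothetical helper: configures a previously created attention
 * descriptor with OR-combined attnMode flags and placeholder sizes. */
cudnnStatus_t setup_attn(cudnnAttnDescriptor_t attnDesc,
                         cudnnDropoutDescriptor_t attnDropout,
                         cudnnDropoutDescriptor_t postDropout) {
    unsigned attnMode =
        CUDNN_ATTN_QUERYMAP_ONE_TO_ONE | CUDNN_ATTN_ENABLE_PROJ_BIASES;
    return cudnnSetAttnDescriptor(attnDesc, attnMode,
                                  8,     /* nHeads */
                                  0.125, /* smScaler, e.g. 1/sqrt(per-head dim) */
                                  CUDNN_DATA_FLOAT,   /* dataType */
                                  CUDNN_DATA_FLOAT,   /* computePrec */
                                  CUDNN_DEFAULT_MATH, /* mathType */
                                  attnDropout, postDropout,
                                  64, 64, 64,         /* qSize, kSize, vSize */
                                  64, 64, 64, 64,     /* q/k/v/o projection sizes */
                                  128, 128,           /* qoMaxSeqLength, kvMaxSeqLength */
                                  32, 1);             /* maxBatchSize, maxBeamSize */
}
```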
+ */ +#define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */ +#define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */ +#define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */ +#define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */ + +struct cudnnAttnStruct; +typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned attnMode, + int nHeads, + double smScaler, + cudnnDataType_t dataType, + cudnnDataType_t computePrec, + cudnnMathType_t mathType, + cudnnDropoutDescriptor_t attnDropoutDesc, + cudnnDropoutDescriptor_t postDropoutDesc, + int qSize, + int kSize, + int vSize, + int qProjSize, + int kProjSize, + int vProjSize, + int oProjSize, + int qoMaxSeqLength, + int kvMaxSeqLength, + int maxBatchSize, + int maxBeamSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned *attnMode, + int *nHeads, + double *smScaler, + cudnnDataType_t *dataType, + cudnnDataType_t *computePrec, + cudnnMathType_t *mathType, + cudnnDropoutDescriptor_t *attnDropoutDesc, + cudnnDropoutDescriptor_t *postDropoutDesc, + int *qSize, + int *kSize, + int *vSize, + int *qProjSize, + int *kProjSize, + int *vProjSize, + int *oProjSize, + int *qoMaxSeqLength, + int *kvMaxSeqLength, + int *maxBatchSize, + int *maxBeamSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + size_t *weightSizeInBytes, + size_t *workSpaceSizeInBytes, + size_t *reserveSpaceSizeInBytes); + +typedef enum { + CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */ + CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */ + CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */ + CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */ + CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */ + CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */ + CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */ + CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */ +} cudnnMultiHeadAttnWeightKind_t; + +#define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnMultiHeadAttnWeightKind_t wKind, + size_t weightSizeInBytes, + const void *weights, + cudnnTensorDescriptor_t wDesc, + void **wAddr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnForward(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + int currIdx, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsQO[], + const int devSeqLengthsKV[], + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const void *residuals, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t oDesc, + void *out, + 
size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnAdvVersionCheck(void); + +typedef enum { + CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */ + CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */ +} cudnnWgradMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardData_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t yDesc, + const void *y, + const void *dy, + cudnnRNNDataDescriptor_t xDesc, + void *dx, + cudnnTensorDescriptor_t hDesc, + const void *hx, + const void *dhy, + void *dhx, + cudnnTensorDescriptor_t cDesc, + const void *cx, + const void *dcy, + void *dcx, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeights_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnWgradMode_t addGrad, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnTensorDescriptor_t hDesc, + const void *hx, + cudnnRNNDataDescriptor_t yDesc, + const void *y, + size_t weightSpaceSize, + void *dweightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsDQDO[], + const int devSeqLengthsDKDV[], + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + const cudnnSeqDataDescriptor_t dqDesc, + void *dqueries, + const void *queries, + const cudnnSeqDataDescriptor_t dkDesc, + void *dkeys, + const void *keys, + const cudnnSeqDataDescriptor_t dvDesc, + void *dvalues, + const void *values, + size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnWgradMode_t addGrad, + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + size_t weightSizeInBytes, + const void *weights, + void *dweights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* + * CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions + */ +/* Input normalization mode for loss function */ +typedef enum { + CUDNN_LOSS_NORMALIZATION_NONE = 0, + CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1, +} cudnnLossNormalizationMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI 
+cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode, + int maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor_v9(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnCTCGradMode_t ctcGradMode, + int maxLabelLength); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode, + int *maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor_v9(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnCTCGradMode_t *ctcGradMode, + int *maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc); + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int hostLabels[], /* labels, in CPU memory */ + const int hostLabelLengths[], /* the length of each label, in CPU memory */ + const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + void *workspace, /* pointer to the workspace, in GPU memory */ + size_t workSpaceSizeInBytes); /* size of the workspace */ + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int labels[], /* labels, in GPU memory */ + const int labelLengths[], /* the length of each label, in GPU memory */ + const int inputLengths[], /* the lengths of timing steps 
in each batch, in GPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + size_t workSpaceSizeInBytes, /* size of the workspace */ + void *workspace); /* pointer to the workspace, in GPU memory */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + const int *labels, /* labels, in CPU memory */ + const int *labelLengths, /* the length of each label, in CPU memory */ + const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_ADV_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_adv_v9.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_adv_v9.h new file mode 100644 index 0000000000000000000000000000000000000000..350522a8f8545725e23114378c1ffd43a559c672 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_adv_v9.h @@ -0,0 +1,669 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn_adv : cuDNN's advanced and experimental features. + +*/ + +#if !defined(CUDNN_ADV_H_) +#define CUDNN_ADV_H_ + +#include <stdint.h> + +#include "cudnn_version.h" +#include "cudnn_ops.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_ADV_MAJOR 9 +#define CUDNN_ADV_MINOR 5 +#define CUDNN_ADV_PATCH 1 + +#if (CUDNN_ADV_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_MINOR != CUDNN_MINOR) || (CUDNN_ADV_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN ADV INFER!!!
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* BASIC RNN API */ + +typedef enum { + CUDNN_RNN_ALGO_STANDARD = 0, + CUDNN_RNN_ALGO_PERSIST_STATIC = 1, + CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2, + CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3, + CUDNN_RNN_ALGO_COUNT = 4, +} cudnnRNNAlgo_t; + +typedef enum { + CUDNN_FWD_MODE_INFERENCE = 0, + CUDNN_FWD_MODE_TRAINING = 1, +} cudnnForwardMode_t; + +typedef enum { + CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */ + CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */ + CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */ + CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */ +} cudnnRNNMode_t; + +typedef enum { + CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */ + CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */ + CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */ + CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */ +} cudnnRNNBiasMode_t; + +typedef enum { + CUDNN_UNIDIRECTIONAL = 0, /* single direction network */ + CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */ +} cudnnDirectionMode_t; + +typedef enum { + CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */ + CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */ +} cudnnRNNInputMode_t; + +typedef enum { + CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */ + CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */ +} cudnnRNNClipMode_t; + +typedef enum { + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */ + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */ + CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */ +} cudnnRNNDataLayout_t; + +/* For auxFlags in cudnnSetRNNDescriptor_v8() */ +#define CUDNN_RNN_PADDED_IO_DISABLED 0 +#define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0) + +struct cudnnRNNStruct; +typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t; + +struct cudnnRNNDataStruct; +typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc); + +/* + * mathPrec in cudnnSetRNNDescriptor_v8() specifies compute precision. + * Compute precision is further modified by mathType that sets the + * preferred option for using NVIDIA Tensor Cores. dataType specify + * input/output data type and weight/bias type. 
+ */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t algo, + cudnnRNNMode_t cellMode, + cudnnRNNBiasMode_t biasMode, + cudnnDirectionMode_t dirMode, + cudnnRNNInputMode_t inputMode, + cudnnDataType_t dataType, + cudnnDataType_t mathPrec, + cudnnMathType_t mathType, + int32_t inputSize, + int32_t hiddenSize, + int32_t projSize, + int32_t numLayers, + cudnnDropoutDescriptor_t dropoutDesc, + uint32_t auxFlags); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t *algo, + cudnnRNNMode_t *cellMode, + cudnnRNNBiasMode_t *biasMode, + cudnnDirectionMode_t *dirMode, + cudnnRNNInputMode_t *inputMode, + cudnnDataType_t *dataType, + cudnnDataType_t *mathPrec, + cudnnMathType_t *mathType, + int32_t *inputSize, + int32_t *hiddenSize, + int32_t *projSize, + int32_t *numLayers, + cudnnDropoutDescriptor_t *dropoutDesc, + uint32_t *auxFlags); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t clipMode, + cudnnNanPropagation_t clipNanOpt, + double lclip, + double rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip_v9(cudnnRNNDescriptor_t rnnDesc, cudnnRNNClipMode_t clipMode, double lclip, double rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t *clipMode, + cudnnNanPropagation_t *clipNanOpt, + double *lclip, + double *rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip_v9(cudnnRNNDescriptor_t rnnDesc, cudnnRNNClipMode_t *clipMode, double *lclip, double *rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + cudnnRNNDataDescriptor_t xDesc, + size_t *workSpaceSize, + size_t *reserveSpaceSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightParams(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + int32_t pseudoLayer, + size_t weightSpaceSize, + const void *weightSpace, + int32_t linLayerID, + cudnnTensorDescriptor_t mDesc, + void **mAddr, + cudnnTensorDescriptor_t bDesc, + void **bAddr); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t dataType, + cudnnRNNDataLayout_t layout, + int maxSeqLength, + int batchSize, + int vectorSize, + const int seqLengthArray[], /* length of each sequence in the batch */ + void *paddingFill); /* symbol for filling padding position in output */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t *dataType, + cudnnRNNDataLayout_t *layout, + int *maxSeqLength, + int *batchSize, + int *vectorSize, + int arrayLengthRequested, + int seqLengthArray[], + void *paddingFill); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNForward(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnRNNDataDescriptor_t yDesc, + void *y, + cudnnTensorDescriptor_t hDesc, + const void *hx, + 
void *hy, + cudnnTensorDescriptor_t cDesc, + const void *cx, + void *cy, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +/* Sequence data descriptor */ + +typedef enum { + CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */ + CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */ + CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */ + CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */ +} cudnnSeqDataAxis_t; + +struct cudnnSeqDataStruct; +typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t CUDNN_DEPRECATED; + +#define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const cudnnSeqDataAxis_t axes[], + size_t seqLengthArraySize, + const int seqLengthArray[], + void *paddingFill); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t *dataType, + int *nbDims, + int nbDimsRequested, + int dimA[], + cudnnSeqDataAxis_t axes[], + size_t *seqLengthArraySize, + size_t seqLengthSizeRequested, + int seqLengthArray[], + void *paddingFill); + +/* Multihead Attention */ + +/* + * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor(). + * Use the bitwise OR operator to combine several settings listed below. Additional + * minor options can be added here w/o changing or introducing new API functions. 
+ */ +#define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */ +#define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */ +#define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */ +#define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */ + +struct cudnnAttnStruct; +typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned attnMode, + int nHeads, + double smScaler, + cudnnDataType_t dataType, + cudnnDataType_t computePrec, + cudnnMathType_t mathType, + cudnnDropoutDescriptor_t attnDropoutDesc, + cudnnDropoutDescriptor_t postDropoutDesc, + int qSize, + int kSize, + int vSize, + int qProjSize, + int kProjSize, + int vProjSize, + int oProjSize, + int qoMaxSeqLength, + int kvMaxSeqLength, + int maxBatchSize, + int maxBeamSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned *attnMode, + int *nHeads, + double *smScaler, + cudnnDataType_t *dataType, + cudnnDataType_t *computePrec, + cudnnMathType_t *mathType, + cudnnDropoutDescriptor_t *attnDropoutDesc, + cudnnDropoutDescriptor_t *postDropoutDesc, + int *qSize, + int *kSize, + int *vSize, + int *qProjSize, + int *kProjSize, + int *vProjSize, + int *oProjSize, + int *qoMaxSeqLength, + int *kvMaxSeqLength, + int *maxBatchSize, + int *maxBeamSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + size_t *weightSizeInBytes, + size_t *workSpaceSizeInBytes, + size_t *reserveSpaceSizeInBytes); + +typedef enum { + CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */ + CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */ + CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */ + CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */ + CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */ + CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */ + CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */ + CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */ +} cudnnMultiHeadAttnWeightKind_t; + +#define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnMultiHeadAttnWeightKind_t wKind, + size_t weightSizeInBytes, + const void *weights, + cudnnTensorDescriptor_t wDesc, + void **wAddr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnForward(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + int currIdx, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsQO[], + const int devSeqLengthsKV[], + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const void *residuals, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t oDesc, + void *out, + 
size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnAdvVersionCheck(void); + +typedef enum { + CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */ + CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */ +} cudnnWgradMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardData_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t yDesc, + const void *y, + const void *dy, + cudnnRNNDataDescriptor_t xDesc, + void *dx, + cudnnTensorDescriptor_t hDesc, + const void *hx, + const void *dhy, + void *dhx, + cudnnTensorDescriptor_t cDesc, + const void *cx, + const void *dcy, + void *dcx, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeights_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnWgradMode_t addGrad, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnTensorDescriptor_t hDesc, + const void *hx, + cudnnRNNDataDescriptor_t yDesc, + const void *y, + size_t weightSpaceSize, + void *dweightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsDQDO[], + const int devSeqLengthsDKDV[], + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + const cudnnSeqDataDescriptor_t dqDesc, + void *dqueries, + const void *queries, + const cudnnSeqDataDescriptor_t dkDesc, + void *dkeys, + const void *keys, + const cudnnSeqDataDescriptor_t dvDesc, + void *dvalues, + const void *values, + size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnWgradMode_t addGrad, + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + size_t weightSizeInBytes, + const void *weights, + void *dweights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* + * CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions + */ +/* Input normalization mode for loss function */ +typedef enum { + CUDNN_LOSS_NORMALIZATION_NONE = 0, + CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1, +} cudnnLossNormalizationMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI 
+cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode, + int maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor_v9(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnCTCGradMode_t ctcGradMode, + int maxLabelLength); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode, + int *maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor_v9(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnCTCGradMode_t *ctcGradMode, + int *maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc); + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int hostLabels[], /* labels, in CPU memory */ + const int hostLabelLengths[], /* the length of each label, in CPU memory */ + const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + void *workspace, /* pointer to the workspace, in GPU memory */ + size_t workSpaceSizeInBytes); /* size of the workspace */ + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int labels[], /* labels, in GPU memory */ + const int labelLengths[], /* the length of each label, in GPU memory */ + const int inputLengths[], /* the lengths of timing steps 
in each batch, in GPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + size_t workSpaceSizeInBytes, /* size of the workspace */ + void *workspace); /* pointer to the workspace, in GPU memory */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + const int *labels, /* labels, in CPU memory */ + const int *labelLengths, /* the length of each label, in CPU memory */ + const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_ADV_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_backend.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_backend.h new file mode 100644 index 0000000000000000000000000000000000000000..5a378e2087f7a45c423f65d213d98c4fa20f3a52 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_backend.h @@ -0,0 +1,60 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDNN_BACKEND_H_ +#define _CUDNN_BACKEND_H_ + +/* + * The content of this header has been moved into cudnn_graph.h. + * This header is kept for the backward compatibility purpose. + */ + +#include "cudnn_graph.h" + +#endif /* _CUDNN_BACKEND_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_backend_v9.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_backend_v9.h new file mode 100644 index 0000000000000000000000000000000000000000..5a378e2087f7a45c423f65d213d98c4fa20f3a52 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_backend_v9.h @@ -0,0 +1,60 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDNN_BACKEND_H_ +#define _CUDNN_BACKEND_H_ + +/* + * The content of this header has been moved into cudnn_graph.h. + * This header is kept for the backward compatibility purpose. + */ + +#include "cudnn_graph.h" + +#endif /* _CUDNN_BACKEND_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_cnn.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_cnn.h new file mode 100644 index 0000000000000000000000000000000000000000..3cfb9719ece52151e54b4c3d455ebe38a8f088fe --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_cnn.h @@ -0,0 +1,693 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_cnn : cuDNN's basic definitions and CNN functions. + */ + +#if !defined(CUDNN_CNN_H_) +#define CUDNN_CNN_H_ + +#pragma once +#include <stdint.h> + +#include "cudnn_version.h" +#include "cudnn_ops.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_CNN_MAJOR 9 +#define CUDNN_CNN_MINOR 5 +#define CUDNN_CNN_PATCH 1 + +#if (CUDNN_CNN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_MINOR != CUDNN_MINOR) || (CUDNN_CNN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN CNN INFER!!!
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t CUDNN_DEPRECATED; + +typedef struct cudnnConvolutionFwdAlgoPerfStruct { + cudnnConvolutionFwdAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionFwdAlgoPerf_t CUDNN_DEPRECATED; + +/* Create an instance of convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc); + +/* Destroy an instance of convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc, + int pad_h, /* zero-padding height */ + int pad_w, /* zero-padding width */ + int u, /* vertical filter stride */ + int v, /* horizontal filter stride */ + int dilation_h, /* filter dilation in the vertical dimension */ + int dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int *pad_h, /* zero-padding height */ + int *pad_w, /* zero-padding width */ + int *u, /* vertical filter stride */ + int *v, /* horizontal filter stride */ + int *dilation_h, /* filter dilation in the vertical dimension */ + int *dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc, + int arrayLength, /* nbDims-2 size */ + const int padA[], + const int filterStrideA[], + const int dilationA[], + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); /* convolution data type */ + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int arrayLengthRequested, + int *arrayLength, + int padA[], + int strideA[], + int dilationA[], + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); /* convolution data type */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int *n, + int *c, 
+ int *h, + int *w); + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int nbDims, + int tensorOuputDimA[]); + +/* helper function to provide the convolution forward algo that fit best the requirement */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnFilterDescriptor_t filterDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t destDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnIm2Col(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + void *colBuffer); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnReorderFilterAndBias(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + cudnnReorderType_t reorderType, + const void *filterData, + void *reorderedFilterData, + int reorderBias, + const void *biasData, + void *reorderedBiasData); + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + cudnnConvolutionFwdAlgo_t algo, + size_t *sizeInBytes); + +/* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform the forward pass for batch convolution */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionForward(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI 
+cudnnConvolutionBiasActivationForward(cudnnHandle_t handle, + const void *alpha1, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *alpha2, + const cudnnTensorDescriptor_t zDesc, + const void *z, + const cudnnTensorDescriptor_t biasDesc, + const void *bias, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* helper function to provide the convolution backward data algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdDataAlgoPerfStruct { + cudnnConvolutionBwdDataAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdDataAlgoPerf_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + cudnnConvolutionBwdDataAlgo_t algo, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardData(cudnnHandle_t handle, + const void *alpha, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdDataAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Helper function to calculate folding descriptors for dgrad */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle, + const cudnnFilterDescriptor_t 
filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const cudnnTensorFormat_t transformFormat, + cudnnFilterDescriptor_t foldedFilterDesc, + cudnnTensorDescriptor_t paddedDiffDesc, + cudnnConvolutionDescriptor_t foldedConvDesc, + cudnnTensorDescriptor_t foldedGradDesc, + cudnnTensorTransformDescriptor_t filterFoldTransDesc, + cudnnTensorTransformDescriptor_t diffPadTransDesc, + cudnnTensorTransformDescriptor_t gradFoldTransDesc, + cudnnTensorTransformDescriptor_t gradUnfoldTransDesc); + +/* cudnnFusedOps... */ +struct cudnnFusedOpsConstParamStruct; +typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t CUDNN_DEPRECATED; + +struct cudnnFusedOpsVariantParamStruct; +typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t CUDNN_DEPRECATED; + +struct cudnnFusedOpsPlanStruct; +typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t CUDNN_DEPRECATED; + +typedef enum { + /* each op in [ ] can be disabled by passing NULL ptr */ + /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0, + /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1, + /* utility for BN training in BN-conv fusion */ + /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */ + /* optionally update running stats and generate saved stats */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2, + /* utility for BN inference in BN-conv fusion */ + /* computes the equivalent scale and bias from learned running stats and learned scale, bias */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3, + /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */ + CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4, + /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */ + CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5, + /* reserved for future use */ + CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6, +} cudnnFusedOps_t CUDNN_DEPRECATED; + +typedef enum { + /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get XDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_XDESC = 0, + /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_XDATA_PLACEHOLDER = 1, + /* set/get BN_MODE: pass cudnnBatchNormMode_t* */ + CUDNN_PARAM_BN_MODE = 2, + /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3, + /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4, + /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5, + /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */ + /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */ + CUDNN_PARAM_ACTIVATION_DESC = 6, + /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */ + /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */ + CUDNN_PARAM_CONV_DESC = 7, + /* set WDESC: pass previously initialized 
cudnnFilterDescriptor_t */ + /* get WDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_WDESC = 8, + /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_WDATA_PLACEHOLDER = 9, + /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get DWDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_DWDESC = 10, + /* set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DWDATA_PLACEHOLDER = 11, + /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YDESC = 12, + /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YDATA_PLACEHOLDER = 13, + /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DYDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DYDESC = 14, + /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DYDATA_PLACEHOLDER = 15, + /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YSTATS_DESC = 16, + /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSUM_PLACEHOLDER = 17, + /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18, + /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19, + /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20, + /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21, + /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22, + /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23, + /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24, + /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25, + + /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ZDESC = 26, + /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ZDATA_PLACEHOLDER = 27, + /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28, + /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29, + /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30, + + /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31, + /* set/get 
ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32, + + /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DXDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DXDESC = 33, + /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DXDATA_PLACEHOLDER = 34, + /* set DZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DZDESC = 35, + /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DZDATA_PLACEHOLDER = 36, + /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37, + /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38, +} cudnnFusedOpsConstParamLabel_t CUDNN_DEPRECATED; + +typedef enum { + CUDNN_PTR_NULL = 0, + CUDNN_PTR_ELEM_ALIGNED = 1, + CUDNN_PTR_16B_ALIGNED = 2, +} cudnnFusedOpsPointerPlaceHolder_t CUDNN_DEPRECATED; + +typedef enum { + /* set: pass void* pointing to dev memory */ + /* get: pass void** pointing to host memory */ + CUDNN_PTR_XDATA = 0, + CUDNN_PTR_BN_EQSCALE = 1, + CUDNN_PTR_BN_EQBIAS = 2, + CUDNN_PTR_WDATA = 3, + CUDNN_PTR_DWDATA = 4, + CUDNN_PTR_YDATA = 5, + CUDNN_PTR_DYDATA = 6, + CUDNN_PTR_YSUM = 7, + CUDNN_PTR_YSQSUM = 8, + CUDNN_PTR_WORKSPACE = 9, + CUDNN_PTR_BN_SCALE = 10, + CUDNN_PTR_BN_BIAS = 11, + CUDNN_PTR_BN_SAVED_MEAN = 12, + CUDNN_PTR_BN_SAVED_INVSTD = 13, + CUDNN_PTR_BN_RUNNING_MEAN = 14, + CUDNN_PTR_BN_RUNNING_VAR = 15, + CUDNN_PTR_ZDATA = 16, + CUDNN_PTR_BN_Z_EQSCALE = 17, + CUDNN_PTR_BN_Z_EQBIAS = 18, + CUDNN_PTR_ACTIVATION_BITMASK = 19, + CUDNN_PTR_DXDATA = 20, + CUDNN_PTR_DZDATA = 21, + CUDNN_PTR_BN_DSCALE = 22, + CUDNN_PTR_BN_DBIAS = 23, + + /* set/get: pass size_t* pointing to host memory */ + CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100, + /* set/get: pass int64_t* pointing to host memory */ + CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103, +} cudnnFusedOpsVariantParamLabel_t CUDNN_DEPRECATED; + +cudnnStatus_t CUDNNWINAPI +cudnnCnnVersionCheck(void); + +/* helper function to provide the convolution backward filter algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct { + cudnnConvolutionBwdFilterAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdFilterAlgoPerf_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const 
cudnnTensorDescriptor_t dyDesc, + const void *y, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardFilter(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnFilterDescriptor_t dwDesc, + void *dw); + +/* Function to compute the bias gradient for batch convolution */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardBias(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dbDesc, + void *db); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + const void *param); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + void *param, + int *isNULL); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI 
+cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMakeFusedOpsPlan(cudnnHandle_t handle, + cudnnFusedOpsPlan_t plan, + const cudnnFusedOpsConstParamPack_t constPack, + size_t *workspaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_CNN_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_cnn_v9.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_cnn_v9.h new file mode 100644 index 0000000000000000000000000000000000000000..3cfb9719ece52151e54b4c3d455ebe38a8f088fe --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_cnn_v9.h @@ -0,0 +1,693 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_cnn : cuDNN's basic definitions and CNN functions. 
+ */ + +#if !defined(CUDNN_CNN_H_) +#define CUDNN_CNN_H_ + +#pragma once +#include <stdint.h> + +#include "cudnn_version.h" +#include "cudnn_ops.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_CNN_MAJOR 9 +#define CUDNN_CNN_MINOR 5 +#define CUDNN_CNN_PATCH 1 + +#if (CUDNN_CNN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_MINOR != CUDNN_MINOR) || (CUDNN_CNN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN CNN INFER!!! +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t CUDNN_DEPRECATED; + +typedef struct cudnnConvolutionFwdAlgoPerfStruct { + cudnnConvolutionFwdAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionFwdAlgoPerf_t CUDNN_DEPRECATED; + +/* Create an instance of convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc); + +/* Destroy an instance of convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc, + int pad_h, /* zero-padding height */ + int pad_w, /* zero-padding width */ + int u, /* vertical filter stride */ + int v, /* horizontal filter stride */ + int dilation_h, /* filter dilation in the vertical dimension */ + int dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int *pad_h, /* zero-padding height */ + int *pad_w, /* zero-padding width */ + int *u, /* vertical filter stride */ + int *v, /* horizontal filter stride */ + int *dilation_h, /* filter dilation in the vertical dimension */ + int *dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc, + int arrayLength, /* nbDims-2 size */ + const int padA[], + const int filterStrideA[], + const int dilationA[], + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); /* convolution data type */ + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdDescriptor(const
cudnnConvolutionDescriptor_t convDesc, + int arrayLengthRequested, + int *arrayLength, + int padA[], + int strideA[], + int dilationA[], + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); /* convolution data type */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int *n, + int *c, + int *h, + int *w); + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int nbDims, + int tensorOuputDimA[]); + +/* helper function to provide the convolution forward algo that fit best the requirement */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnFilterDescriptor_t filterDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t destDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnIm2Col(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + void *colBuffer); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnReorderFilterAndBias(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + cudnnReorderType_t reorderType, + const void *filterData, + void *reorderedFilterData, + int reorderBias, + const void *biasData, + void *reorderedBiasData); + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + cudnnConvolutionFwdAlgo_t algo, + size_t *sizeInBytes); + +/* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform the forward pass for batch convolution */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionForward(cudnnHandle_t handle, + 
const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBiasActivationForward(cudnnHandle_t handle, + const void *alpha1, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *alpha2, + const cudnnTensorDescriptor_t zDesc, + const void *z, + const cudnnTensorDescriptor_t biasDesc, + const void *bias, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* helper function to provide the convolution backward data algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdDataAlgoPerfStruct { + cudnnConvolutionBwdDataAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdDataAlgoPerf_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + cudnnConvolutionBwdDataAlgo_t algo, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardData(cudnnHandle_t handle, + const void *alpha, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const 
cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdDataAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Helper function to calculate folding descriptors for dgrad */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const cudnnTensorFormat_t transformFormat, + cudnnFilterDescriptor_t foldedFilterDesc, + cudnnTensorDescriptor_t paddedDiffDesc, + cudnnConvolutionDescriptor_t foldedConvDesc, + cudnnTensorDescriptor_t foldedGradDesc, + cudnnTensorTransformDescriptor_t filterFoldTransDesc, + cudnnTensorTransformDescriptor_t diffPadTransDesc, + cudnnTensorTransformDescriptor_t gradFoldTransDesc, + cudnnTensorTransformDescriptor_t gradUnfoldTransDesc); + +/* cudnnFusedOps... */ +struct cudnnFusedOpsConstParamStruct; +typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t CUDNN_DEPRECATED; + +struct cudnnFusedOpsVariantParamStruct; +typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t CUDNN_DEPRECATED; + +struct cudnnFusedOpsPlanStruct; +typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t CUDNN_DEPRECATED; + +typedef enum { + /* each op in [ ] can be disabled by passing NULL ptr */ + /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0, + /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1, + /* utility for BN training in BN-conv fusion */ + /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */ + /* optionally update running stats and generate saved stats */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2, + /* utility for BN inference in BN-conv fusion */ + /* computes the equivalent scale and bias from learned running stats and learned scale, bias */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3, + /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */ + CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4, + /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */ + CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5, + /* reserved for future use */ + CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6, +} cudnnFusedOps_t CUDNN_DEPRECATED; + +typedef enum { + /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get XDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_XDESC = 0, + /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_XDATA_PLACEHOLDER = 1, + /* set/get BN_MODE: pass cudnnBatchNormMode_t* */ + CUDNN_PARAM_BN_MODE = 2, + /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3, + /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4, + /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + 
CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5, + /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */ + /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */ + CUDNN_PARAM_ACTIVATION_DESC = 6, + /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */ + /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */ + CUDNN_PARAM_CONV_DESC = 7, + /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get WDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_WDESC = 8, + /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_WDATA_PLACEHOLDER = 9, + /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get DWDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_DWDESC = 10, + /* set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DWDATA_PLACEHOLDER = 11, + /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YDESC = 12, + /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YDATA_PLACEHOLDER = 13, + /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DYDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DYDESC = 14, + /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DYDATA_PLACEHOLDER = 15, + /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YSTATS_DESC = 16, + /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSUM_PLACEHOLDER = 17, + /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18, + /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19, + /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20, + /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21, + /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22, + /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23, + /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24, + /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25, + + /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ZDESC = 26, + /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ZDATA_PLACEHOLDER = 27, + /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28, + /* set/get 
BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29, + /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30, + + /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31, + /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32, + + /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DXDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DXDESC = 33, + /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DXDATA_PLACEHOLDER = 34, + /* set DZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DZDESC = 35, + /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DZDATA_PLACEHOLDER = 36, + /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37, + /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38, +} cudnnFusedOpsConstParamLabel_t CUDNN_DEPRECATED; + +typedef enum { + CUDNN_PTR_NULL = 0, + CUDNN_PTR_ELEM_ALIGNED = 1, + CUDNN_PTR_16B_ALIGNED = 2, +} cudnnFusedOpsPointerPlaceHolder_t CUDNN_DEPRECATED; + +typedef enum { + /* set: pass void* pointing to dev memory */ + /* get: pass void** pointing to host memory */ + CUDNN_PTR_XDATA = 0, + CUDNN_PTR_BN_EQSCALE = 1, + CUDNN_PTR_BN_EQBIAS = 2, + CUDNN_PTR_WDATA = 3, + CUDNN_PTR_DWDATA = 4, + CUDNN_PTR_YDATA = 5, + CUDNN_PTR_DYDATA = 6, + CUDNN_PTR_YSUM = 7, + CUDNN_PTR_YSQSUM = 8, + CUDNN_PTR_WORKSPACE = 9, + CUDNN_PTR_BN_SCALE = 10, + CUDNN_PTR_BN_BIAS = 11, + CUDNN_PTR_BN_SAVED_MEAN = 12, + CUDNN_PTR_BN_SAVED_INVSTD = 13, + CUDNN_PTR_BN_RUNNING_MEAN = 14, + CUDNN_PTR_BN_RUNNING_VAR = 15, + CUDNN_PTR_ZDATA = 16, + CUDNN_PTR_BN_Z_EQSCALE = 17, + CUDNN_PTR_BN_Z_EQBIAS = 18, + CUDNN_PTR_ACTIVATION_BITMASK = 19, + CUDNN_PTR_DXDATA = 20, + CUDNN_PTR_DZDATA = 21, + CUDNN_PTR_BN_DSCALE = 22, + CUDNN_PTR_BN_DBIAS = 23, + + /* set/get: pass size_t* pointing to host memory */ + CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100, + /* set/get: pass int64_t* pointing to host memory */ + CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103, +} cudnnFusedOpsVariantParamLabel_t CUDNN_DEPRECATED; + +cudnnStatus_t CUDNNWINAPI +cudnnCnnVersionCheck(void); + +/* helper function to provide the convolution backward filter algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct { + cudnnConvolutionBwdFilterAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdFilterAlgoPerf_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI 
+cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *y, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardFilter(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnFilterDescriptor_t dwDesc, + void *dw); + +/* Function to compute the bias gradient for batch convolution */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardBias(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dbDesc, + void *db); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + const void *param); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + void *param, + int *isNULL); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI 
+cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnMakeFusedOpsPlan(cudnnHandle_t handle, + cudnnFusedOpsPlan_t plan, + const cudnnFusedOpsConstParamPack_t constPack, + size_t *workspaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_CNN_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_graph.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..2baed61ce65dac3d5a3c2fe667a5dbafe9eec7e0 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_graph.h @@ -0,0 +1,940 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. 
Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_graph : cuDNN's basic definitions and operations. + */ + +#if !defined(CUDNN_GRAPH_H_) +#define CUDNN_GRAPH_H_ + +#include <stdint.h> +#include <stdio.h> + +#include <cuda_runtime.h> + +#include "cudnn_version.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_GRAPH_MAJOR 9 +#define CUDNN_GRAPH_MINOR 5 +#define CUDNN_GRAPH_PATCH 1 + +#if (CUDNN_GRAPH_MAJOR != CUDNN_MAJOR) || (CUDNN_GRAPH_MINOR != CUDNN_MINOR) || (CUDNN_GRAPH_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN GRAPH!!! +#endif + +#ifndef CUDNNWINAPI +#ifdef _WIN32 +#define CUDNNWINAPI __stdcall +#else +#define CUDNNWINAPI +#endif +#endif + +/* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */ +#if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__)) +/* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */ +#define CUDNN_DEPRECATED __attribute__((deprecated)) +#define CUDNN_DEPRECATED_ENUM __attribute__((deprecated)) +#elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER) +/* Microsoft Visual C++ */ +#define CUDNN_DEPRECATED __declspec(deprecated) +#define CUDNN_DEPRECATED_ENUM __declspec(deprecated) +#elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L) +/* C++14 compilers */ +#define CUDNN_DEPRECATED [[deprecated]] +#define CUDNN_DEPRECATED_ENUM [[deprecated]] +#else +/* No support for the deprecated attribute */ +#define CUDNN_DEPRECATED +#define CUDNN_DEPRECATED_ENUM +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +struct cudnnContext; +typedef struct cudnnContext *cudnnHandle_t; + +size_t CUDNNWINAPI +cudnnGetVersion(void); + +size_t CUDNNWINAPI +cudnnGetMaxDeviceVersion(void); + +/* Returns CUDA Runtime version statically linked against cudnn */ +size_t CUDNNWINAPI +cudnnGetCudartVersion(void); + +/* + * CUDNN return codes + */ +typedef enum { + CUDNN_STATUS_SUCCESS = 0, + + /* Uncategorized errors */ + CUDNN_STATUS_NOT_INITIALIZED = 1001, + CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH = 1002, + CUDNN_STATUS_SERIALIZATION_VERSION_MISMATCH = 1003, + CUDNN_STATUS_DEPRECATED = 1004, + CUDNN_STATUS_LICENSE_ERROR = 1005, + CUDNN_STATUS_RUNTIME_IN_PROGRESS = 1006, + CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 1007, + CUDNN_STATUS_SUBLIBRARY_LOADING_FAILED = 1008, + + CUDNN_STATUS_BAD_PARAM = 2000, + CUDNN_STATUS_BAD_PARAM_NULL_POINTER = 2002, + CUDNN_STATUS_BAD_PARAM_MISALIGNED_POINTER = 2003, + CUDNN_STATUS_BAD_PARAM_NOT_FINALIZED = 2004, + CUDNN_STATUS_BAD_PARAM_OUT_OF_BOUND = 2005, + CUDNN_STATUS_BAD_PARAM_SIZE_INSUFFICIENT = 2006, + CUDNN_STATUS_BAD_PARAM_STREAM_MISMATCH = 2007, + CUDNN_STATUS_BAD_PARAM_SHAPE_MISMATCH = 2008, + CUDNN_STATUS_BAD_PARAM_DUPLICATED_ENTRIES = 2009, + CUDNN_STATUS_BAD_PARAM_ATTRIBUTE_TYPE = 2010, + CUDNN_STATUS_BAD_PARAM_CUDA_GRAPH_MISMATCH = 2011, + + CUDNN_STATUS_NOT_SUPPORTED = 3000, + CUDNN_STATUS_NOT_SUPPORTED_GRAPH_PATTERN = 3001, + CUDNN_STATUS_NOT_SUPPORTED_SHAPE = 3002, + CUDNN_STATUS_NOT_SUPPORTED_DATA_TYPE = 3003, + CUDNN_STATUS_NOT_SUPPORTED_LAYOUT = 3004, + CUDNN_STATUS_NOT_SUPPORTED_INCOMPATIBLE_CUDA_DRIVER = 3005, +
CUDNN_STATUS_NOT_SUPPORTED_INCOMPATIBLE_CUDART = 3006, + CUDNN_STATUS_NOT_SUPPORTED_ARCH_MISMATCH = 3007, + CUDNN_STATUS_NOT_SUPPORTED_RUNTIME_PREREQUISITE_MISSING = 3008, + CUDNN_STATUS_NOT_SUPPORTED_SUBLIBRARY_UNAVAILABLE = 3009, + CUDNN_STATUS_NOT_SUPPORTED_SHARED_MEMORY_INSUFFICIENT = 3010, + CUDNN_STATUS_NOT_SUPPORTED_PADDING = 3011, + CUDNN_STATUS_NOT_SUPPORTED_BAD_LAUNCH_PARAM = 3012, + CUDNN_STATUS_NOT_SUPPORTED_CUDA_GRAPH_NATIVE_API = 3013, + + CUDNN_STATUS_INTERNAL_ERROR = 4000, + CUDNN_STATUS_INTERNAL_ERROR_COMPILATION_FAILED = 4001, + CUDNN_STATUS_INTERNAL_ERROR_UNEXPECTED_VALUE = 4002, + CUDNN_STATUS_INTERNAL_ERROR_HOST_ALLOCATION_FAILED = 4003, + CUDNN_STATUS_INTERNAL_ERROR_DEVICE_ALLOCATION_FAILED = 4004, + CUDNN_STATUS_INTERNAL_ERROR_BAD_LAUNCH_PARAM = 4005, + CUDNN_STATUS_INTERNAL_ERROR_TEXTURE_CREATION_FAILED = 4006, + + CUDNN_STATUS_EXECUTION_FAILED = 5000, + CUDNN_STATUS_EXECUTION_FAILED_CUDA_DRIVER = 5001, + CUDNN_STATUS_EXECUTION_FAILED_CUBLAS = 5002, + CUDNN_STATUS_EXECUTION_FAILED_CUDART = 5003, + CUDNN_STATUS_EXECUTION_FAILED_CURAND = 5004, + + CUDNN_STATUS_ALLOC_FAILED CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_INTERNAL_ERROR_HOST_ALLOCATION_FAILED, + CUDNN_STATUS_INVALID_VALUE CUDNN_DEPRECATED_ENUM = 2001 /* please transition to CUDNN_STATUS_BAD_PARAM instead */, + CUDNN_STATUS_ARCH_MISMATCH CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_NOT_SUPPORTED_ARCH_MISMATCH, + CUDNN_STATUS_MAPPING_ERROR CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_INTERNAL_ERROR_TEXTURE_CREATION_FAILED, + CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING CUDNN_DEPRECATED_ENUM = + CUDNN_STATUS_NOT_SUPPORTED_RUNTIME_PREREQUISITE_MISSING, + CUDNN_STATUS_VERSION_MISMATCH CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH, +} cudnnStatus_t; + +#define CUDNN_STATUS_FULL_ERROR_CODE(category, specific_err) ((cudnnStatus_t)(0 + (category) + (specific_err))) +#define CUDNN_STATUS_CATEGORY(full_error_code) ((full_error_code) / 1000 * 1000) +#define CUDNN_STATUS_SPECIFIC_ERROR(full_error_code) ((full_error_code) % 1000) + +/* human-readable error messages */ +const char *CUDNNWINAPI +cudnnGetErrorString(cudnnStatus_t status); + +void CUDNNWINAPI +cudnnGetLastErrorString(char *message, size_t max_size); + +/* Forward definition in this version only */ +typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t CUDNN_DEPRECATED; + +typedef enum { + CUDNN_ERRQUERY_RAWCODE = 0, + CUDNN_ERRQUERY_NONBLOCKING = 1, + CUDNN_ERRQUERY_BLOCKING = 2, +} cudnnErrQueryMode_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag); + +cudnnStatus_t CUDNNWINAPI +cudnnGetProperty(libraryPropertyType type, int *value); + +cudnnStatus_t CUDNNWINAPI +cudnnCreate(cudnnHandle_t *handle); +cudnnStatus_t CUDNNWINAPI +cudnnDestroy(cudnnHandle_t handle); +cudnnStatus_t CUDNNWINAPI +cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId); +cudnnStatus_t CUDNNWINAPI +cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId); +/* + * CUDNN data type + */ +typedef enum { + CUDNN_DATA_FLOAT = 0, + CUDNN_DATA_DOUBLE = 1, + CUDNN_DATA_HALF = 2, + CUDNN_DATA_INT8 = 3, + CUDNN_DATA_INT32 = 4, + CUDNN_DATA_INT8x4 CUDNN_DEPRECATED_ENUM = 5, + CUDNN_DATA_UINT8 = 6, + CUDNN_DATA_UINT8x4 CUDNN_DEPRECATED_ENUM = 7, + CUDNN_DATA_INT8x32 CUDNN_DEPRECATED_ENUM = 8, + CUDNN_DATA_BFLOAT16 = 9, + CUDNN_DATA_INT64 = 10, + CUDNN_DATA_BOOLEAN = 11, + CUDNN_DATA_FP8_E4M3 = 12, + CUDNN_DATA_FP8_E5M2 = 13, + CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14, +} cudnnDataType_t; 
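/*
 * Illustrative usage sketch (editorial, not part of the shipped header).
 * It ties together the declarations above: the handle lifecycle via
 * cudnnCreate()/cudnnSetStream()/cudnnDestroy(), and error decoding via
 * cudnnGetErrorString() plus the CUDNN_STATUS_CATEGORY() and
 * CUDNN_STATUS_SPECIFIC_ERROR() macros (e.g. 3007 splits into category
 * 3000, CUDNN_STATUS_NOT_SUPPORTED, and specific error 7). Assumes a
 * working CUDA device; "check" is a local helper, not a cuDNN symbol.
 */
#include <stdio.h>
#include <cudnn_graph.h> /* assumption: the umbrella cudnn.h works as well */

static void check(cudnnStatus_t s, const char *what) {
    if (s != CUDNN_STATUS_SUCCESS) {
        fprintf(stderr, "%s: %s (category %d, specific error %d)\n", what,
                cudnnGetErrorString(s), (int)CUDNN_STATUS_CATEGORY(s),
                (int)CUDNN_STATUS_SPECIFIC_ERROR(s));
    }
}

int main(void) {
    cudnnHandle_t handle;
    check(cudnnCreate(&handle), "cudnnCreate");
    printf("running against cuDNN %zu\n", cudnnGetVersion());
    check(cudnnSetStream(handle, 0), "cudnnSetStream"); /* 0 = default CUDA stream */
    check(cudnnDestroy(handle), "cudnnDestroy");
    return 0;
}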
+
+/*
+ * CUDNN math type
+ */
+typedef enum {
+    CUDNN_DEFAULT_MATH = 0,
+    CUDNN_TENSOR_OP_MATH = 1,
+    CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2,
+    CUDNN_FMA_MATH = 3,
+} cudnnMathType_t;
+
+/*
+ * CUDNN propagate Nan
+ */
+typedef enum {
+    CUDNN_NOT_PROPAGATE_NAN CUDNN_DEPRECATED_ENUM = 0,
+    CUDNN_PROPAGATE_NAN CUDNN_DEPRECATED_ENUM = 1,
+} cudnnNanPropagation_t;
+
+/*
+ * Behavior for OOB samples. OOB samples are samples where L+R > T is encountered during the gradient calculation. If
+ * gradMode is set to CUDNN_CTC_SKIP_OOB_GRADIENTS, then the CTC loss function does not write to the gradient buffer for
+ * that sample. Instead, the current values, even not finite, are retained. If gradMode is set to
+ * CUDNN_CTC_ZERO_OOB_GRADIENTS, then the gradient for that sample is set to zero. This guarantees a finite gradient.
+ */
+typedef enum {
+    CUDNN_CTC_ZERO_OOB_GRADIENTS = 0,
+    CUDNN_CTC_SKIP_OOB_GRADIENTS = 1,
+} cudnnCTCGradMode_t;
+
+typedef enum {
+    CUDNN_TENSOR_NCHW = 0,        /* row major (wStride = 1, hStride = w) */
+    CUDNN_TENSOR_NHWC = 1,        /* feature maps interleaved ( cStride = 1 )*/
+    CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */
+} cudnnTensorFormat_t;
+
+/*
+ * CUDNN ReduceTensor op type
+ */
+typedef enum {
+    CUDNN_REDUCE_TENSOR_ADD = 0,
+    CUDNN_REDUCE_TENSOR_MUL = 1,
+    CUDNN_REDUCE_TENSOR_MIN = 2,
+    CUDNN_REDUCE_TENSOR_MAX = 3,
+    CUDNN_REDUCE_TENSOR_AMAX = 4,
+    CUDNN_REDUCE_TENSOR_AVG = 5,
+    CUDNN_REDUCE_TENSOR_NORM1 = 6,
+    CUDNN_REDUCE_TENSOR_NORM2 = 7,
+    CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8,
+} cudnnReduceTensorOp_t;
+
+/*
+ * activation mode
+ */
+typedef enum {
+    CUDNN_ACTIVATION_SIGMOID = 0,
+    CUDNN_ACTIVATION_RELU = 1,
+    CUDNN_ACTIVATION_TANH = 2,
+    CUDNN_ACTIVATION_CLIPPED_RELU = 3,
+    CUDNN_ACTIVATION_ELU = 4,
+    CUDNN_ACTIVATION_IDENTITY = 5,
+    CUDNN_ACTIVATION_SWISH = 6
+} cudnnActivationMode_t CUDNN_DEPRECATED;
+
+typedef enum {
+    CUDNN_SEV_FATAL = 0,
+    CUDNN_SEV_ERROR = 1,
+    CUDNN_SEV_WARNING = 2,
+    CUDNN_SEV_INFO = 3,
+} cudnnSeverity_t;
+
+/* Message masks to be used with cudnnSetCallback() */
+#define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR)
+#define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING)
+#define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO)
+
+/* struct containing useful information for each API call */
+typedef struct cudnnDebugStruct {
+    unsigned cudnn_version;
+    cudnnStatus_t cudnnStatus;
+    unsigned time_sec;      /* epoch time in seconds */
+    unsigned time_usec;     /* microseconds part of epoch time */
+    unsigned time_delta;    /* time since start in seconds */
+    cudnnHandle_t handle;   /* cudnn handle */
+    cudaStream_t stream;    /* cuda stream ID */
+    unsigned long long pid; /* process ID */
+    unsigned long long tid; /* thread ID */
+    int cudaDeviceId;       /* CUDA device ID */
+    int reserved[15];       /* reserved for future use */
+} cudnnDebug_t;
+
+typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr);
+
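/*
 * Illustrative sketch (editorial, not part of the shipped header): routing
 * cuDNN log messages through cudnnSetCallback(). The mask is built from the
 * CUDNN_SEV_*_EN bit macros defined above; note that no mask bit is defined
 * for CUDNN_SEV_FATAL. "log_to_stderr" is a hypothetical name for a callback
 * matching the cudnnCallback_t signature.
 */
#include <stdio.h>

static void log_to_stderr(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg) {
    (void)udata; /* unused here; cuDNN passes the udata pointer back verbatim */
    /* dbg also carries the handle, stream, pid/tid and timing of the API call */
    fprintf(stderr, "[cudnn %u sev=%d] %s\n", dbg->cudnn_version, (int)sev, msg);
}

static cudnnStatus_t enable_error_and_warning_logs(void) {
    return cudnnSetCallback(CUDNN_SEV_ERROR_EN | CUDNN_SEV_WARNING_EN, NULL, log_to_stderr);
}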
+/*
+ * \brief Cross-library version checker.
+ * This function is implemented differently in each sub-library. Each sublib
+ * checks whether its own version matches that of its dependencies.
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
+ * CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH if the versions are inconsistent.
+ */
+cudnnStatus_t CUDNNWINAPI
+cudnnGraphVersionCheck(void);
+
+/* Maximum supported number of tensor dimensions */
+#define CUDNN_DIM_MAX 8
+
+/*
+ * convolution mode
+ */
+typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t;
+
+/*
+ * CUDNN Reorder
+ */
+typedef enum {
+    CUDNN_DEFAULT_REORDER = 0,
+    CUDNN_NO_REORDER = 1,
+} cudnnReorderType_t CUDNN_DEPRECATED;
+
+typedef void *cudnnBackendDescriptor_t;
+
+typedef struct cudnnFractionStruct {
+    int64_t numerator;
+    int64_t denominator;
+} cudnnFraction_t;
+
+typedef enum {
+    CUDNN_POINTWISE_ADD = 0,
+    CUDNN_POINTWISE_ADD_SQUARE = 5,
+    CUDNN_POINTWISE_DIV = 6,
+    CUDNN_POINTWISE_MAX = 3,
+    CUDNN_POINTWISE_MIN = 2,
+    CUDNN_POINTWISE_MOD = 7,
+    CUDNN_POINTWISE_MUL = 1,
+    CUDNN_POINTWISE_POW = 8,
+    CUDNN_POINTWISE_SUB = 9,
+
+    CUDNN_POINTWISE_ABS = 10,
+    CUDNN_POINTWISE_CEIL = 11,
+    CUDNN_POINTWISE_COS = 12,
+    CUDNN_POINTWISE_EXP = 13,
+    CUDNN_POINTWISE_FLOOR = 14,
+    CUDNN_POINTWISE_LOG = 15,
+    CUDNN_POINTWISE_NEG = 16,
+    CUDNN_POINTWISE_RSQRT = 17,
+    CUDNN_POINTWISE_SIN = 18,
+    CUDNN_POINTWISE_SQRT = 4,
+    CUDNN_POINTWISE_TAN = 19,
+    CUDNN_POINTWISE_ERF = 20,
+    CUDNN_POINTWISE_IDENTITY = 21,
+    CUDNN_POINTWISE_RECIPROCAL = 22,
+    CUDNN_POINTWISE_ATAN2 = 23,
+
+    CUDNN_POINTWISE_RELU_FWD = 100,
+    CUDNN_POINTWISE_TANH_FWD = 101,
+    CUDNN_POINTWISE_SIGMOID_FWD = 102,
+    CUDNN_POINTWISE_ELU_FWD = 103,
+    CUDNN_POINTWISE_GELU_FWD = 104,
+    CUDNN_POINTWISE_SOFTPLUS_FWD = 105,
+    CUDNN_POINTWISE_SWISH_FWD = 106,
+    CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107,
+
+    CUDNN_POINTWISE_RELU_BWD = 200,
+    CUDNN_POINTWISE_TANH_BWD = 201,
+    CUDNN_POINTWISE_SIGMOID_BWD = 202,
+    CUDNN_POINTWISE_ELU_BWD = 203,
+    CUDNN_POINTWISE_GELU_BWD = 204,
+    CUDNN_POINTWISE_SOFTPLUS_BWD = 205,
+    CUDNN_POINTWISE_SWISH_BWD = 206,
+    CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207,
+
+    CUDNN_POINTWISE_CMP_EQ = 300,
+    CUDNN_POINTWISE_CMP_NEQ = 301,
+    CUDNN_POINTWISE_CMP_GT = 302,
+    CUDNN_POINTWISE_CMP_GE = 303,
+    CUDNN_POINTWISE_CMP_LT = 304,
+    CUDNN_POINTWISE_CMP_LE = 305,
+
+    CUDNN_POINTWISE_LOGICAL_AND = 400,
+    CUDNN_POINTWISE_LOGICAL_OR = 401,
+    CUDNN_POINTWISE_LOGICAL_NOT = 402,
+
+    CUDNN_POINTWISE_GEN_INDEX = 501,
+
+    CUDNN_POINTWISE_BINARY_SELECT = 601,
+} cudnnPointwiseMode_t;
+
+typedef enum {
+    CUDNN_RESAMPLE_NEAREST = 0,
+    CUDNN_RESAMPLE_BILINEAR = 1,
+    CUDNN_RESAMPLE_AVGPOOL = 2,
+    CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2,
+    CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4,
+    CUDNN_RESAMPLE_MAXPOOL = 3,
+} cudnnResampleMode_t;
+
+typedef enum {
+    CUDNN_SIGNAL_SET = 0,
+    CUDNN_SIGNAL_WAIT = 1,
+} cudnnSignalMode_t;
+
+typedef enum {
+    CUDNN_GENSTATS_SUM_SQSUM = 0,
+} cudnnGenStatsMode_t;
+
+typedef enum {
+    CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0,
+    CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1,
+} cudnnBnFinalizeStatsMode_t;
+
+typedef enum {
+    CUDNN_RNG_DISTRIBUTION_BERNOULLI,
+    CUDNN_RNG_DISTRIBUTION_UNIFORM,
+    CUDNN_RNG_DISTRIBUTION_NORMAL,
+} cudnnRngDistribution_t;
+
+typedef enum {
+    CUDNN_ATTR_POINTWISE_MODE = 0,
+    CUDNN_ATTR_POINTWISE_MATH_PREC = 1,
+    CUDNN_ATTR_POINTWISE_NAN_PROPAGATION CUDNN_DEPRECATED_ENUM = 2,
+    CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3,
+    CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4,
+    CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5,
+    CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6,
+    CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7,
+    CUDNN_ATTR_POINTWISE_SWISH_BETA = 8,
+    CUDNN_ATTR_POINTWISE_AXIS = 9,
+
+    CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100,
+    CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101,
+    CUDNN_ATTR_CONVOLUTION_DILATIONS = 102,
+
CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103, + CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104, + CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105, + CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106, + + CUDNN_ATTR_ENGINEHEUR_MODE = 200, + CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201, + CUDNN_ATTR_ENGINEHEUR_RESULTS = 202, + CUDNN_ATTR_ENGINEHEUR_SM_COUNT_TARGET = 203, + + CUDNN_ATTR_ENGINECFG_ENGINE = 300, + CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301, + CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302, + CUDNN_ATTR_ENGINECFG_WORKSPACE_SIZE = 303, + CUDNN_ATTR_ENGINECFG_SHARED_MEMORY_USED = 304, + + CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400, + CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401, + CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402, + CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403, + CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404, + CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405, + CUDNN_ATTR_EXECUTION_PLAN_KERNEL_CACHE = 406, + + CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500, + CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503, + + CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600, + CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601, + + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717, + + CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750, + CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751, + CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752, + CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755, + CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756, + CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757, + CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758, + + CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770, + CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771, + CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772, + CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773, + CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774, + + CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780, + CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784, + CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC 
= 790, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793, + CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796, + + CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800, + CUDNN_ATTR_OPERATIONGRAPH_OPS = 801, + CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802, + CUDNN_ATTR_OPERATIONGRAPH_IS_DYNAMIC_SHAPE_ENABLED = 803, + + CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900, + CUDNN_ATTR_TENSOR_DATA_TYPE = 901, + CUDNN_ATTR_TENSOR_DIMENSIONS = 902, + CUDNN_ATTR_TENSOR_STRIDES = 903, + CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904, + CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905, + CUDNN_ATTR_TENSOR_UNIQUE_ID = 906, + CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907, + CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908, + CUDNN_ATTR_TENSOR_REORDERING_MODE = 909, + CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913, + + CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000, + CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001, + CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002, + CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003, + + CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100, + CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101, + + CUDNN_ATTR_KNOB_INFO_TYPE = 1200, + CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201, + CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202, + CUDNN_ATTR_KNOB_INFO_STRIDE = 1203, + + CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300, + CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301, + CUDNN_ATTR_ENGINE_KNOB_INFO = 1302, + CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303, + CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304, + CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305, + CUDNN_ATTR_ENGINE_SM_COUNT_TARGET = 1306, + + CUDNN_ATTR_MATMUL_COMP_TYPE = 1500, + CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503, + + CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520, + CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521, + CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522, + CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523, + CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT CUDNN_DEPRECATED_ENUM = 1524, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527, + + CUDNN_ATTR_REDUCTION_OPERATOR = 1600, + CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601, + + CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610, + CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611, + CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612, + + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630, + + CUDNN_ATTR_RESAMPLE_MODE = 1700, + CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701, + CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702, + CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703, + CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704, + CUDNN_ATTR_RESAMPLE_STRIDES = 1705, + CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706, + CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707, + CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708, + + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710, 
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA CUDNN_DEPRECATED_ENUM = 1713, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA CUDNN_DEPRECATED_ENUM = 1714, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716, + + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA CUDNN_DEPRECATED_ENUM = 1723, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA CUDNN_DEPRECATED_ENUM = 1724, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727, + + CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800, + CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801, + CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802, + CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803, + + CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900, + CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901, + CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902, + CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903, + CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904, + + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_CONTAINER_DESC = 1950, + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_YDESC = 1951, + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_SEQUENCE_DESC = 1952, + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_PAGE_TABLE_DESC = 1953, + + CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000, + CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001, + CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002, + CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003, + CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004, + CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005, + CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006, + CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007, + CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012, + CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013, + CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014, + + CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100, + CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101, + CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102, + CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103, + CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104, + CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105, + CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106, + CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107, + CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108, + CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109, + CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110, + + CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200, + CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201, + + CUDNN_ATTR_RNG_DISTRIBUTION = 2300, + CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301, + CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302, + CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303, + CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304, + CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305, + + CUDNN_ATTR_OPERATION_RNG_YDESC = 2310, + CUDNN_ATTR_OPERATION_RNG_SEED = 2311, + CUDNN_ATTR_OPERATION_RNG_DESC = 2312, + CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313, + + CUDNN_ATTR_KERNEL_CACHE_OPERATION_GRAPH = 2400, + CUDNN_ATTR_KERNEL_CACHE_IS_ENGINECFG_KERNEL_CACHED = 2401 + +} cudnnBackendAttributeName_t; + +typedef enum { + CUDNN_TYPE_HANDLE = 0, + CUDNN_TYPE_DATA_TYPE, + 
CUDNN_TYPE_BOOLEAN, + CUDNN_TYPE_INT64, + CUDNN_TYPE_FLOAT, + CUDNN_TYPE_DOUBLE, + CUDNN_TYPE_VOID_PTR, + CUDNN_TYPE_CONVOLUTION_MODE, + CUDNN_TYPE_HEUR_MODE, + CUDNN_TYPE_KNOB_TYPE, + CUDNN_TYPE_NAN_PROPOGATION CUDNN_DEPRECATED_ENUM, + CUDNN_TYPE_NUMERICAL_NOTE, + CUDNN_TYPE_LAYOUT_TYPE, + CUDNN_TYPE_ATTRIB_NAME, + CUDNN_TYPE_POINTWISE_MODE, + CUDNN_TYPE_BACKEND_DESCRIPTOR, + CUDNN_TYPE_GENSTATS_MODE, + CUDNN_TYPE_BN_FINALIZE_STATS_MODE, + CUDNN_TYPE_REDUCTION_OPERATOR_TYPE, + CUDNN_TYPE_BEHAVIOR_NOTE, + CUDNN_TYPE_TENSOR_REORDERING_MODE, + CUDNN_TYPE_RESAMPLE_MODE, + CUDNN_TYPE_PADDING_MODE, + CUDNN_TYPE_INT32, + CUDNN_TYPE_CHAR, + CUDNN_TYPE_SIGNAL_MODE, + CUDNN_TYPE_FRACTION, + CUDNN_TYPE_NORM_MODE, + CUDNN_TYPE_NORM_FWD_PHASE, + CUDNN_TYPE_RNG_DISTRIBUTION +} cudnnBackendAttributeType_t; + +typedef enum { + CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0, + CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR, + CUDNN_BACKEND_ENGINE_DESCRIPTOR, + CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, + CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, + CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, + CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR, + CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR, + CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR, + CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR, + CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, + CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, + CUDNN_BACKEND_TENSOR_DESCRIPTOR, + CUDNN_BACKEND_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR, + CUDNN_BACKEND_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR, + CUDNN_BACKEND_RESAMPLE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR, + CUDNN_BACKEND_RNG_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR, + CUDNN_BACKEND_KERNEL_CACHE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_PAGED_CACHE_LOAD_DESCRIPTOR, +} cudnnBackendDescriptorType_t; + +typedef enum { + CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0, + CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS, + CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION, + CUDNN_NUMERICAL_NOTE_FFT, + CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC, + CUDNN_NUMERICAL_NOTE_WINOGRAD, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13, + CUDNN_NUMERICAL_NOTE_STRICT_NAN_PROP, + CUDNN_NUMERICAL_NOTE_TYPE_COUNT, +} cudnnBackendNumericalNote_t; + +typedef enum { + CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0, + CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1, + CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2, + CUDNN_BEHAVIOR_NOTE_SUPPORTS_CUDA_GRAPH_NATIVE_API = 3, + CUDNN_BEHAVIOR_NOTE_TYPE_COUNT, +} cudnnBackendBehaviorNote_t; + +typedef enum { + CUDNN_KNOB_TYPE_SPLIT_K CUDNN_DEPRECATED_ENUM = 0, + CUDNN_KNOB_TYPE_SWIZZLE = 1, + CUDNN_KNOB_TYPE_TILE_SIZE = 2, + CUDNN_KNOB_TYPE_USE_TEX CUDNN_DEPRECATED_ENUM = 3, + CUDNN_KNOB_TYPE_EDGE = 4, + CUDNN_KNOB_TYPE_KBLOCK CUDNN_DEPRECATED_ENUM = 5, + 
CUDNN_KNOB_TYPE_LDGA CUDNN_DEPRECATED_ENUM = 6, + CUDNN_KNOB_TYPE_LDGB CUDNN_DEPRECATED_ENUM = 7, + CUDNN_KNOB_TYPE_CHUNK_K CUDNN_DEPRECATED_ENUM = 8, + CUDNN_KNOB_TYPE_SPLIT_H CUDNN_DEPRECATED_ENUM = 9, + CUDNN_KNOB_TYPE_WINO_TILE CUDNN_DEPRECATED_ENUM = 10, + CUDNN_KNOB_TYPE_MULTIPLY = 11, + CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12, + CUDNN_KNOB_TYPE_TILEK = 13, + CUDNN_KNOB_TYPE_STAGES = 14, + CUDNN_KNOB_TYPE_REDUCTION_MODE = 15, + CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE CUDNN_DEPRECATED_ENUM = 16, + CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17, + CUDNN_KNOB_TYPE_IDX_MODE CUDNN_DEPRECATED_ENUM = 18, + CUDNN_KNOB_TYPE_SLICED CUDNN_DEPRECATED_ENUM = 19, + CUDNN_KNOB_TYPE_SPLIT_RS CUDNN_DEPRECATED_ENUM = 20, + CUDNN_KNOB_TYPE_SINGLEBUFFER CUDNN_DEPRECATED_ENUM = 21, + CUDNN_KNOB_TYPE_LDGC CUDNN_DEPRECATED_ENUM = 22, + CUDNN_KNOB_TYPE_SPECFILT = 23, + CUDNN_KNOB_TYPE_KERNEL_CFG = 24, + CUDNN_KNOB_TYPE_WORKSPACE = 25, + CUDNN_KNOB_TYPE_TILE_CGA CUDNN_DEPRECATED_ENUM = 26, + CUDNN_KNOB_TYPE_TILE_CGA_M = 27, + CUDNN_KNOB_TYPE_TILE_CGA_N = 28, + CUDNN_KNOB_TYPE_BLOCK_SIZE = 29, + CUDNN_KNOB_TYPE_OCCUPANCY = 30, + CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31, + CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK CUDNN_DEPRECATED_ENUM = 32, + CUDNN_KNOB_TYPE_SPLIT_COLS = 33, + CUDNN_KNOB_TYPE_TILE_ROWS = 34, + CUDNN_KNOB_TYPE_TILE_COLS = 35, + CUDNN_KNOB_TYPE_LOAD_SIZE = 36, + CUDNN_KNOB_TYPE_COUNTS, +} cudnnBackendKnobType_t; + +typedef enum { + CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0, + CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3, + CUDNN_LAYOUT_TYPE_COUNT = 4, +} cudnnBackendLayoutType_t; + +typedef enum { + CUDNN_HEUR_MODE_INSTANT = 0, + CUDNN_HEUR_MODE_B = 1, + CUDNN_HEUR_MODE_FALLBACK = 2, + CUDNN_HEUR_MODE_A = 3, + CUDNN_HEUR_MODES_COUNT = 4, +} cudnnBackendHeurMode_t; + +typedef enum { + CUDNN_TENSOR_REORDERING_NONE = 0, + CUDNN_TENSOR_REORDERING_INT8x32 = 1, + CUDNN_TENSOR_REORDERING_F16x16 = 2, +} cudnnBackendTensorReordering_t; + +typedef enum { + CUDNN_ZERO_PAD = 0, + CUDNN_NEG_INF_PAD = 1, + CUDNN_EDGE_VAL_PAD = 2, +} cudnnPaddingMode_t; + +typedef enum { + CUDNN_LAYER_NORM = 0, + CUDNN_INSTANCE_NORM = 1, + CUDNN_BATCH_NORM = 2, + CUDNN_GROUP_NORM = 3, + CUDNN_RMS_NORM = 4, +} cudnnBackendNormMode_t; + +typedef enum { + CUDNN_NORM_FWD_INFERENCE = 0, + CUDNN_NORM_FWD_TRAINING = 1, +} cudnnBackendNormFwdPhase_t; + +cudnnStatus_t CUDNNWINAPI +cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t elementCount, + const void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t requestedElementCount, + int64_t *elementCount, + void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack); + +cudnnStatus_t CUDNNWINAPI 
+cudnnBackendPopulateCudaGraph(cudnnHandle_t handle,
+                              cudnnBackendDescriptor_t executionPlan,
+                              cudnnBackendDescriptor_t variantPack,
+                              cudaGraph_t graph);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnBackendUpdateCudaGraph(cudnnHandle_t handle,
+                            cudnnBackendDescriptor_t executionPlan,
+                            cudnnBackendDescriptor_t variantPack,
+                            cudaGraph_t graph);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* CUDNN_GRAPH_H_ */
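/*
 * Illustrative sketch (editorial, not part of the shipped headers) of the
 * backend descriptor flow this header declares: create a descriptor, set its
 * attributes, finalize it, and eventually run a finalized execution plan via
 * cudnnBackendExecute(handle, executionPlan, variantPack). Shown here only
 * for a 4-D NCHW tensor descriptor; status checks are omitted for brevity,
 * "tensor_descriptor_sketch" is a hypothetical name, and all numeric values
 * are placeholders.
 */
static void tensor_descriptor_sketch(void) {
    cudnnBackendDescriptor_t desc;
    int64_t uid = 1;
    cudnnDataType_t dtype = CUDNN_DATA_FLOAT;
    int64_t dims[4] = {8, 64, 56, 56};                   /* N, C, H, W */
    int64_t strides[4] = {64 * 56 * 56, 56 * 56, 56, 1}; /* fully packed NCHW */
    int64_t alignment = 16;

    cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &desc);
    cudnnBackendSetAttribute(desc, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &uid);
    cudnnBackendSetAttribute(desc, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    cudnnBackendSetAttribute(desc, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 4, dims);
    cudnnBackendSetAttribute(desc, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 4, strides);
    cudnnBackendSetAttribute(desc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);
    cudnnBackendFinalize(desc); /* attributes become immutable after this */
    cudnnBackendDestroyDescriptor(desc);
}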
diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_graph_v9.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_graph_v9.h
new file mode 100644
index 0000000000000000000000000000000000000000..2baed61ce65dac3d5a3c2fe667a5dbafe9eec7e0
--- /dev/null
+++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_graph_v9.h
@@ -0,0 +1,940 @@
+/*
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/*
+ * cudnn_graph : cuDNN's basic definitions and operations.
+ */
+
+#if !defined(CUDNN_GRAPH_H_)
+#define CUDNN_GRAPH_H_
+
+#include <cuda_runtime.h>
+#include <stdint.h>
+
+#include <library_types.h>
+
+#include "cudnn_version.h"
+
+/* These version numbers are autogenerated, do not edit manually. */
+#define CUDNN_GRAPH_MAJOR 9
+#define CUDNN_GRAPH_MINOR 5
+#define CUDNN_GRAPH_PATCH 1
+
+#if (CUDNN_GRAPH_MAJOR != CUDNN_MAJOR) || (CUDNN_GRAPH_MINOR != CUDNN_MINOR) || (CUDNN_GRAPH_PATCH != CUDNN_PATCHLEVEL)
+#error Version mismatch in cuDNN GRAPH!!!
+#endif
+
+#ifndef CUDNNWINAPI
+#ifdef _WIN32
+#define CUDNNWINAPI __stdcall
+#else
+#define CUDNNWINAPI
+#endif
+#endif
+
+/* Warnings for deprecated APIs are enabled using the CUDNN_WARN_DEPRECATED macro */
+#if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__))
+/* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */
+#define CUDNN_DEPRECATED __attribute__((deprecated))
+#define CUDNN_DEPRECATED_ENUM __attribute__((deprecated))
+#elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER)
+/* Microsoft Visual C++ */
+#define CUDNN_DEPRECATED __declspec(deprecated)
+#define CUDNN_DEPRECATED_ENUM __declspec(deprecated)
+#elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L)
+/* C++14 compilers */
+#define CUDNN_DEPRECATED [[deprecated]]
+#define CUDNN_DEPRECATED_ENUM [[deprecated]]
+#else
+/* No support for the deprecated attribute */
+#define CUDNN_DEPRECATED
+#define CUDNN_DEPRECATED_ENUM
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct cudnnContext;
+typedef struct cudnnContext *cudnnHandle_t;
+
+size_t CUDNNWINAPI
+cudnnGetVersion(void);
+
+size_t CUDNNWINAPI
+cudnnGetMaxDeviceVersion(void);
+
+/* Returns CUDA Runtime version statically linked against cudnn */
+size_t CUDNNWINAPI
+cudnnGetCudartVersion(void);
+
+/*
+ * CUDNN return codes
+ */
+typedef enum {
+    CUDNN_STATUS_SUCCESS = 0,
+
+    /* Uncategorized errors */
+    CUDNN_STATUS_NOT_INITIALIZED = 1001,
+    CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH = 1002,
+    CUDNN_STATUS_SERIALIZATION_VERSION_MISMATCH = 1003,
+    CUDNN_STATUS_DEPRECATED = 1004,
+    CUDNN_STATUS_LICENSE_ERROR = 1005,
+    CUDNN_STATUS_RUNTIME_IN_PROGRESS = 1006,
+    CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 1007,
+    CUDNN_STATUS_SUBLIBRARY_LOADING_FAILED = 1008,
+
+    CUDNN_STATUS_BAD_PARAM = 2000,
+    CUDNN_STATUS_BAD_PARAM_NULL_POINTER = 2002,
+    CUDNN_STATUS_BAD_PARAM_MISALIGNED_POINTER = 2003,
+    CUDNN_STATUS_BAD_PARAM_NOT_FINALIZED = 2004,
+    CUDNN_STATUS_BAD_PARAM_OUT_OF_BOUND = 2005,
+    CUDNN_STATUS_BAD_PARAM_SIZE_INSUFFICIENT = 2006,
+    CUDNN_STATUS_BAD_PARAM_STREAM_MISMATCH = 2007,
+    CUDNN_STATUS_BAD_PARAM_SHAPE_MISMATCH = 2008,
+    CUDNN_STATUS_BAD_PARAM_DUPLICATED_ENTRIES = 2009,
+    CUDNN_STATUS_BAD_PARAM_ATTRIBUTE_TYPE = 2010,
+    CUDNN_STATUS_BAD_PARAM_CUDA_GRAPH_MISMATCH = 2011,
+
+    CUDNN_STATUS_NOT_SUPPORTED = 3000,
+    CUDNN_STATUS_NOT_SUPPORTED_GRAPH_PATTERN = 3001,
+    CUDNN_STATUS_NOT_SUPPORTED_SHAPE = 3002,
+    CUDNN_STATUS_NOT_SUPPORTED_DATA_TYPE = 3003,
+    CUDNN_STATUS_NOT_SUPPORTED_LAYOUT = 3004,
+    CUDNN_STATUS_NOT_SUPPORTED_INCOMPATIBLE_CUDA_DRIVER = 3005,
+    CUDNN_STATUS_NOT_SUPPORTED_INCOMPATIBLE_CUDART = 3006,
+    CUDNN_STATUS_NOT_SUPPORTED_ARCH_MISMATCH = 3007,
+    CUDNN_STATUS_NOT_SUPPORTED_RUNTIME_PREREQUISITE_MISSING = 3008,
+    CUDNN_STATUS_NOT_SUPPORTED_SUBLIBRARY_UNAVAILABLE = 3009,
+    CUDNN_STATUS_NOT_SUPPORTED_SHARED_MEMORY_INSUFFICIENT = 3010,
+    CUDNN_STATUS_NOT_SUPPORTED_PADDING = 3011,
+    CUDNN_STATUS_NOT_SUPPORTED_BAD_LAUNCH_PARAM = 3012,
+    CUDNN_STATUS_NOT_SUPPORTED_CUDA_GRAPH_NATIVE_API = 3013,
+
+    CUDNN_STATUS_INTERNAL_ERROR = 4000,
+
CUDNN_STATUS_INTERNAL_ERROR_COMPILATION_FAILED = 4001, + CUDNN_STATUS_INTERNAL_ERROR_UNEXPECTED_VALUE = 4002, + CUDNN_STATUS_INTERNAL_ERROR_HOST_ALLOCATION_FAILED = 4003, + CUDNN_STATUS_INTERNAL_ERROR_DEVICE_ALLOCATION_FAILED = 4004, + CUDNN_STATUS_INTERNAL_ERROR_BAD_LAUNCH_PARAM = 4005, + CUDNN_STATUS_INTERNAL_ERROR_TEXTURE_CREATION_FAILED = 4006, + + CUDNN_STATUS_EXECUTION_FAILED = 5000, + CUDNN_STATUS_EXECUTION_FAILED_CUDA_DRIVER = 5001, + CUDNN_STATUS_EXECUTION_FAILED_CUBLAS = 5002, + CUDNN_STATUS_EXECUTION_FAILED_CUDART = 5003, + CUDNN_STATUS_EXECUTION_FAILED_CURAND = 5004, + + CUDNN_STATUS_ALLOC_FAILED CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_INTERNAL_ERROR_HOST_ALLOCATION_FAILED, + CUDNN_STATUS_INVALID_VALUE CUDNN_DEPRECATED_ENUM = 2001 /* please transition to CUDNN_STATUS_BAD_PARAM instead */, + CUDNN_STATUS_ARCH_MISMATCH CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_NOT_SUPPORTED_ARCH_MISMATCH, + CUDNN_STATUS_MAPPING_ERROR CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_INTERNAL_ERROR_TEXTURE_CREATION_FAILED, + CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING CUDNN_DEPRECATED_ENUM = + CUDNN_STATUS_NOT_SUPPORTED_RUNTIME_PREREQUISITE_MISSING, + CUDNN_STATUS_VERSION_MISMATCH CUDNN_DEPRECATED_ENUM = CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH, +} cudnnStatus_t; + +#define CUDNN_STATUS_FULL_ERROR_CODE(category, specific_err) ((cudnnStatus_t)(0 + (category) + (specific_err))) +#define CUDNN_STATUS_CATEGORY(full_error_code) ((full_error_code) / 1000 * 1000) +#define CUDNN_STATUS_SPECIFIC_ERROR(full_error_code) ((full_error_code) % 1000) + +/* human-readable error messages */ +const char *CUDNNWINAPI +cudnnGetErrorString(cudnnStatus_t status); + +void CUDNNWINAPI +cudnnGetLastErrorString(char *message, size_t max_size); + +/* Forward definition in this version only */ +typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t CUDNN_DEPRECATED; + +typedef enum { + CUDNN_ERRQUERY_RAWCODE = 0, + CUDNN_ERRQUERY_NONBLOCKING = 1, + CUDNN_ERRQUERY_BLOCKING = 2, +} cudnnErrQueryMode_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag); + +cudnnStatus_t CUDNNWINAPI +cudnnGetProperty(libraryPropertyType type, int *value); + +cudnnStatus_t CUDNNWINAPI +cudnnCreate(cudnnHandle_t *handle); +cudnnStatus_t CUDNNWINAPI +cudnnDestroy(cudnnHandle_t handle); +cudnnStatus_t CUDNNWINAPI +cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId); +cudnnStatus_t CUDNNWINAPI +cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId); +/* + * CUDNN data type + */ +typedef enum { + CUDNN_DATA_FLOAT = 0, + CUDNN_DATA_DOUBLE = 1, + CUDNN_DATA_HALF = 2, + CUDNN_DATA_INT8 = 3, + CUDNN_DATA_INT32 = 4, + CUDNN_DATA_INT8x4 CUDNN_DEPRECATED_ENUM = 5, + CUDNN_DATA_UINT8 = 6, + CUDNN_DATA_UINT8x4 CUDNN_DEPRECATED_ENUM = 7, + CUDNN_DATA_INT8x32 CUDNN_DEPRECATED_ENUM = 8, + CUDNN_DATA_BFLOAT16 = 9, + CUDNN_DATA_INT64 = 10, + CUDNN_DATA_BOOLEAN = 11, + CUDNN_DATA_FP8_E4M3 = 12, + CUDNN_DATA_FP8_E5M2 = 13, + CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14, +} cudnnDataType_t; + +/* + * CUDNN math type + */ +typedef enum { + CUDNN_DEFAULT_MATH = 0, + CUDNN_TENSOR_OP_MATH = 1, + CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2, + CUDNN_FMA_MATH = 3, +} cudnnMathType_t; + +/* + * CUDNN propagate Nan + */ +typedef enum { + CUDNN_NOT_PROPAGATE_NAN CUDNN_DEPRECATED_ENUM = 0, + CUDNN_PROPAGATE_NAN CUDNN_DEPRECATED_ENUM = 1, +} cudnnNanPropagation_t; + +/* + * Behavior for OOB samples. OOB samples are samples where L+R > T is encountered during the gradient calculation. 
If
+ * gradMode is set to CUDNN_CTC_SKIP_OOB_GRADIENTS, then the CTC loss function does not write to the gradient buffer for
+ * that sample. Instead, the current values, even not finite, are retained. If gradMode is set to
+ * CUDNN_CTC_ZERO_OOB_GRADIENTS, then the gradient for that sample is set to zero. This guarantees a finite gradient.
+ */
+typedef enum {
+    CUDNN_CTC_ZERO_OOB_GRADIENTS = 0,
+    CUDNN_CTC_SKIP_OOB_GRADIENTS = 1,
+} cudnnCTCGradMode_t;
+
+typedef enum {
+    CUDNN_TENSOR_NCHW = 0,        /* row major (wStride = 1, hStride = w) */
+    CUDNN_TENSOR_NHWC = 1,        /* feature maps interleaved ( cStride = 1 )*/
+    CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */
+} cudnnTensorFormat_t;
+
+/*
+ * CUDNN ReduceTensor op type
+ */
+typedef enum {
+    CUDNN_REDUCE_TENSOR_ADD = 0,
+    CUDNN_REDUCE_TENSOR_MUL = 1,
+    CUDNN_REDUCE_TENSOR_MIN = 2,
+    CUDNN_REDUCE_TENSOR_MAX = 3,
+    CUDNN_REDUCE_TENSOR_AMAX = 4,
+    CUDNN_REDUCE_TENSOR_AVG = 5,
+    CUDNN_REDUCE_TENSOR_NORM1 = 6,
+    CUDNN_REDUCE_TENSOR_NORM2 = 7,
+    CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8,
+} cudnnReduceTensorOp_t;
+
+/*
+ * activation mode
+ */
+typedef enum {
+    CUDNN_ACTIVATION_SIGMOID = 0,
+    CUDNN_ACTIVATION_RELU = 1,
+    CUDNN_ACTIVATION_TANH = 2,
+    CUDNN_ACTIVATION_CLIPPED_RELU = 3,
+    CUDNN_ACTIVATION_ELU = 4,
+    CUDNN_ACTIVATION_IDENTITY = 5,
+    CUDNN_ACTIVATION_SWISH = 6
+} cudnnActivationMode_t CUDNN_DEPRECATED;
+
+typedef enum {
+    CUDNN_SEV_FATAL = 0,
+    CUDNN_SEV_ERROR = 1,
+    CUDNN_SEV_WARNING = 2,
+    CUDNN_SEV_INFO = 3,
+} cudnnSeverity_t;
+
+/* Message masks to be used with cudnnSetCallback() */
+#define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR)
+#define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING)
+#define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO)
+
+/* struct containing useful information for each API call */
+typedef struct cudnnDebugStruct {
+    unsigned cudnn_version;
+    cudnnStatus_t cudnnStatus;
+    unsigned time_sec;      /* epoch time in seconds */
+    unsigned time_usec;     /* microseconds part of epoch time */
+    unsigned time_delta;    /* time since start in seconds */
+    cudnnHandle_t handle;   /* cudnn handle */
+    cudaStream_t stream;    /* cuda stream ID */
+    unsigned long long pid; /* process ID */
+    unsigned long long tid; /* thread ID */
+    int cudaDeviceId;       /* CUDA device ID */
+    int reserved[15];       /* reserved for future use */
+} cudnnDebug_t;
+
+typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr);
+
+/*
+ * \brief Cross-library version checker.
+ * This function is implemented differently in each sub-library. Each sublib
+ * checks whether its own version matches that of its dependencies.
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
+ * CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH if the versions are inconsistent.
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnGraphVersionCheck(void); + +/* Maximum supported number of tensor dimensions */ +#define CUDNN_DIM_MAX 8 + +/* + * convolution mode + */ +typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t; + +/* + * CUDNN Reorder + */ +typedef enum { + CUDNN_DEFAULT_REORDER = 0, + CUDNN_NO_REORDER = 1, +} cudnnReorderType_t CUDNN_DEPRECATED; + +typedef void *cudnnBackendDescriptor_t; + +typedef struct cudnnFractionStruct { + int64_t numerator; + int64_t denominator; +} cudnnFraction_t; + +typedef enum { + CUDNN_POINTWISE_ADD = 0, + CUDNN_POINTWISE_ADD_SQUARE = 5, + CUDNN_POINTWISE_DIV = 6, + CUDNN_POINTWISE_MAX = 3, + CUDNN_POINTWISE_MIN = 2, + CUDNN_POINTWISE_MOD = 7, + CUDNN_POINTWISE_MUL = 1, + CUDNN_POINTWISE_POW = 8, + CUDNN_POINTWISE_SUB = 9, + + CUDNN_POINTWISE_ABS = 10, + CUDNN_POINTWISE_CEIL = 11, + CUDNN_POINTWISE_COS = 12, + CUDNN_POINTWISE_EXP = 13, + CUDNN_POINTWISE_FLOOR = 14, + CUDNN_POINTWISE_LOG = 15, + CUDNN_POINTWISE_NEG = 16, + CUDNN_POINTWISE_RSQRT = 17, + CUDNN_POINTWISE_SIN = 18, + CUDNN_POINTWISE_SQRT = 4, + CUDNN_POINTWISE_TAN = 19, + CUDNN_POINTWISE_ERF = 20, + CUDNN_POINTWISE_IDENTITY = 21, + CUDNN_POINTWISE_RECIPROCAL = 22, + CUDNN_POINTWISE_ATAN2 = 23, + + CUDNN_POINTWISE_RELU_FWD = 100, + CUDNN_POINTWISE_TANH_FWD = 101, + CUDNN_POINTWISE_SIGMOID_FWD = 102, + CUDNN_POINTWISE_ELU_FWD = 103, + CUDNN_POINTWISE_GELU_FWD = 104, + CUDNN_POINTWISE_SOFTPLUS_FWD = 105, + CUDNN_POINTWISE_SWISH_FWD = 106, + CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107, + + CUDNN_POINTWISE_RELU_BWD = 200, + CUDNN_POINTWISE_TANH_BWD = 201, + CUDNN_POINTWISE_SIGMOID_BWD = 202, + CUDNN_POINTWISE_ELU_BWD = 203, + CUDNN_POINTWISE_GELU_BWD = 204, + CUDNN_POINTWISE_SOFTPLUS_BWD = 205, + CUDNN_POINTWISE_SWISH_BWD = 206, + CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207, + + CUDNN_POINTWISE_CMP_EQ = 300, + CUDNN_POINTWISE_CMP_NEQ = 301, + CUDNN_POINTWISE_CMP_GT = 302, + CUDNN_POINTWISE_CMP_GE = 303, + CUDNN_POINTWISE_CMP_LT = 304, + CUDNN_POINTWISE_CMP_LE = 305, + + CUDNN_POINTWISE_LOGICAL_AND = 400, + CUDNN_POINTWISE_LOGICAL_OR = 401, + CUDNN_POINTWISE_LOGICAL_NOT = 402, + + CUDNN_POINTWISE_GEN_INDEX = 501, + + CUDNN_POINTWISE_BINARY_SELECT = 601, +} cudnnPointwiseMode_t; + +typedef enum { + CUDNN_RESAMPLE_NEAREST = 0, + CUDNN_RESAMPLE_BILINEAR = 1, + CUDNN_RESAMPLE_AVGPOOL = 2, + CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2, + CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4, + CUDNN_RESAMPLE_MAXPOOL = 3, +} cudnnResampleMode_t; + +typedef enum { + CUDNN_SIGNAL_SET = 0, + CUDNN_SIGNAL_WAIT = 1, +} cudnnSignalMode_t; + +typedef enum { + CUDNN_GENSTATS_SUM_SQSUM = 0, +} cudnnGenStatsMode_t; + +typedef enum { + CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0, + CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1, +} cudnnBnFinalizeStatsMode_t; + +typedef enum { + CUDNN_RNG_DISTRIBUTION_BERNOULLI, + CUDNN_RNG_DISTRIBUTION_UNIFORM, + CUDNN_RNG_DISTRIBUTION_NORMAL, +} cudnnRngDistribution_t; + +typedef enum { + CUDNN_ATTR_POINTWISE_MODE = 0, + CUDNN_ATTR_POINTWISE_MATH_PREC = 1, + CUDNN_ATTR_POINTWISE_NAN_PROPAGATION CUDNN_DEPRECATED_ENUM = 2, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3, + CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5, + CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6, + CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7, + CUDNN_ATTR_POINTWISE_SWISH_BETA = 8, + CUDNN_ATTR_POINTWISE_AXIS = 9, + + CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100, + CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101, + CUDNN_ATTR_CONVOLUTION_DILATIONS = 102, + 
CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103, + CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104, + CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105, + CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106, + + CUDNN_ATTR_ENGINEHEUR_MODE = 200, + CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201, + CUDNN_ATTR_ENGINEHEUR_RESULTS = 202, + CUDNN_ATTR_ENGINEHEUR_SM_COUNT_TARGET = 203, + + CUDNN_ATTR_ENGINECFG_ENGINE = 300, + CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301, + CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302, + CUDNN_ATTR_ENGINECFG_WORKSPACE_SIZE = 303, + CUDNN_ATTR_ENGINECFG_SHARED_MEMORY_USED = 304, + + CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400, + CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401, + CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402, + CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403, + CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404, + CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405, + CUDNN_ATTR_EXECUTION_PLAN_KERNEL_CACHE = 406, + + CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500, + CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503, + + CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600, + CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601, + + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717, + + CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750, + CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751, + CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752, + CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755, + CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756, + CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757, + CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758, + + CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770, + CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771, + CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772, + CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773, + CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774, + + CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780, + CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784, + CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC 
= 790, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793, + CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796, + + CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800, + CUDNN_ATTR_OPERATIONGRAPH_OPS = 801, + CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802, + CUDNN_ATTR_OPERATIONGRAPH_IS_DYNAMIC_SHAPE_ENABLED = 803, + + CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900, + CUDNN_ATTR_TENSOR_DATA_TYPE = 901, + CUDNN_ATTR_TENSOR_DIMENSIONS = 902, + CUDNN_ATTR_TENSOR_STRIDES = 903, + CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904, + CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905, + CUDNN_ATTR_TENSOR_UNIQUE_ID = 906, + CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907, + CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908, + CUDNN_ATTR_TENSOR_REORDERING_MODE = 909, + CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913, + + CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000, + CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001, + CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002, + CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003, + + CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100, + CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101, + + CUDNN_ATTR_KNOB_INFO_TYPE = 1200, + CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201, + CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202, + CUDNN_ATTR_KNOB_INFO_STRIDE = 1203, + + CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300, + CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301, + CUDNN_ATTR_ENGINE_KNOB_INFO = 1302, + CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303, + CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304, + CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305, + CUDNN_ATTR_ENGINE_SM_COUNT_TARGET = 1306, + + CUDNN_ATTR_MATMUL_COMP_TYPE = 1500, + CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503, + + CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520, + CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521, + CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522, + CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523, + CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT CUDNN_DEPRECATED_ENUM = 1524, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527, + + CUDNN_ATTR_REDUCTION_OPERATOR = 1600, + CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601, + + CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610, + CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611, + CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612, + + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630, + + CUDNN_ATTR_RESAMPLE_MODE = 1700, + CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701, + CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702, + CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703, + CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704, + CUDNN_ATTR_RESAMPLE_STRIDES = 1705, + CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706, + CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707, + CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708, + + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710, 
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA CUDNN_DEPRECATED_ENUM = 1713, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA CUDNN_DEPRECATED_ENUM = 1714, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716, + + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA CUDNN_DEPRECATED_ENUM = 1723, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA CUDNN_DEPRECATED_ENUM = 1724, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727, + + CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800, + CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801, + CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802, + CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803, + + CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900, + CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901, + CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902, + CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903, + CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904, + + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_CONTAINER_DESC = 1950, + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_YDESC = 1951, + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_SEQUENCE_DESC = 1952, + CUDNN_ATTR_OPERATION_PAGED_CACHE_LOAD_PAGE_TABLE_DESC = 1953, + + CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000, + CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001, + CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002, + CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003, + CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004, + CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005, + CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006, + CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007, + CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012, + CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013, + CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014, + + CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100, + CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101, + CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102, + CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103, + CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104, + CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105, + CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106, + CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107, + CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108, + CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109, + CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110, + + CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200, + CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201, + + CUDNN_ATTR_RNG_DISTRIBUTION = 2300, + CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301, + CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302, + CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303, + CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304, + CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305, + + CUDNN_ATTR_OPERATION_RNG_YDESC = 2310, + CUDNN_ATTR_OPERATION_RNG_SEED = 2311, + CUDNN_ATTR_OPERATION_RNG_DESC = 2312, + CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313, + + CUDNN_ATTR_KERNEL_CACHE_OPERATION_GRAPH = 2400, + CUDNN_ATTR_KERNEL_CACHE_IS_ENGINECFG_KERNEL_CACHED = 2401 + +} cudnnBackendAttributeName_t; + +typedef enum { + CUDNN_TYPE_HANDLE = 0, + CUDNN_TYPE_DATA_TYPE, + 
CUDNN_TYPE_BOOLEAN, + CUDNN_TYPE_INT64, + CUDNN_TYPE_FLOAT, + CUDNN_TYPE_DOUBLE, + CUDNN_TYPE_VOID_PTR, + CUDNN_TYPE_CONVOLUTION_MODE, + CUDNN_TYPE_HEUR_MODE, + CUDNN_TYPE_KNOB_TYPE, + CUDNN_TYPE_NAN_PROPOGATION CUDNN_DEPRECATED_ENUM, + CUDNN_TYPE_NUMERICAL_NOTE, + CUDNN_TYPE_LAYOUT_TYPE, + CUDNN_TYPE_ATTRIB_NAME, + CUDNN_TYPE_POINTWISE_MODE, + CUDNN_TYPE_BACKEND_DESCRIPTOR, + CUDNN_TYPE_GENSTATS_MODE, + CUDNN_TYPE_BN_FINALIZE_STATS_MODE, + CUDNN_TYPE_REDUCTION_OPERATOR_TYPE, + CUDNN_TYPE_BEHAVIOR_NOTE, + CUDNN_TYPE_TENSOR_REORDERING_MODE, + CUDNN_TYPE_RESAMPLE_MODE, + CUDNN_TYPE_PADDING_MODE, + CUDNN_TYPE_INT32, + CUDNN_TYPE_CHAR, + CUDNN_TYPE_SIGNAL_MODE, + CUDNN_TYPE_FRACTION, + CUDNN_TYPE_NORM_MODE, + CUDNN_TYPE_NORM_FWD_PHASE, + CUDNN_TYPE_RNG_DISTRIBUTION +} cudnnBackendAttributeType_t; + +typedef enum { + CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0, + CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR, + CUDNN_BACKEND_ENGINE_DESCRIPTOR, + CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, + CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, + CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, + CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR, + CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR, + CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR, + CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR, + CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, + CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, + CUDNN_BACKEND_TENSOR_DESCRIPTOR, + CUDNN_BACKEND_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR, + CUDNN_BACKEND_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR, + CUDNN_BACKEND_RESAMPLE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR, + CUDNN_BACKEND_RNG_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR, + CUDNN_BACKEND_KERNEL_CACHE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_PAGED_CACHE_LOAD_DESCRIPTOR, +} cudnnBackendDescriptorType_t; + +typedef enum { + CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0, + CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS, + CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION, + CUDNN_NUMERICAL_NOTE_FFT, + CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC, + CUDNN_NUMERICAL_NOTE_WINOGRAD, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13, + CUDNN_NUMERICAL_NOTE_STRICT_NAN_PROP, + CUDNN_NUMERICAL_NOTE_TYPE_COUNT, +} cudnnBackendNumericalNote_t; + +typedef enum { + CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0, + CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1, + CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2, + CUDNN_BEHAVIOR_NOTE_SUPPORTS_CUDA_GRAPH_NATIVE_API = 3, + CUDNN_BEHAVIOR_NOTE_TYPE_COUNT, +} cudnnBackendBehaviorNote_t; + +typedef enum { + CUDNN_KNOB_TYPE_SPLIT_K CUDNN_DEPRECATED_ENUM = 0, + CUDNN_KNOB_TYPE_SWIZZLE = 1, + CUDNN_KNOB_TYPE_TILE_SIZE = 2, + CUDNN_KNOB_TYPE_USE_TEX CUDNN_DEPRECATED_ENUM = 3, + CUDNN_KNOB_TYPE_EDGE = 4, + CUDNN_KNOB_TYPE_KBLOCK CUDNN_DEPRECATED_ENUM = 5, + 
CUDNN_KNOB_TYPE_LDGA CUDNN_DEPRECATED_ENUM = 6, + CUDNN_KNOB_TYPE_LDGB CUDNN_DEPRECATED_ENUM = 7, + CUDNN_KNOB_TYPE_CHUNK_K CUDNN_DEPRECATED_ENUM = 8, + CUDNN_KNOB_TYPE_SPLIT_H CUDNN_DEPRECATED_ENUM = 9, + CUDNN_KNOB_TYPE_WINO_TILE CUDNN_DEPRECATED_ENUM = 10, + CUDNN_KNOB_TYPE_MULTIPLY = 11, + CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12, + CUDNN_KNOB_TYPE_TILEK = 13, + CUDNN_KNOB_TYPE_STAGES = 14, + CUDNN_KNOB_TYPE_REDUCTION_MODE = 15, + CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE CUDNN_DEPRECATED_ENUM = 16, + CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17, + CUDNN_KNOB_TYPE_IDX_MODE CUDNN_DEPRECATED_ENUM = 18, + CUDNN_KNOB_TYPE_SLICED CUDNN_DEPRECATED_ENUM = 19, + CUDNN_KNOB_TYPE_SPLIT_RS CUDNN_DEPRECATED_ENUM = 20, + CUDNN_KNOB_TYPE_SINGLEBUFFER CUDNN_DEPRECATED_ENUM = 21, + CUDNN_KNOB_TYPE_LDGC CUDNN_DEPRECATED_ENUM = 22, + CUDNN_KNOB_TYPE_SPECFILT = 23, + CUDNN_KNOB_TYPE_KERNEL_CFG = 24, + CUDNN_KNOB_TYPE_WORKSPACE = 25, + CUDNN_KNOB_TYPE_TILE_CGA CUDNN_DEPRECATED_ENUM = 26, + CUDNN_KNOB_TYPE_TILE_CGA_M = 27, + CUDNN_KNOB_TYPE_TILE_CGA_N = 28, + CUDNN_KNOB_TYPE_BLOCK_SIZE = 29, + CUDNN_KNOB_TYPE_OCCUPANCY = 30, + CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31, + CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK CUDNN_DEPRECATED_ENUM = 32, + CUDNN_KNOB_TYPE_SPLIT_COLS = 33, + CUDNN_KNOB_TYPE_TILE_ROWS = 34, + CUDNN_KNOB_TYPE_TILE_COLS = 35, + CUDNN_KNOB_TYPE_LOAD_SIZE = 36, + CUDNN_KNOB_TYPE_COUNTS, +} cudnnBackendKnobType_t; + +typedef enum { + CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0, + CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3, + CUDNN_LAYOUT_TYPE_COUNT = 4, +} cudnnBackendLayoutType_t; + +typedef enum { + CUDNN_HEUR_MODE_INSTANT = 0, + CUDNN_HEUR_MODE_B = 1, + CUDNN_HEUR_MODE_FALLBACK = 2, + CUDNN_HEUR_MODE_A = 3, + CUDNN_HEUR_MODES_COUNT = 4, +} cudnnBackendHeurMode_t; + +typedef enum { + CUDNN_TENSOR_REORDERING_NONE = 0, + CUDNN_TENSOR_REORDERING_INT8x32 = 1, + CUDNN_TENSOR_REORDERING_F16x16 = 2, +} cudnnBackendTensorReordering_t; + +typedef enum { + CUDNN_ZERO_PAD = 0, + CUDNN_NEG_INF_PAD = 1, + CUDNN_EDGE_VAL_PAD = 2, +} cudnnPaddingMode_t; + +typedef enum { + CUDNN_LAYER_NORM = 0, + CUDNN_INSTANCE_NORM = 1, + CUDNN_BATCH_NORM = 2, + CUDNN_GROUP_NORM = 3, + CUDNN_RMS_NORM = 4, +} cudnnBackendNormMode_t; + +typedef enum { + CUDNN_NORM_FWD_INFERENCE = 0, + CUDNN_NORM_FWD_TRAINING = 1, +} cudnnBackendNormFwdPhase_t; + +cudnnStatus_t CUDNNWINAPI +cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t elementCount, + const void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t requestedElementCount, + int64_t *elementCount, + void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack); + +cudnnStatus_t CUDNNWINAPI 
+cudnnBackendPopulateCudaGraph(cudnnHandle_t handle, + cudnnBackendDescriptor_t executionPlan, + cudnnBackendDescriptor_t variantPack, + cudaGraph_t graph); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendUpdateCudaGraph(cudnnHandle_t handle, + cudnnBackendDescriptor_t executionPlan, + cudnnBackendDescriptor_t variantPack, + cudaGraph_t graph); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_GRAPH_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_ops.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..72d2942e14c8790308da1a598664dcfc4b468431 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_ops.h @@ -0,0 +1,1316 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops : cuDNN's basic definitions and basic operations. 
+ */
+
+#if !defined(CUDNN_OPS_H_)
+#define CUDNN_OPS_H_
+
+#include <stdint.h>
+
+#include "cudnn_version.h"
+#include "cudnn_graph.h"
+
+/* These version numbers are autogenerated, do not edit manually. */
+#define CUDNN_OPS_MAJOR 9
+#define CUDNN_OPS_MINOR 5
+#define CUDNN_OPS_PATCH 1
+
+#if (CUDNN_OPS_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_MINOR != CUDNN_MINOR) || (CUDNN_OPS_PATCH != CUDNN_PATCHLEVEL)
+#error Version mismatch in cuDNN OPS INFER!!!
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Data structures to represent Image/Filter and the Neural Network Layer */
+typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t;
+typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t CUDNN_DEPRECATED;
+typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t CUDNN_DEPRECATED;
+typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t;
+typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t CUDNN_DEPRECATED;
+typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t;
+typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t CUDNN_DEPRECATED;
+typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t CUDNN_DEPRECATED;
+typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t;
+typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t CUDNN_DEPRECATED;
+/*
+ * CUDNN Determinism
+ */
+typedef enum {
+    CUDNN_NON_DETERMINISTIC = 0,
+    CUDNN_DETERMINISTIC     = 1,
+} cudnnDeterminism_t;
+
+/* Create an instance of a generic Tensor descriptor */
+cudnnStatus_t CUDNNWINAPI
+cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc,
+                           cudnnTensorFormat_t format,
+                           cudnnDataType_t dataType, /* image data type */
+                           int n,                    /* number of inputs (batch size) */
+                           int c,                    /* number of input feature maps */
+                           int h,                    /* height of input section */
+                           int w);                   /* width of input section */
+
+cudnnStatus_t CUDNNWINAPI
+cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
+                             cudnnDataType_t dataType, /* image data type */
+                             int n,                    /* number of inputs (batch size) */
+                             int c,                    /* number of input feature maps */
+                             int h,                    /* height of input section */
+                             int w,                    /* width of input section */
+                             int nStride,
+                             int cStride,
+                             int hStride,
+                             int wStride);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc,
+                           cudnnDataType_t *dataType, /* image data type */
+                           int *n,                    /* number of inputs (batch size) */
+                           int *c,                    /* number of input feature maps */
+                           int *h,                    /* height of input section */
+                           int *w,                    /* width of input section */
+                           int *nStride,
+                           int *cStride,
+                           int *hStride,
+                           int *wStride);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc,
+                           cudnnDataType_t dataType,
+                           int nbDims,
+                           const int dimA[],
+                           const int strideA[]);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
+                             cudnnTensorFormat_t format,
+                             cudnnDataType_t dataType,
+                             int nbDims,
+                             const int dimA[]);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc,
+                           int nbDimsRequested,
+                           cudnnDataType_t *dataType,
+                           int *nbDims,
+                           int dimA[],
+                           int strideA[]);
+
+cudnnStatus_t CUDNNWINAPI
+cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size);
+
+/* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride
+
+   1)Example of all images in row major order one
batch of features after the other (with an optional padding on row) + input_stride : c x h x h_stride + feature_stride : h x h_stride + h_stride : >= w ( h_stride = w if no padding) + w_stride : 1 + + + 2)Example of all images in row major with features maps interleaved + input_stride : c x h x h_stride + feature_stride : 1 + h_stride : w x c + w_stride : c + + 3)Example of all images in column major order one batch of features after the other (with optional padding on column) + input_stride : c x w x w_stride + feature_stride : w x w_stride + h_stride : 1 + w_stride : >= h + +*/ + +/* Destroy an instance of Tensor4d descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc); + +/* Fold/unfold transforms */ +typedef enum { + CUDNN_TRANSFORM_FOLD = 0U, + CUDNN_TRANSFORM_UNFOLD = 1U, +} cudnnFoldingDirection_t; + +/** Create a destination descriptor for cudnnTransformTensor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc, + const cudnnTensorDescriptor_t srcDesc, + cudnnTensorDescriptor_t destDesc, + size_t *destSizeInBytes); + +/** Create an empty tensor transform descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc); + +/** Initialize a previously created tensor transform descriptor. */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + const uint32_t nbDims, + const cudnnTensorFormat_t destFormat, + const int32_t padBeforeA[], + const int32_t padAfterA[], + const uint32_t foldA[], + const cudnnFoldingDirection_t direction); + +/** + * Retrieves the values stored in a previously initialized tensor transform + * descriptor. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + uint32_t nbDimsRequested, + cudnnTensorFormat_t *destFormat, + int32_t padBeforeA[], + int32_t padAfterA[], + uint32_t foldA[], + cudnnFoldingDirection_t *direction); + +/** + * Destroys a previously created tensor transform descriptor. 
+ */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc); + +/* Tensor layout conversion helper (y = alpha * x + beta * y) */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnTransformTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnTransformTensorEx(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnTensorDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnTensorDescriptor_t destDesc, + void *destData); + +/* Tensor Bias addition : C = alpha * A + beta * C */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnAddTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN OpTensor op type + */ +typedef enum { + CUDNN_OP_TENSOR_ADD = 0, + CUDNN_OP_TENSOR_MUL = 1, + CUDNN_OP_TENSOR_MIN = 2, + CUDNN_OP_TENSOR_MAX = 3, + CUDNN_OP_TENSOR_SQRT = 4, + CUDNN_OP_TENSOR_NOT = 5, +} cudnnOpTensorOp_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t opTensorOp, + cudnnDataType_t opTensorCompType, + cudnnNanPropagation_t opTensorNanOpt); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t *opTensorOp, + cudnnDataType_t *opTensorCompType, + cudnnNanPropagation_t *opTensorNanOpt); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc); + +/* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */ +/* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnOpTensor(cudnnHandle_t handle, + const cudnnOpTensorDescriptor_t opTensorDesc, + const void *alpha1, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *alpha2, + const cudnnTensorDescriptor_t bDesc, + const void *B, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN ReduceTensor indices type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_NO_INDICES = 0, + CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1, +} cudnnReduceTensorIndices_t CUDNN_DEPRECATED; + +/* + * CUDNN tensor indices type size (all unsigned) + * Currently not supported, default is 32 bit unsigned. 
+ */ +typedef enum { + CUDNN_32BIT_INDICES = 0, + CUDNN_64BIT_INDICES = 1, + CUDNN_16BIT_INDICES = 2, + CUDNN_8BIT_INDICES = 3, +} cudnnIndicesType_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t reduceTensorOp, + cudnnDataType_t reduceTensorCompType, + cudnnNanPropagation_t reduceTensorNanOpt, + cudnnReduceTensorIndices_t reduceTensorIndices, + cudnnIndicesType_t reduceTensorIndicesType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t *reduceTensorOp, + cudnnDataType_t *reduceTensorCompType, + cudnnNanPropagation_t *reduceTensorNanOpt, + cudnnReduceTensorIndices_t *reduceTensorIndices, + cudnnIndicesType_t *reduceTensorIndicesType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc); + +/* Helper function to return the minimum size of the index space to be passed to the reduction given the input and + * output tensors */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetReductionIndicesSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output + * tensors */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetReductionWorkspaceSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Tensor operation : C = reduce op( alpha * A ) + beta * C */ +/* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */ +/* The indices space is ignored for reduce ops other than min or max. 
*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnReduceTensor(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + void *indices, + size_t indicesSizeInBytes, + void *workspace, + size_t workspaceSizeInBytes, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* Set all values of a tensor to a given value : y[i] = value[0] */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr); + +/* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha); + +/* Create an instance of FilterStruct */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int k, /* number of output feature maps */ + int c, /* number of input feature maps */ + int h, /* height of each input filter */ + int w); /* width of each input filter */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *k, /* number of output feature maps */ + int *c, /* number of input feature maps */ + int *h, /* height of each input filter */ + int *w); /* width of each input filter */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int nbDims, + const int filterDimA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *nbDims, + int filterDimA[]); +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnTransformFilter(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnFilterDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnFilterDescriptor_t destDesc, + void *destData); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc); + +/* + * softmax algorithm + */ +typedef enum { + CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */ + CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */ + CUDNN_SOFTMAX_LOG = 2 +} cudnnSoftmaxAlgorithm_t; + +typedef enum { + CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */ + CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */ +} cudnnSoftmaxMode_t; + +/* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxForward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const 
void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * pooling mode + */ +typedef enum { + CUDNN_POOLING_MAX = 0, + CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */ + CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */ + CUDNN_POOLING_MAX_DETERMINISTIC = 3 +} cudnnPoolingMode_t CUDNN_DEPRECATED; + +/* Create an instance of pooling descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t mode, + cudnnNanPropagation_t maxpoolingNanOpt, + int windowHeight, + int windowWidth, + int verticalPadding, + int horizontalPadding, + int verticalStride, + int horizontalStride); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *windowHeight, + int *windowWidth, + int *verticalPadding, + int *horizontalPadding, + int *verticalStride, + int *horizontalStride); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc, + const cudnnPoolingMode_t mode, + const cudnnNanPropagation_t maxpoolingNanOpt, + int nbDims, + const int windowDimA[], + const int paddingA[], + const int strideA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + int nbDimsRequested, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *nbDims, + int windowDimA[], + int paddingA[], + int strideA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int nbDims, + int outputTensorDimA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int *n, + int *c, + int *h, + int *w); + +/* Destroy an instance of pooling descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc); + +/* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward pooling */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnPoolingForward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t mode, + cudnnNanPropagation_t reluNanOpt, + double coef); /* ceiling for clipped RELU, alpha for ELU */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t *mode, + cudnnNanPropagation_t *reluNanOpt, + double *coef); /* ceiling for clipped RELU, alpha for ELU */ + 
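/*
 * Usage sketch (illustrative only; not part of the original header). The
 * descriptor APIs above follow a create/set/use/destroy pattern, and each
 * compute routine blends into its output as "y = alpha * Op(x) + beta * y".
 * Assuming a valid cudnnHandle_t `handle` and device buffers `d_x`, `d_y`
 * holding n*c floats, a per-sample softmax over the C dimension could be
 * issued as:
 *
 *     cudnnTensorDescriptor_t desc;
 *     cudnnStatus_t st = cudnnCreateTensorDescriptor(&desc);
 *     if (st == CUDNN_STATUS_SUCCESS)
 *         st = cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW,
 *                                         CUDNN_DATA_FLOAT, n, c, 1, 1);
 *     if (st == CUDNN_STATUS_SUCCESS) {
 *         const float alpha = 1.0f, beta = 0.0f;   y = 1*softmax(x) + 0*y
 *         st = cudnnSoftmaxForward(handle, CUDNN_SOFTMAX_ACCURATE,
 *                                  CUDNN_SOFTMAX_MODE_CHANNEL,
 *                                  &alpha, desc, d_x, &beta, desc, d_y);
 *     }
 *     cudnnDestroyTensorDescriptor(desc);
 */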
+CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double swish_beta); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc); + +/* Function to perform forward activation */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnActivationForward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * Create an instance of LRN (Local Response Normalization) descriptor + * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper + */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc); + +#define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */ +#define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */ +#define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */ +#define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */ + +/* LRN layer mode */ +typedef enum { + CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */ +} cudnnLRNMode_t; + +/* + * Uses a window [center-lookBehind, center+lookAhead], where + * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1. + * Values of double parameters cast to tensor data type. + */ +cudnnStatus_t CUDNNWINAPI +cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK); +/* + * Retrieve the settings currently stored in an LRN layer descriptor + * Any of the provided pointers can be NULL (no corresponding value will be returned) + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK); + +/* Destroy an instance of LRN descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc); + +/* LRN functions: output = alpha * normalize(x) + beta * old_y */ + +/* LRN cross-channel forward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0, +} cudnnDivNormMode_t; + +/* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */ +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. 
(one value per CHW...-slice, normalized over N slice) */ + CUDNN_BATCHNORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_BATCHNORM_SPATIAL = 1, + + /* + * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors). + * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values + */ + CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2, +} cudnnBatchNormMode_t CUDNN_DEPRECATED; + +#define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */ + +/* + * Derives a tensor descriptor from layer data descriptor for BatchNormalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnBatchNormMode_t mode); + +typedef enum { + CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */ + CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */ + CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */ +} cudnnBatchNormOps_t CUDNN_DEPRECATED; + +/* + * Performs Batch Normalization during Inference: + * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k] + * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed + * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining + * above for notes on function arguments. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardInference(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + const void *estimatedMean, + const void *estimatedVariance, + double epsilon); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_NORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_NORM_PER_CHANNEL = 1, +} cudnnNormMode_t CUDNN_DEPRECATED; + +typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t CUDNN_DEPRECATED; + +/* + * Derives a tensor descriptor from layer data descriptor for Normalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions. 
+ */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc, + cudnnTensorDescriptor_t derivedNormMeanVarDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnNormMode_t mode, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +typedef enum { + CUDNN_NORM_OPS_NORM = 0, /* do normalization only */ + CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */ + CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */ +} cudnnNormOps_t CUDNN_DEPRECATED; + +/* + * Performs Normalization during Inference: + * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k] + * with normScale, normBias, runningMean, runningInvVariance tensors indexed + * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining + * above for notes on function arguments. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardInference(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + const cudnnTensorDescriptor_t normMeanVarDesc, + const void *estimatedMean, + const void *estimatedVariance, + const cudnnTensorDescriptor_t zDesc, + const void *z, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + double epsilon, + int groupCnt); /* Place hold for future work*/ + +/* APIs for spatial transformer network*/ +typedef enum { + CUDNN_SAMPLER_BILINEAR = 0, +} cudnnSamplerType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc, + cudnnSamplerType_t samplerType, + cudnnDataType_t dataType, + const int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *theta, + void *grid); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerForward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *grid, + const void *beta, + cudnnTensorDescriptor_t yDesc, + void *y); + +typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc); + +/*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes); + +/*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t 
dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +/* Restores the dropout descriptor to a previously saved-off state */ +cudnnStatus_t CUDNNWINAPI +cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +cudnnStatus_t CUDNNWINAPI +cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float *dropout, + void **states, + unsigned long long *seed); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutForward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t xdesc, + const void *x, + const cudnnTensorDescriptor_t ydesc, + void *y, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* TODO: move these enums out to the appropriate submodule */ +typedef enum { + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0, + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1, + CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2, + CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3, + CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4, + CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7, + CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8 +} cudnnConvolutionFwdAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7 +} cudnnConvolutionBwdFilterAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6 +} cudnnConvolutionBwdDataAlgo_t; + +typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t; + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH if the versions are inconsistent. 
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsVersionCheck(void); + +/* Function to perform backward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxBackward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward pooling */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnPoolingBackward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward activation */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnActivationBackward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* LRN cross-channel backward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + const void *dy, + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */ + void *dx, /* output x differential */ + void *dMeans); /* output means differential, can be NULL */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const 
cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes); + +/* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTraining( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + + /* Shared desc for the next 6 tensors in the argument list. + Data type to be set as follows: + type = (typeOf(x) == double) ? double : float + Dimensions for this descriptor depend on normalization mode + - Spatial Normalization : tensors are expected to have dims 1xCx1x1 + (normalization is performed across NxHxW) + - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW + (normalization is performed across N) */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + + /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */ + const void *bnScale, + const void *bnBias, + + /* MUST use factor=1 in the very first call of a complete training cycle. + Use a factor=1/(1+n) at N-th call to the function to get + Cumulative Moving Average (CMA) behavior + CMA[n] = (x[1]+...+x[n])/n + Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) = + ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) = + CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */ + double exponentialAverageFactor, + + /* Used in Training phase only. + runningMean = newMean*factor + runningMean*(1-factor) */ + void *resultRunningMean, + /* Output in training mode, input in inference. Is the moving average + of variance[x] (factor is applied in the same way as for runningMean) */ + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance); + +/* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTrainingEx( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + + double exponentialAverageFactor, + void *resultRunningMean, + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + + cudnnActivationDescriptor_t activationDesc, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* Performs backward pass of Batch Normalization layer. 
Returns x gradient, + * bnScale gradient and bnBias gradient */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackward(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */ + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScale, /* bnBias doesn't affect backpropagation */ + /* scale and bias diff are not backpropagated below this layer */ + void *dBnScaleResult, + void *dBnBiasResult, + /* Same epsilon as forward pass */ + double epsilon, + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScaleData, + const void *bnBiasData, /* needed if there is activation */ + void *dBnScaleData, + void *dBnBiasData, + double epsilon, /* Same epsilon as forward pass */ + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnActivationDescriptor_t activationDesc, + const 
cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +/* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardTraining(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + double exponentialAverageFactor, + const cudnnTensorDescriptor_t normMeanVarDesc, + void *resultRunningMean, + void *resultRunningVariance, + /* Has to be >= 0. Should be the same in forward and backward functions. */ + double epsilon, + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnNormalizationBackward(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const void *normScaleData, + const void *normBiasData, /* needed if there is activation */ + void *dNormScaleData, + void *dNormBiasData, + double epsilon, /* Same epsilon as forward pass */ + const cudnnTensorDescriptor_t normMeanVarDesc, + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *dgrid, + void *dtheta); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerBackward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const void *alphaDgrid, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *grid, + const void *betaDgrid, + void *dgrid); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutBackward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t dydesc, + const void *dy, + const cudnnTensorDescriptor_t dxdesc, + void *dx, + 
void *reserveSpace,
+                     size_t reserveSpaceSizeInBytes);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* CUDNN_OPS_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_ops_v9.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_ops_v9.h new file mode 100644 index 0000000000000000000000000000000000000000..72d2942e14c8790308da1a598664dcfc4b468431 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_ops_v9.h @@ -0,0 +1,1316 @@
+/*
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+/*
+ * cudnn_ops : cuDNN's basic definitions and basic operations.
+ */
+
+#if !defined(CUDNN_OPS_H_)
+#define CUDNN_OPS_H_
+
+#include <stdint.h>
+
+#include "cudnn_version.h"
+#include "cudnn_graph.h"
+
+/* These version numbers are autogenerated, do not edit manually.
*/ +#define CUDNN_OPS_MAJOR 9 +#define CUDNN_OPS_MINOR 5 +#define CUDNN_OPS_PATCH 1 + +#if (CUDNN_OPS_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_MINOR != CUDNN_MINOR) || (CUDNN_OPS_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS INFER!!! +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Data structures to represent Image/Filter and the Neural Network Layer */ +typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t; +typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t CUDNN_DEPRECATED; +typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t CUDNN_DEPRECATED; +typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t; +typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t CUDNN_DEPRECATED; +typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t; +typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t CUDNN_DEPRECATED; +typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t CUDNN_DEPRECATED; +typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t; +typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t CUDNN_DEPRECATED; +/* + * CUDNN Determinism + */ +typedef enum { + CUDNN_NON_DETERMINISTIC = 0, + CUDNN_DETERMINISTIC = 1, +} cudnnDeterminism_t; + +/* Create an instance of a generic Tensor descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w); /* width of input section */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w, /* width of input section */ + int nStride, + int cStride, + int hStride, + int wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t *dataType, /* image data type */ + int *n, /* number of inputs (batch size) */ + int *c, /* number of input feature maps */ + int *h, /* height of input section */ + int *w, /* width of input section */ + int *nStride, + int *cStride, + int *hStride, + int *wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, + int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, + int *nbDims, + int dimA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size); + +/* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride + + 1)Example of all images in row major order one batch of features after the other (with an optional padding on row) + input_stride : c x h x h_stride + feature_stride : h x h_stride + h_stride : >= w ( h_stride = w if no padding) + w_stride : 
1 + + + 2)Example of all images in row major with features maps interleaved + input_stride : c x h x h_stride + feature_stride : 1 + h_stride : w x c + w_stride : c + + 3)Example of all images in column major order one batch of features after the other (with optional padding on column) + input_stride : c x w x w_stride + feature_stride : w x w_stride + h_stride : 1 + w_stride : >= h + +*/ + +/* Destroy an instance of Tensor4d descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc); + +/* Fold/unfold transforms */ +typedef enum { + CUDNN_TRANSFORM_FOLD = 0U, + CUDNN_TRANSFORM_UNFOLD = 1U, +} cudnnFoldingDirection_t; + +/** Create a destination descriptor for cudnnTransformTensor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc, + const cudnnTensorDescriptor_t srcDesc, + cudnnTensorDescriptor_t destDesc, + size_t *destSizeInBytes); + +/** Create an empty tensor transform descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc); + +/** Initialize a previously created tensor transform descriptor. */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + const uint32_t nbDims, + const cudnnTensorFormat_t destFormat, + const int32_t padBeforeA[], + const int32_t padAfterA[], + const uint32_t foldA[], + const cudnnFoldingDirection_t direction); + +/** + * Retrieves the values stored in a previously initialized tensor transform + * descriptor. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + uint32_t nbDimsRequested, + cudnnTensorFormat_t *destFormat, + int32_t padBeforeA[], + int32_t padAfterA[], + uint32_t foldA[], + cudnnFoldingDirection_t *direction); + +/** + * Destroys a previously created tensor transform descriptor. 
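+ *
+ * Illustrative lifecycle sketch for the deprecated fold/unfold API above; the
+ * arrays padBefore/padAfter/fold and the descriptors srcDesc/dstDesc are
+ * hypothetical caller-provided names, and error checking is elided:
+ *
+ *   cudnnTensorTransformDescriptor_t t;
+ *   cudnnCreateTensorTransformDescriptor(&t);
+ *   cudnnSetTensorTransformDescriptor(t, 4, CUDNN_TENSOR_NCHW,
+ *                                     padBefore, padAfter, fold,
+ *                                     CUDNN_TRANSFORM_FOLD);
+ *   size_t dstBytes;
+ *   cudnnInitTransformDest(t, srcDesc, dstDesc, &dstBytes);
+ *   cudnnDestroyTensorTransformDescriptor(t);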
+ */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc); + +/* Tensor layout conversion helper (y = alpha * x + beta * y) */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnTransformTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnTransformTensorEx(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnTensorDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnTensorDescriptor_t destDesc, + void *destData); + +/* Tensor Bias addition : C = alpha * A + beta * C */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnAddTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN OpTensor op type + */ +typedef enum { + CUDNN_OP_TENSOR_ADD = 0, + CUDNN_OP_TENSOR_MUL = 1, + CUDNN_OP_TENSOR_MIN = 2, + CUDNN_OP_TENSOR_MAX = 3, + CUDNN_OP_TENSOR_SQRT = 4, + CUDNN_OP_TENSOR_NOT = 5, +} cudnnOpTensorOp_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t opTensorOp, + cudnnDataType_t opTensorCompType, + cudnnNanPropagation_t opTensorNanOpt); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t *opTensorOp, + cudnnDataType_t *opTensorCompType, + cudnnNanPropagation_t *opTensorNanOpt); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc); + +/* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */ +/* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnOpTensor(cudnnHandle_t handle, + const cudnnOpTensorDescriptor_t opTensorDesc, + const void *alpha1, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *alpha2, + const cudnnTensorDescriptor_t bDesc, + const void *B, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN ReduceTensor indices type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_NO_INDICES = 0, + CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1, +} cudnnReduceTensorIndices_t CUDNN_DEPRECATED; + +/* + * CUDNN tensor indices type size (all unsigned) + * Currently not supported, default is 32 bit unsigned. 
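+ *
+ * Illustrative sketch of configuring an indexed max-reduction with the
+ * deprecated descriptor API declared below (error checking elided):
+ *
+ *   cudnnReduceTensorDescriptor_t r;
+ *   cudnnCreateReduceTensorDescriptor(&r);
+ *   cudnnSetReduceTensorDescriptor(r, CUDNN_REDUCE_TENSOR_MAX,
+ *                                  CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN,
+ *                                  CUDNN_REDUCE_TENSOR_FLATTENED_INDICES,
+ *                                  CUDNN_32BIT_INDICES);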
+ */ +typedef enum { + CUDNN_32BIT_INDICES = 0, + CUDNN_64BIT_INDICES = 1, + CUDNN_16BIT_INDICES = 2, + CUDNN_8BIT_INDICES = 3, +} cudnnIndicesType_t CUDNN_DEPRECATED; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t reduceTensorOp, + cudnnDataType_t reduceTensorCompType, + cudnnNanPropagation_t reduceTensorNanOpt, + cudnnReduceTensorIndices_t reduceTensorIndices, + cudnnIndicesType_t reduceTensorIndicesType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t *reduceTensorOp, + cudnnDataType_t *reduceTensorCompType, + cudnnNanPropagation_t *reduceTensorNanOpt, + cudnnReduceTensorIndices_t *reduceTensorIndices, + cudnnIndicesType_t *reduceTensorIndicesType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc); + +/* Helper function to return the minimum size of the index space to be passed to the reduction given the input and + * output tensors */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetReductionIndicesSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output + * tensors */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetReductionWorkspaceSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Tensor operation : C = reduce op( alpha * A ) + beta * C */ +/* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */ +/* The indices space is ignored for reduce ops other than min or max. 
*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnReduceTensor(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + void *indices, + size_t indicesSizeInBytes, + void *workspace, + size_t workspaceSizeInBytes, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* Set all values of a tensor to a given value : y[i] = value[0] */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr); + +/* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha); + +/* Create an instance of FilterStruct */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int k, /* number of output feature maps */ + int c, /* number of input feature maps */ + int h, /* height of each input filter */ + int w); /* width of each input filter */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *k, /* number of output feature maps */ + int *c, /* number of input feature maps */ + int *h, /* height of each input filter */ + int *w); /* width of each input filter */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int nbDims, + const int filterDimA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *nbDims, + int filterDimA[]); +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnTransformFilter(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnFilterDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnFilterDescriptor_t destDesc, + void *destData); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc); + +/* + * softmax algorithm + */ +typedef enum { + CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */ + CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */ + CUDNN_SOFTMAX_LOG = 2 +} cudnnSoftmaxAlgorithm_t; + +typedef enum { + CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */ + CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */ +} cudnnSoftmaxMode_t; + +/* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxForward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const 
void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * pooling mode + */ +typedef enum { + CUDNN_POOLING_MAX = 0, + CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */ + CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */ + CUDNN_POOLING_MAX_DETERMINISTIC = 3 +} cudnnPoolingMode_t CUDNN_DEPRECATED; + +/* Create an instance of pooling descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t mode, + cudnnNanPropagation_t maxpoolingNanOpt, + int windowHeight, + int windowWidth, + int verticalPadding, + int horizontalPadding, + int verticalStride, + int horizontalStride); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *windowHeight, + int *windowWidth, + int *verticalPadding, + int *horizontalPadding, + int *verticalStride, + int *horizontalStride); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc, + const cudnnPoolingMode_t mode, + const cudnnNanPropagation_t maxpoolingNanOpt, + int nbDims, + const int windowDimA[], + const int paddingA[], + const int strideA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + int nbDimsRequested, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *nbDims, + int windowDimA[], + int paddingA[], + int strideA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int nbDims, + int outputTensorDimA[]); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int *n, + int *c, + int *h, + int *w); + +/* Destroy an instance of pooling descriptor */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc); + +/* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward pooling */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnPoolingForward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t mode, + cudnnNanPropagation_t reluNanOpt, + double coef); /* ceiling for clipped RELU, alpha for ELU */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t *mode, + cudnnNanPropagation_t *reluNanOpt, + double *coef); /* ceiling for clipped RELU, alpha for ELU */ + 
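+/*
+ * Illustrative usage sketch for the activation entry points nearby; assumes a
+ * valid cudnnHandle_t `handle` and device buffers d_x/d_y holding a 1x1x1x8
+ * float tensor (all non-cuDNN names are hypothetical, error checking elided):
+ *
+ *   cudnnActivationDescriptor_t act;
+ *   cudnnCreateActivationDescriptor(&act);
+ *   cudnnSetActivationDescriptor(act, CUDNN_ACTIVATION_CLIPPED_RELU,
+ *                                CUDNN_PROPAGATE_NAN, 6.0);  // coef = clip ceiling
+ *
+ *   cudnnTensorDescriptor_t xDesc;
+ *   cudnnCreateTensorDescriptor(&xDesc);
+ *   cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
+ *                              1, 1, 1, 8);
+ *
+ *   float alpha = 1.0f, beta = 0.0f;  // y = alpha * act(x) + beta * y
+ *   cudnnActivationForward(handle, act, &alpha, xDesc, d_x, &beta, xDesc, d_y);
+ *
+ *   cudnnDestroyTensorDescriptor(xDesc);
+ *   cudnnDestroyActivationDescriptor(act);
+ */
+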
+CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double swish_beta); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc); + +/* Function to perform forward activation */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnActivationForward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * Create an instance of LRN (Local Response Normalization) descriptor + * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper + */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc); + +#define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */ +#define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */ +#define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */ +#define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */ + +/* LRN layer mode */ +typedef enum { + CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */ +} cudnnLRNMode_t; + +/* + * Uses a window [center-lookBehind, center+lookAhead], where + * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1. + * Values of double parameters cast to tensor data type. + */ +cudnnStatus_t CUDNNWINAPI +cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK); +/* + * Retrieve the settings currently stored in an LRN layer descriptor + * Any of the provided pointers can be NULL (no corresponding value will be returned) + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK); + +/* Destroy an instance of LRN descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc); + +/* LRN functions: output = alpha * normalize(x) + beta * old_y */ + +/* LRN cross-channel forward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0, +} cudnnDivNormMode_t; + +/* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */ +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. 
(one value per CHW...-slice, normalized over N slice) */ + CUDNN_BATCHNORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_BATCHNORM_SPATIAL = 1, + + /* + * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors). + * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values + */ + CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2, +} cudnnBatchNormMode_t CUDNN_DEPRECATED; + +#define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */ + +/* + * Derives a tensor descriptor from layer data descriptor for BatchNormalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnBatchNormMode_t mode); + +typedef enum { + CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */ + CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */ + CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */ +} cudnnBatchNormOps_t CUDNN_DEPRECATED; + +/* + * Performs Batch Normalization during Inference: + * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k] + * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed + * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining + * above for notes on function arguments. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardInference(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + const void *estimatedMean, + const void *estimatedVariance, + double epsilon); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_NORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_NORM_PER_CHANNEL = 1, +} cudnnNormMode_t CUDNN_DEPRECATED; + +typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t CUDNN_DEPRECATED; + +/* + * Derives a tensor descriptor from layer data descriptor for Normalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions. 
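+ *
+ * Sketch (illustrative; xDesc is a previously initialized layer data
+ * descriptor, error checking elided):
+ *
+ *   cudnnTensorDescriptor_t sbDesc, mvDesc;
+ *   cudnnCreateTensorDescriptor(&sbDesc);
+ *   cudnnCreateTensorDescriptor(&mvDesc);
+ *   cudnnDeriveNormTensorDescriptor(sbDesc, mvDesc, xDesc,
+ *                                   CUDNN_NORM_PER_CHANNEL, 1);  // groupCnt = 1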
+ */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc, + cudnnTensorDescriptor_t derivedNormMeanVarDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnNormMode_t mode, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +typedef enum { + CUDNN_NORM_OPS_NORM = 0, /* do normalization only */ + CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */ + CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */ +} cudnnNormOps_t CUDNN_DEPRECATED; + +/* + * Performs Normalization during Inference: + * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k] + * with normScale, normBias, runningMean, runningInvVariance tensors indexed + * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining + * above for notes on function arguments. + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardInference(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + const cudnnTensorDescriptor_t normMeanVarDesc, + const void *estimatedMean, + const void *estimatedVariance, + const cudnnTensorDescriptor_t zDesc, + const void *z, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + double epsilon, + int groupCnt); /* Place hold for future work*/ + +/* APIs for spatial transformer network*/ +typedef enum { + CUDNN_SAMPLER_BILINEAR = 0, +} cudnnSamplerType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc, + cudnnSamplerType_t samplerType, + cudnnDataType_t dataType, + const int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *theta, + void *grid); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerForward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *grid, + const void *beta, + cudnnTensorDescriptor_t yDesc, + void *y); + +typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc); + +/*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes); + +/*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t 
dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +/* Restores the dropout descriptor to a previously saved-off state */ +cudnnStatus_t CUDNNWINAPI +cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +cudnnStatus_t CUDNNWINAPI +cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float *dropout, + void **states, + unsigned long long *seed); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutForward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t xdesc, + const void *x, + const cudnnTensorDescriptor_t ydesc, + void *y, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* TODO: move these enums out to the appropriate submodule */ +typedef enum { + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0, + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1, + CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2, + CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3, + CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4, + CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7, + CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8 +} cudnnConvolutionFwdAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7 +} cudnnConvolutionBwdFilterAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6 +} cudnnConvolutionBwdDataAlgo_t; + +typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t; + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_SUBLIBRARY_VERSION_MISMATCH if the versions are inconsistent. 
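+ *
+ * Usage sketch (illustrative): call once after the sub-libraries are loaded,
+ * e.g.
+ *   if (cudnnOpsVersionCheck() != CUDNN_STATUS_SUCCESS) {
+ *       // handle the sub-library version mismatch
+ *   }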
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsVersionCheck(void); + +/* Function to perform backward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxBackward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward pooling */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnPoolingBackward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward activation */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnActivationBackward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* LRN cross-channel backward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + const void *dy, + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */ + void *dx, /* output x differential */ + void *dMeans); /* output means differential, can be NULL */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const 
cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes); + +/* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTraining( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + + /* Shared desc for the next 6 tensors in the argument list. + Data type to be set as follows: + type = (typeOf(x) == double) ? double : float + Dimensions for this descriptor depend on normalization mode + - Spatial Normalization : tensors are expected to have dims 1xCx1x1 + (normalization is performed across NxHxW) + - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW + (normalization is performed across N) */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + + /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */ + const void *bnScale, + const void *bnBias, + + /* MUST use factor=1 in the very first call of a complete training cycle. + Use a factor=1/(1+n) at N-th call to the function to get + Cumulative Moving Average (CMA) behavior + CMA[n] = (x[1]+...+x[n])/n + Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) = + ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) = + CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */ + double exponentialAverageFactor, + + /* Used in Training phase only. + runningMean = newMean*factor + runningMean*(1-factor) */ + void *resultRunningMean, + /* Output in training mode, input in inference. Is the moving average + of variance[x] (factor is applied in the same way as for runningMean) */ + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance); + +/* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTrainingEx( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + + double exponentialAverageFactor, + void *resultRunningMean, + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + + cudnnActivationDescriptor_t activationDesc, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* Performs backward pass of Batch Normalization layer. 
Returns x gradient, + * bnScale gradient and bnBias gradient */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackward(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */ + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScale, /* bnBias doesn't affect backpropagation */ + /* scale and bias diff are not backpropagated below this layer */ + void *dBnScaleResult, + void *dBnBiasResult, + /* Same epsilon as forward pass */ + double epsilon, + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScaleData, + const void *bnBiasData, /* needed if there is activation */ + void *dBnScaleData, + void *dBnBiasData, + double epsilon, /* Same epsilon as forward pass */ + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnActivationDescriptor_t activationDesc, + const 
cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +/* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardTraining(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + double exponentialAverageFactor, + const cudnnTensorDescriptor_t normMeanVarDesc, + void *resultRunningMean, + void *resultRunningVariance, + /* Has to be >= 0. Should be the same in forward and backward functions. */ + double epsilon, + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnNormalizationBackward(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const void *normScaleData, + const void *normBiasData, /* needed if there is activation */ + void *dNormScaleData, + void *dNormBiasData, + double epsilon, /* Same epsilon as forward pass */ + const cudnnTensorDescriptor_t normMeanVarDesc, + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *dgrid, + void *dtheta); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerBackward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const void *alphaDgrid, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *grid, + const void *betaDgrid, + void *dgrid); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutBackward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t dydesc, + const void *dy, + const cudnnTensorDescriptor_t dxdesc, + void *dx, + 
void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_OPS_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_v9.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_v9.h new file mode 100644 index 0000000000000000000000000000000000000000..7e08847c95f1294bc99e96e737a53cc6ebb7a458 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_v9.h @@ -0,0 +1,68 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +/* cudnn : Neural Networks Library */ + +#if !defined(CUDNN_H_) +#define CUDNN_H_ +#if defined(__cplusplus) +extern "C" { +#endif + +#include <cuda_runtime.h> +#include "cudnn_version.h" +#include "cudnn_graph.h" +#include "cudnn_ops.h" +#include "cudnn_adv.h" +#include "cudnn_cnn.h" + +#if defined(__cplusplus) +} +#endif +#endif /* CUDNN_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_version.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_version.h new file mode 100644 index 0000000000000000000000000000000000000000..c3563e53e076713a782be170e0bebbc111fc7f6a --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_version.h @@ -0,0 +1,70 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/** + * \file: The master cuDNN version file.
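+ *
+ * The macros below encode the version as MAJOR * 10000 + MINOR * 100 +
+ * PATCHLEVEL, so this 9.5.1 release reports CUDNN_VERSION = 90501.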
+ */ + +#ifndef CUDNN_VERSION_H_ +#define CUDNN_VERSION_H_ + +#define CUDNN_MAJOR 9 +#define CUDNN_MINOR 5 +#define CUDNN_PATCHLEVEL 1 + +#define CUDNN_VERSION (CUDNN_MAJOR * 10000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL) + +/* cannot use constexpr here since this is a C-only file */ +/* Below is the max SM version this cuDNN library is aware of and supports natively */ + +#define CUDNN_MAX_SM_MAJOR_NUMBER 9 +#define CUDNN_MAX_SM_MINOR_NUMBER 0 +#define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10) + +#endif /* CUDNN_VERSION_H */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_version_v9.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_version_v9.h new file mode 100644 index 0000000000000000000000000000000000000000..c3563e53e076713a782be170e0bebbc111fc7f6a --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/include/cudnn_version_v9.h @@ -0,0 +1,70 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +/** + * \file: The master cuDNN version file. + */ + +#ifndef CUDNN_VERSION_H_ +#define CUDNN_VERSION_H_ + +#define CUDNN_MAJOR 9 +#define CUDNN_MINOR 5 +#define CUDNN_PATCHLEVEL 1 + +#define CUDNN_VERSION (CUDNN_MAJOR * 10000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL) + +/* cannot use constexpr here since this is a C-only file */ +/* Below is the max SM version this cuDNN library is aware of and supports natively */ + +#define CUDNN_MAX_SM_MAJOR_NUMBER 9 +#define CUDNN_MAX_SM_MINOR_NUMBER 0 +#define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10) + +#endif /* CUDNN_VERSION_H */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e951c5cb08d882be151830c9e59d91c8a996f1cf Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..a594f8b48e1f7909e7c287e580de956aeaea3d2c --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf06ffbd2c4343c3569c269adb48f280778e7105ce15037cdf044042b3c5e134 +size 125136 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..66a27d13670d835a5e0d68c1657384fbd9be47d4 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ad851859f34b6da23d9ca3dcf37b382e45d058b7ec7b4db9c0d7467335d6e7a +size 245380760 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..6a7f0946cb3a6a98eb682d6918480eb54722b9ff --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3619ef593da05b037d7b6e65c25124cb25f1ed27573e0e245ed6052dd7bebd88 +size 6301232 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_precompiled.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_precompiled.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..854ed8710c80ed235214dc5a76fc9f6a3578e3ce --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_precompiled.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f217312a2bb4cd72677f7a0cceb5381a2f04217f6be222fefeca48ffc3a9b6d8 +size 438317848 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_runtime_compiled.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_runtime_compiled.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..f4d240953f24395b515a2103f5167b463c358ddd --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_runtime_compiled.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51d9b6859335a2912a892de019f507d62ab3fb479b0b89d1ede107ba84c8ee12 +size 20983544 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_graph.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_graph.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..5b9ec9f92b0bb8fb7dc70d2b24060066e39d11da --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_graph.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfc112cee23d146560354564f5b8c9571706aaef89be88137b485792fbcf4cbe +size 4358576 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..8061090c706e5b56b526d23be4392c0bd64af316 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a47fb0a4b059d84b6278a87f873fb84125296bfaba792a8f036e16825ff3a469 +size 50335360 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 new file mode 100644 index 0000000000000000000000000000000000000000..0c197cd49ba4aaaa71699b4bdf88cbdad4e6c172 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e71bbec93faf11117d6e658e702e4631b889b72235e607452ee4e64b97667d6 +size 109061640 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd2c4e0854901dac8fac16fafb0207bb2fa33952 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a47cbcf5b8b1efecb056e132669495e3cbbedc40 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/__pycache__/__init__.cpython-312.pyc 
differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cudalibxt.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cudalibxt.h new file mode 100644 index 0000000000000000000000000000000000000000..94fcf4745fafa04f57678ba5ee64103f8ebd6444 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cudalibxt.h @@ -0,0 +1,97 @@ + /* Copyright 2013,2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! 
+* \file cudalibxt.h +* \brief Public header file for the NVIDIA library multi-GPU support structures +*/ + +#ifndef _CUDA_LIB_XT_H_ +#define _CUDA_LIB_XT_H_ +#include <cuda_runtime.h> + +#define CUDA_XT_DESCRIPTOR_VERSION 0x01000000 // This is added to CUDART_VERSION + +enum cudaXtCopyType_t { + LIB_XT_COPY_HOST_TO_DEVICE, + LIB_XT_COPY_DEVICE_TO_HOST, + LIB_XT_COPY_DEVICE_TO_DEVICE +} ; +typedef enum cudaXtCopyType_t cudaLibXtCopyType; + +enum libFormat_t { + LIB_FORMAT_CUFFT = 0x0, + LIB_FORMAT_UNDEFINED = 0x1 +}; + +typedef enum libFormat_t libFormat; + +#define MAX_CUDA_DESCRIPTOR_GPUS 64 + +struct cudaXtDesc_t{ + int version; //descriptor version + int nGPUs; //number of GPUs + int GPUs[MAX_CUDA_DESCRIPTOR_GPUS]; //array of device IDs + void *data[MAX_CUDA_DESCRIPTOR_GPUS]; //array of pointers to data, one per GPU + size_t size[MAX_CUDA_DESCRIPTOR_GPUS]; //array of data sizes, one per GPU + void *cudaXtState; //opaque CUDA utility structure +}; +typedef struct cudaXtDesc_t cudaXtDesc; + +struct cudaLibXtDesc_t{ + int version; //descriptor version + cudaXtDesc *descriptor; //multi-GPU memory descriptor + libFormat library; //which library recognizes the format + int subFormat; //library specific enumerator of sub formats + void *libDescriptor; //library specific descriptor e.g. FFT transform plan object +}; +typedef struct cudaLibXtDesc_t cudaLibXtDesc; + + +#endif + diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufft.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufft.h new file mode 100644 index 0000000000000000000000000000000000000000..2c3dbc4efc36a510117a1e9ed0973f7319d76ce9 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufft.h @@ -0,0 +1,335 @@ + /* Copyright 2005-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users.
These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! +* \file cufft.h +* \brief Public header file for the NVIDIA CUDA FFT library (CUFFT) +*/ + +#ifndef _CUFFT_H_ +#define _CUFFT_H_ + + +#include "cuComplex.h" +#include "driver_types.h" +#include "library_types.h" + +#ifndef CUFFTAPI +#ifdef _WIN32 +#define CUFFTAPI __stdcall +#elif __GNUC__ >= 4 +#define CUFFTAPI __attribute__ ((visibility ("default"))) +#else +#define CUFFTAPI +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#define CUFFT_VER_MAJOR 11 +#define CUFFT_VER_MINOR 3 +#define CUFFT_VER_PATCH 0 +#define CUFFT_VER_BUILD 4 + +#define CUFFT_VERSION 11300 + +// CUFFT API function return values +typedef enum cufftResult_t { + CUFFT_SUCCESS = 0x0, + CUFFT_INVALID_PLAN = 0x1, + CUFFT_ALLOC_FAILED = 0x2, + CUFFT_INVALID_TYPE = 0x3, + CUFFT_INVALID_VALUE = 0x4, + CUFFT_INTERNAL_ERROR = 0x5, + CUFFT_EXEC_FAILED = 0x6, + CUFFT_SETUP_FAILED = 0x7, + CUFFT_INVALID_SIZE = 0x8, + CUFFT_UNALIGNED_DATA = 0x9, + CUFFT_INCOMPLETE_PARAMETER_LIST = 0xA, + CUFFT_INVALID_DEVICE = 0xB, + CUFFT_PARSE_ERROR = 0xC, + CUFFT_NO_WORKSPACE = 0xD, + CUFFT_NOT_IMPLEMENTED = 0xE, + CUFFT_LICENSE_ERROR = 0x0F, + CUFFT_NOT_SUPPORTED = 0x10 + +} cufftResult; + +#define MAX_CUFFT_ERROR 0x11 + + +// CUFFT defines and supports the following data types + + +// cufftReal is a single-precision, floating-point real data type. +// cufftDoubleReal is a double-precision, real data type. +typedef float cufftReal; +typedef double cufftDoubleReal; + +// cufftComplex is a single-precision, floating-point complex data type that +// consists of interleaved real and imaginary components. +// cufftDoubleComplex is the double-precision equivalent. +typedef cuComplex cufftComplex; +typedef cuDoubleComplex cufftDoubleComplex; + +// CUFFT transform directions +#define CUFFT_FORWARD -1 // Forward FFT +#define CUFFT_INVERSE 1 // Inverse FFT + +// CUFFT supports the following transform types +typedef enum cufftType_t { + CUFFT_R2C = 0x2a, // Real to Complex (interleaved) + CUFFT_C2R = 0x2c, // Complex (interleaved) to Real + CUFFT_C2C = 0x29, // Complex to Complex, interleaved + CUFFT_D2Z = 0x6a, // Double to Double-Complex + CUFFT_Z2D = 0x6c, // Double-Complex to Double + CUFFT_Z2Z = 0x69 // Double-Complex to Double-Complex +} cufftType; + +// CUFFT supports the following data layouts +typedef enum cufftCompatibility_t { + CUFFT_COMPATIBILITY_FFTW_PADDING = 0x01 // The default value +} cufftCompatibility; + +#define CUFFT_COMPATIBILITY_DEFAULT CUFFT_COMPATIBILITY_FFTW_PADDING + +// +// structure definition used by the shim between old and new APIs +// +#define MAX_SHIM_RANK 3 + +// cufftHandle is a handle type used to store and access CUFFT plans. 
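The declarations that follow complete cuFFT's plan-based workflow: a plan is created for a given transform shape and type, executed one or more times against device buffers, and finally destroyed. A minimal sketch of that cycle for a 1D single-precision complex transform, assuming a hypothetical caller-provided device buffer d_signal of nx cufftComplex elements (error handling elided; this sketch is not part of the vendored header):

#include <cufft.h>
#include <cuda_runtime.h>

/* In-place forward FFT over nx complex samples; d_signal is an assumed
 * buffer already allocated with cudaMalloc and filled by the caller. */
static void forward_fft_inplace(cufftComplex *d_signal, int nx)
{
    cufftHandle plan;
    cufftPlan1d(&plan, nx, CUFFT_C2C, 1);                  /* one batch of size nx */
    cufftExecC2C(plan, d_signal, d_signal, CUFFT_FORWARD); /* asynchronous launch */
    cudaDeviceSynchronize();                               /* wait for the transform */
    cufftDestroy(plan);
}

Everything except d_signal and the wrapper name comes straight from the declarations in this header.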
+typedef int cufftHandle; + + +cufftResult CUFFTAPI cufftPlan1d(cufftHandle *plan, + int nx, + cufftType type, + int batch); + +cufftResult CUFFTAPI cufftPlan2d(cufftHandle *plan, + int nx, int ny, + cufftType type); + +cufftResult CUFFTAPI cufftPlan3d(cufftHandle *plan, + int nx, int ny, int nz, + cufftType type); + +cufftResult CUFFTAPI cufftPlanMany(cufftHandle *plan, + int rank, + int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, + int batch); + +cufftResult CUFFTAPI cufftMakePlan1d(cufftHandle plan, + int nx, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlan2d(cufftHandle plan, + int nx, int ny, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlan3d(cufftHandle plan, + int nx, int ny, int nz, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlanMany(cufftHandle plan, + int rank, + int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftMakePlanMany64(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, + long long int idist, + long long int *onembed, + long long int ostride, long long int odist, + cufftType type, + long long int batch, + size_t * workSize); + +cufftResult CUFFTAPI cufftGetSizeMany64(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, long long int idist, + long long int *onembed, + long long int ostride, long long int odist, + cufftType type, + long long int batch, + size_t *workSize); + + + + +cufftResult CUFFTAPI cufftEstimate1d(int nx, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftEstimate2d(int nx, int ny, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftEstimate3d(int nx, int ny, int nz, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftEstimateMany(int rank, + int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, + int batch, + size_t *workSize); + +cufftResult CUFFTAPI cufftCreate(cufftHandle * handle); + +cufftResult CUFFTAPI cufftGetSize1d(cufftHandle handle, + int nx, + cufftType type, + int batch, + size_t *workSize ); + +cufftResult CUFFTAPI cufftGetSize2d(cufftHandle handle, + int nx, int ny, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftGetSize3d(cufftHandle handle, + int nx, int ny, int nz, + cufftType type, + size_t *workSize); + +cufftResult CUFFTAPI cufftGetSizeMany(cufftHandle handle, + int rank, int *n, + int *inembed, int istride, int idist, + int *onembed, int ostride, int odist, + cufftType type, int batch, size_t *workArea); + +cufftResult CUFFTAPI cufftGetSize(cufftHandle handle, size_t *workSize); + +cufftResult CUFFTAPI cufftSetWorkArea(cufftHandle plan, void *workArea); + +cufftResult CUFFTAPI cufftSetAutoAllocation(cufftHandle plan, int autoAllocate); + +cufftResult CUFFTAPI cufftExecC2C(cufftHandle plan, + cufftComplex *idata, + cufftComplex *odata, + int direction); + +cufftResult CUFFTAPI cufftExecR2C(cufftHandle plan, + cufftReal *idata, + cufftComplex *odata); + +cufftResult CUFFTAPI cufftExecC2R(cufftHandle plan, + cufftComplex *idata, + cufftReal *odata); + +cufftResult CUFFTAPI cufftExecZ2Z(cufftHandle plan, + cufftDoubleComplex *idata, + cufftDoubleComplex *odata, + int direction); + +cufftResult CUFFTAPI cufftExecD2Z(cufftHandle plan, + 
cufftDoubleReal *idata, + cufftDoubleComplex *odata); + +cufftResult CUFFTAPI cufftExecZ2D(cufftHandle plan, + cufftDoubleComplex *idata, + cufftDoubleReal *odata); + + +// utility functions +cufftResult CUFFTAPI cufftSetStream(cufftHandle plan, + cudaStream_t stream); + +cufftResult CUFFTAPI cufftDestroy(cufftHandle plan); + +cufftResult CUFFTAPI cufftGetVersion(int *version); + +cufftResult CUFFTAPI cufftGetProperty(libraryPropertyType type, + int *value); + +// +// Set/Get PlanProperty APIs configures per-plan behavior +// +typedef enum cufftProperty_t { + NVFFT_PLAN_PROPERTY_INT64_PATIENT_JIT = 0x1, + NVFFT_PLAN_PROPERTY_INT64_MAX_NUM_HOST_THREADS = 0x2, +} cufftProperty; + +cufftResult CUFFTAPI cufftSetPlanPropertyInt64(cufftHandle plan, + cufftProperty property, + const long long int inputValueInt); + +cufftResult CUFFTAPI cufftGetPlanPropertyInt64(cufftHandle plan, + cufftProperty property, + long long int* returnPtrValue); + +cufftResult CUFFTAPI cufftResetPlanProperty(cufftHandle plan, cufftProperty property); + +#ifdef __cplusplus +} +#endif + +#endif /* _CUFFT_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufftXt.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufftXt.h new file mode 100644 index 0000000000000000000000000000000000000000..cb587e061cd85e5cfc8d6cb7015f575778e45ec7 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufftXt.h @@ -0,0 +1,278 @@ + + /* Copyright 2005-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. 
Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! +* \file cufftXt.h +* \brief Public header file for the NVIDIA CUDA FFT library (CUFFT) +*/ + +#ifndef _CUFFTXT_H_ +#define _CUFFTXT_H_ +#include "cudalibxt.h" +#include "cufft.h" + + +#ifndef CUFFTAPI +#ifdef _WIN32 +#define CUFFTAPI __stdcall +#else +#define CUFFTAPI +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// +// cufftXtSubFormat identifies the data layout of +// a memory descriptor owned by cufft. +// note that multi GPU cufft does not yet support out-of-place transforms +// + +typedef enum cufftXtSubFormat_t { + CUFFT_XT_FORMAT_INPUT = 0x00, //by default input is in linear order across GPUs + CUFFT_XT_FORMAT_OUTPUT = 0x01, //by default output is in scrambled order depending on transform + CUFFT_XT_FORMAT_INPLACE = 0x02, //by default inplace is input order, which is linear across GPUs + CUFFT_XT_FORMAT_INPLACE_SHUFFLED = 0x03, //shuffled output order after execution of the transform + CUFFT_XT_FORMAT_1D_INPUT_SHUFFLED = 0x04, //shuffled input order prior to execution of 1D transforms + CUFFT_XT_FORMAT_DISTRIBUTED_INPUT = 0x05, + CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT = 0x06, + CUFFT_FORMAT_UNDEFINED = 0x07 +} cufftXtSubFormat; + +// +// cufftXtCopyType specifies the type of copy for cufftXtMemcpy +// +typedef enum cufftXtCopyType_t { + CUFFT_COPY_HOST_TO_DEVICE = 0x00, + CUFFT_COPY_DEVICE_TO_HOST = 0x01, + CUFFT_COPY_DEVICE_TO_DEVICE = 0x02, + CUFFT_COPY_UNDEFINED = 0x03 +} cufftXtCopyType; + +// +// cufftXtQueryType specifies the type of query for cufftXtQueryPlan +// +typedef enum cufftXtQueryType_t { + CUFFT_QUERY_1D_FACTORS = 0x00, + CUFFT_QUERY_UNDEFINED = 0x01 +} cufftXtQueryType; + +typedef struct cufftXt1dFactors_t { + long long int size; + long long int stringCount; + long long int stringLength; + long long int substringLength; + long long int factor1; + long long int factor2; + long long int stringMask; + long long int substringMask; + long long int factor1Mask; + long long int factor2Mask; + int stringShift; + int substringShift; + int factor1Shift; + int factor2Shift; +} cufftXt1dFactors; + +// +// cufftXtWorkAreaPolicy specifies policy for cufftXtSetWorkAreaPolicy +// +typedef enum cufftXtWorkAreaPolicy_t { + CUFFT_WORKAREA_MINIMAL = 0, /* maximum reduction */ + CUFFT_WORKAREA_USER = 1, /* use workSize parameter as limit */ + CUFFT_WORKAREA_PERFORMANCE = 2, /* default - 1x overhead or more, maximum performance */ +} cufftXtWorkAreaPolicy; + +// multi-GPU routines +cufftResult CUFFTAPI cufftXtSetGPUs(cufftHandle handle, int nGPUs, int *whichGPUs); + +cufftResult CUFFTAPI cufftXtMalloc(cufftHandle plan, + cudaLibXtDesc ** descriptor, + cufftXtSubFormat format); + +cufftResult CUFFTAPI cufftXtMemcpy(cufftHandle plan, + void *dstPointer, + void *srcPointer, + cufftXtCopyType type); + +cufftResult CUFFTAPI cufftXtFree(cudaLibXtDesc *descriptor); + +cufftResult CUFFTAPI cufftXtSetWorkArea(cufftHandle plan, void **workArea); + +cufftResult CUFFTAPI cufftXtExecDescriptorC2C(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output, + int direction); + +cufftResult 
CUFFTAPI cufftXtExecDescriptorR2C(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +cufftResult CUFFTAPI cufftXtExecDescriptorC2R(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +cufftResult CUFFTAPI cufftXtExecDescriptorZ2Z(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output, + int direction); + +cufftResult CUFFTAPI cufftXtExecDescriptorD2Z(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +cufftResult CUFFTAPI cufftXtExecDescriptorZ2D(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output); + +// Utility functions + +cufftResult CUFFTAPI cufftXtQueryPlan(cufftHandle plan, void *queryStruct, cufftXtQueryType queryType); + + +// callbacks + + +typedef enum cufftXtCallbackType_t { + CUFFT_CB_LD_COMPLEX = 0x0, + CUFFT_CB_LD_COMPLEX_DOUBLE = 0x1, + CUFFT_CB_LD_REAL = 0x2, + CUFFT_CB_LD_REAL_DOUBLE = 0x3, + CUFFT_CB_ST_COMPLEX = 0x4, + CUFFT_CB_ST_COMPLEX_DOUBLE = 0x5, + CUFFT_CB_ST_REAL = 0x6, + CUFFT_CB_ST_REAL_DOUBLE = 0x7, + CUFFT_CB_UNDEFINED = 0x8 + +} cufftXtCallbackType; + +// Legacy callbacks +typedef cufftComplex (*cufftCallbackLoadC)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer); +typedef cufftDoubleComplex (*cufftCallbackLoadZ)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer); +typedef cufftReal (*cufftCallbackLoadR)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer); +typedef cufftDoubleReal(*cufftCallbackLoadD)(void *dataIn, size_t offset, void *callerInfo, void *sharedPointer); + +typedef void (*cufftCallbackStoreC)(void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPointer); +typedef void (*cufftCallbackStoreZ)(void *dataOut, size_t offset, cufftDoubleComplex element, void *callerInfo, void *sharedPointer); +typedef void (*cufftCallbackStoreR)(void *dataOut, size_t offset, cufftReal element, void *callerInfo, void *sharedPointer); +typedef void (*cufftCallbackStoreD)(void *dataOut, size_t offset, cufftDoubleReal element, void *callerInfo, void *sharedPointer); + +// LTO callbacks (notice the different offset type vs. 
legacy callbacks) +typedef cufftComplex (*cufftJITCallbackLoadC)(void *dataIn, unsigned long long offset, void *callerInfo, void *sharedPointer); +typedef cufftDoubleComplex (*cufftJITCallbackLoadZ)(void *dataIn, unsigned long long offset, void *callerInfo, void *sharedPointer); +typedef cufftReal (*cufftJITCallbackLoadR)(void *dataIn, unsigned long long offset, void *callerInfo, void *sharedPointer); +typedef cufftDoubleReal(*cufftJITCallbackLoadD)(void *dataIn, unsigned long long offset, void *callerInfo, void *sharedPointer); + +typedef void (*cufftJITCallbackStoreC)(void *dataOut, unsigned long long offset, cufftComplex element, void *callerInfo, void *sharedPointer); +typedef void (*cufftJITCallbackStoreZ)(void *dataOut, unsigned long long offset, cufftDoubleComplex element, void *callerInfo, void *sharedPointer); +typedef void (*cufftJITCallbackStoreR)(void *dataOut, unsigned long long offset, cufftReal element, void *callerInfo, void *sharedPointer); +typedef void (*cufftJITCallbackStoreD)(void *dataOut, unsigned long long offset, cufftDoubleReal element, void *callerInfo, void *sharedPointer); + +cufftResult CUFFTAPI cufftXtSetCallback(cufftHandle plan, void **callback_routine, cufftXtCallbackType cbType, void **caller_info); +cufftResult CUFFTAPI cufftXtClearCallback(cufftHandle plan, cufftXtCallbackType cbType); +cufftResult CUFFTAPI cufftXtSetCallbackSharedSize(cufftHandle plan, cufftXtCallbackType cbType, size_t sharedSize); + +cufftResult CUFFTAPI __cufftXtSetJITCallback_12_7(cufftHandle plan, const char* lto_callback_symbol_name, const void *lto_callback_fatbin, size_t lto_callback_fatbin_size, cufftXtCallbackType type, void **caller_info ); + +#ifndef CUFFT_NO_INLINE +static inline cufftResult cufftXtSetJITCallback(cufftHandle plan, const char* lto_callback_symbol_name, const void *lto_callback_fatbin, size_t lto_callback_fatbin_size, cufftXtCallbackType type, void **caller_info) { + return __cufftXtSetJITCallback_12_7(plan, lto_callback_symbol_name, lto_callback_fatbin, lto_callback_fatbin_size, type, caller_info); +}; +#endif + +cufftResult CUFFTAPI cufftXtMakePlanMany(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, + long long int idist, + cudaDataType inputtype, + long long int *onembed, + long long int ostride, + long long int odist, + cudaDataType outputtype, + long long int batch, + size_t *workSize, + cudaDataType executiontype); + +cufftResult CUFFTAPI cufftXtGetSizeMany(cufftHandle plan, + int rank, + long long int *n, + long long int *inembed, + long long int istride, + long long int idist, + cudaDataType inputtype, + long long int *onembed, + long long int ostride, + long long int odist, + cudaDataType outputtype, + long long int batch, + size_t *workSize, + cudaDataType executiontype); + + +cufftResult CUFFTAPI cufftXtExec(cufftHandle plan, + void *input, + void *output, + int direction); + +cufftResult CUFFTAPI cufftXtExecDescriptor(cufftHandle plan, + cudaLibXtDesc *input, + cudaLibXtDesc *output, + int direction); + +cufftResult CUFFTAPI cufftXtSetWorkAreaPolicy(cufftHandle plan, cufftXtWorkAreaPolicy policy, size_t *workSize); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufftw.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufftw.h new file mode 100644 index 0000000000000000000000000000000000000000..dcf72370f777bc0aaa76dddae9a34efd667a5922 --- /dev/null +++ 
b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/include/cufftw.h @@ -0,0 +1,465 @@ + + /* Copyright 2005-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/*! 
+* \file cufftw.h +* \brief Public header file for the NVIDIA CUDA FFTW library (CUFFTW) +*/ + +#ifndef _CUFFTW_H_ +#define _CUFFTW_H_ + + +#include <stdio.h> +#include "cufft.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// Transform direction +#define FFTW_FORWARD -1 +#define FFTW_INVERSE 1 +#define FFTW_BACKWARD 1 + +// Planner flags +#define FFTW_ESTIMATE 0x01 +#define FFTW_MEASURE 0x02 +#define FFTW_PATIENT 0x03 +#define FFTW_EXHAUSTIVE 0x04 +#define FFTW_WISDOM_ONLY 0x05 + +// Algorithm restriction flags +#define FFTW_DESTROY_INPUT 0x08 +#define FFTW_PRESERVE_INPUT 0x0C +#define FFTW_UNALIGNED 0x10 + +// CUFFTW defines and supports the following data types + +// note if complex.h has been included we use the C99 complex types +#if !defined(FFTW_NO_Complex) && defined(_Complex_I) && defined (complex) + typedef double _Complex fftw_complex; + typedef float _Complex fftwf_complex; +#else + typedef double fftw_complex[2]; + typedef float fftwf_complex[2]; +#endif + +typedef void *fftw_plan; + +typedef void *fftwf_plan; + +typedef struct { + int n; + int is; + int os; +} fftw_iodim; + +typedef fftw_iodim fftwf_iodim; + +typedef struct { + ptrdiff_t n; + ptrdiff_t is; + ptrdiff_t os; +} fftw_iodim64; + +typedef fftw_iodim64 fftwf_iodim64; + +// CUFFTW defines and supports the following double precision APIs + +fftw_plan CUFFTAPI fftw_plan_dft_1d(int n, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_2d(int n0, + int n1, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_3d(int n0, + int n1, + int n2, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft(int rank, + const int *n, + fftw_complex *in, + fftw_complex *out, + int sign, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_r2c_1d(int n, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_r2c_2d(int n0, + int n1, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_r2c_3d(int n0, + int n1, + int n2, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_r2c(int rank, + const int *n, + double *in, + fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r_1d(int n, + fftw_complex *in, + double *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r_2d(int n0, + int n1, + fftw_complex *in, + double *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r_3d(int n0, + int n1, + int n2, + fftw_complex *in, + double *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_dft_c2r(int rank, + const int *n, + fftw_complex *in, + double *out, + unsigned flags); + + +fftw_plan CUFFTAPI fftw_plan_many_dft(int rank, + const int *n, + int batch, + fftw_complex *in, + const int *inembed, int istride, int idist, + fftw_complex *out, + const int *onembed, int ostride, int odist, + int sign, unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_many_dft_r2c(int rank, + const int *n, + int batch, + double *in, + const int *inembed, int istride, int idist, + fftw_complex *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_many_dft_c2r(int rank, + const int *n, + int batch, + fftw_complex *in, + const int *inembed, int istride, int idist, + double *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru_dft(int rank, const fftw_iodim *dims, +
int batch_rank, const fftw_iodim *batch_dims, + fftw_complex *in, fftw_complex *out, + int sign, unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru_dft_r2c(int rank, const fftw_iodim *dims, + int batch_rank, const fftw_iodim *batch_dims, + double *in, fftw_complex *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru_dft_c2r(int rank, const fftw_iodim *dims, + int batch_rank, const fftw_iodim *batch_dims, + fftw_complex *in, double *out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru64_dft(int rank, const fftw_iodim64* dims, + int batch_rank, const fftw_iodim64* batch_dims, + fftw_complex* in, fftw_complex* out, + int sign, unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru64_dft_r2c(int rank, const fftw_iodim64* dims, + int batch_rank, const fftw_iodim64* batch_dims, + double* in, fftw_complex* out, + unsigned flags); + +fftw_plan CUFFTAPI fftw_plan_guru64_dft_c2r(int rank, const fftw_iodim64* dims, + int batch_rank, const fftw_iodim64* batch_dims, + fftw_complex* in, double* out, + unsigned flags); + +void CUFFTAPI fftw_execute(const fftw_plan plan); + +void CUFFTAPI fftw_execute_dft(const fftw_plan plan, + fftw_complex *idata, + fftw_complex *odata); + +void CUFFTAPI fftw_execute_dft_r2c(const fftw_plan plan, + double *idata, + fftw_complex *odata); + +void CUFFTAPI fftw_execute_dft_c2r(const fftw_plan plan, + fftw_complex *idata, + double *odata); + +// CUFFTW defines and supports the following single precision APIs + +fftwf_plan CUFFTAPI fftwf_plan_dft_1d(int n, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_2d(int n0, + int n1, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_3d(int n0, + int n1, + int n2, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft(int rank, + const int *n, + fftwf_complex *in, + fftwf_complex *out, + int sign, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_1d(int n, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_2d(int n0, + int n1, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c_3d(int n0, + int n1, + int n2, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_r2c(int rank, + const int *n, + float *in, + fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_1d(int n, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_2d(int n0, + int n1, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r_3d(int n0, + int n1, + int n2, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_dft_c2r(int rank, + const int *n, + fftwf_complex *in, + float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_many_dft(int rank, + const int *n, + int batch, + fftwf_complex *in, + const int *inembed, int istride, int idist, + fftwf_complex *out, + const int *onembed, int ostride, int odist, + int sign, unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_many_dft_r2c(int rank, + const int *n, + int batch, + float *in, + const int *inembed, int istride, int idist, + fftwf_complex *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_many_dft_c2r(int rank, + const int *n, + int batch, + fftwf_complex *in, + const int *inembed, 
int istride, int idist, + float *out, + const int *onembed, int ostride, int odist, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru_dft(int rank, const fftwf_iodim *dims, + int batch_rank, const fftwf_iodim *batch_dims, + fftwf_complex *in, fftwf_complex *out, + int sign, unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru_dft_r2c(int rank, const fftwf_iodim *dims, + int batch_rank, const fftwf_iodim *batch_dims, + float *in, fftwf_complex *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru_dft_c2r(int rank, const fftwf_iodim *dims, + int batch_rank, const fftwf_iodim *batch_dims, + fftwf_complex *in, float *out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru64_dft(int rank, const fftwf_iodim64* dims, + int batch_rank, const fftwf_iodim64* batch_dims, + fftwf_complex* in, fftwf_complex* out, + int sign, unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_r2c(int rank, const fftwf_iodim64* dims, + int batch_rank, const fftwf_iodim64* batch_dims, + float* in, fftwf_complex* out, + unsigned flags); + +fftwf_plan CUFFTAPI fftwf_plan_guru64_dft_c2r(int rank, const fftwf_iodim64* dims, + int batch_rank, const fftwf_iodim64* batch_dims, + fftwf_complex* in, float* out, + unsigned flags); + +void CUFFTAPI fftwf_execute(const fftw_plan plan); + +void CUFFTAPI fftwf_execute_dft(const fftwf_plan plan, + fftwf_complex *idata, + fftwf_complex *odata); + +void CUFFTAPI fftwf_execute_dft_r2c(const fftwf_plan plan, + float *idata, + fftwf_complex *odata); + +void CUFFTAPI fftwf_execute_dft_c2r(const fftwf_plan plan, + fftwf_complex *idata, + float *odata); + +#ifdef _WIN32 +#define _CUFFTAPI(T) T CUFFTAPI +#else +#define _CUFFTAPI(T) CUFFTAPI T +#endif + +// CUFFTW defines and supports the following support APIs + +_CUFFTAPI(void *) fftw_malloc(size_t n); + +_CUFFTAPI(void *) fftwf_malloc(size_t n); + +void CUFFTAPI fftw_free(void *pointer); + +void CUFFTAPI fftwf_free(void *pointer); + +void CUFFTAPI fftw_export_wisdom_to_file(FILE * output_file); + +void CUFFTAPI fftwf_export_wisdom_to_file(FILE * output_file); + +int CUFFTAPI fftw_import_wisdom_from_file(FILE * input_file); + +int CUFFTAPI fftwf_import_wisdom_from_file(FILE * input_file); + +void CUFFTAPI fftw_print_plan(const fftw_plan plan); + +void CUFFTAPI fftwf_print_plan(const fftwf_plan plan); + +void CUFFTAPI fftw_set_timelimit(double seconds); + +void CUFFTAPI fftwf_set_timelimit(double seconds); + +double CUFFTAPI fftw_cost(const fftw_plan plan); + +double CUFFTAPI fftwf_cost(const fftw_plan plan); + +void CUFFTAPI fftw_flops(const fftw_plan plan, double *add, double *mul, double *fma); + +void CUFFTAPI fftwf_flops(const fftw_plan plan, double *add, double *mul, double *fma); + +void CUFFTAPI fftw_destroy_plan(fftw_plan plan); + +void CUFFTAPI fftwf_destroy_plan(fftwf_plan plan); + +void CUFFTAPI fftw_cleanup(void); + +void CUFFTAPI fftwf_cleanup(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _CUFFTW_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1949cdb0e7b64553089336a7b2f87b7e449dd35c Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufft.so.11 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufft.so.11 new file mode 100644 index 0000000000000000000000000000000000000000..5de64fc22f400767ae803f2b07803116d63b2c5e --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufft.so.11 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e7f52f5eb42b28bac29943a1213d940bf4c59bf9f73027286474a7d8f7dc8a2 +size 278925008 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufftw.so.11 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufftw.so.11 new file mode 100644 index 0000000000000000000000000000000000000000..b60fb346da5e41c55cf387753ac8c8b467fd6dcf --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufftw.so.11 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17b23543f92f46c0b5f5c90d69be928f1b918d37dcecbf458abd2e23f4598032 +size 2101480 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b38a40fc137da652ab44cb887f59638b96c27a9 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..734a184bf8492615a8225daf845f009f93baff0b Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverDn.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverDn.h new file mode 100644 index 0000000000000000000000000000000000000000..fe212dee512540298db1cb71d196ca7b3208779c --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverDn.h @@ -0,0 +1,5009 @@ +/* + * Copyright 2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +/* cuSolverDN : Dense Linear Algebra Library + +*/ + +#if !defined(CUSOLVERDN_H_) + #define CUSOLVERDN_H_ + +struct cusolverDnContext; +typedef struct cusolverDnContext *cusolverDnHandle_t; + +struct syevjInfo; +typedef struct syevjInfo *syevjInfo_t; + +struct gesvdjInfo; +typedef struct gesvdjInfo *gesvdjInfo_t; + +//------------------------------------------------------ +// opaque cusolverDnIRS structure for IRS solver +struct cusolverDnIRSParams; +typedef struct cusolverDnIRSParams *cusolverDnIRSParams_t; + +struct cusolverDnIRSInfos; +typedef struct cusolverDnIRSInfos *cusolverDnIRSInfos_t; +//------------------------------------------------------ + +struct cusolverDnParams; +typedef struct cusolverDnParams *cusolverDnParams_t; + +typedef enum { + CUSOLVERDN_GETRF = 0, + CUSOLVERDN_POTRF = 1 +} cusolverDnFunction_t; + +typedef enum { + CUSOLVER_DETERMINISTIC_RESULTS = 1, + CUSOLVER_ALLOW_NON_DETERMINISTIC_RESULTS = 2 +} cusolverDeterministicMode_t; + + #include <stdint.h> + + #include "cuComplex.h" /* import complex data type */ + #include "cublas_v2.h" + #include "cusolver_common.h" + + /*******************************************************************************/ + #ifdef __cplusplus +extern "C" { + #endif + + cusolverStatus_t CUSOLVERAPI cusolverDnCreate(cusolverDnHandle_t *handle); + cusolverStatus_t CUSOLVERAPI cusolverDnDestroy(cusolverDnHandle_t handle); + cusolverStatus_t CUSOLVERAPI + cusolverDnSetStream(cusolverDnHandle_t handle, cudaStream_t streamId); + cusolverStatus_t CUSOLVERAPI + cusolverDnGetStream(cusolverDnHandle_t handle, cudaStream_t *streamId); + + //============================================================ + // Deterministic Mode + //============================================================ + cusolverStatus_t CUSOLVERAPI cusolverDnSetDeterministicMode(cusolverDnHandle_t + handle, cusolverDeterministicMode_t mode); + cusolverStatus_t CUSOLVERAPI cusolverDnGetDeterministicMode(cusolverDnHandle_t + handle, cusolverDeterministicMode_t* mode); + + //============================================================ + // IRS headers + //============================================================ + + // ============================================================================= + // IRS helper function API + // ============================================================================= + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSParamsCreate(cusolverDnIRSParams_t *params_ptr); + + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSParamsDestroy(cusolverDnIRSParams_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSParamsSetRefinementSolver( + cusolverDnIRSParams_t params, + cusolverIRSRefinement_t refinement_solver); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSParamsSetSolverMainPrecision( + cusolverDnIRSParams_t params, + cusolverPrecType_t solver_main_precision); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSParamsSetSolverLowestPrecision( + cusolverDnIRSParams_t params, + cusolverPrecType_t solver_lowest_precision); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSParamsSetSolverPrecisions( + cusolverDnIRSParams_t params, + cusolverPrecType_t solver_main_precision, + cusolverPrecType_t solver_lowest_precision); + + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSParamsSetTol(cusolverDnIRSParams_t params, double val); + + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSParamsSetTolInner(cusolverDnIRSParams_t params, double val); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSParamsSetMaxIters( + cusolverDnIRSParams_t params, + cusolver_int_t maxiters); + 
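Taken together, these IRS helpers configure a reusable parameter object that the mixed-precision gesv/gels drivers further below consume; the remaining setters and getters continue after this sketch. A minimal configuration example, assuming the enumerators CUSOLVER_IRS_REFINE_CLASSICAL, CUSOLVER_R_64F and CUSOLVER_R_32F supplied by cusolver_common.h (this sketch is not part of the vendored header):

#include <cusolverDn.h>

/* Build an IRS params object for an FP64 solve that may drop to FP32
 * internally and refine back to double-precision accuracy. Enumerator
 * names are assumed to come from cusolver_common.h. */
static cusolverStatus_t make_irs_params(cusolverDnIRSParams_t *params)
{
    cusolverStatus_t st = cusolverDnIRSParamsCreate(params);
    if (st != CUSOLVER_STATUS_SUCCESS)
        return st;
    cusolverDnIRSParamsSetRefinementSolver(*params, CUSOLVER_IRS_REFINE_CLASSICAL);
    cusolverDnIRSParamsSetSolverPrecisions(*params, CUSOLVER_R_64F, CUSOLVER_R_32F);
    cusolverDnIRSParamsSetTol(*params, 1.0e-12);
    cusolverDnIRSParamsSetMaxIters(*params, 50);
    return CUSOLVER_STATUS_SUCCESS;
}

Once a solve completes, the object would be released with cusolverDnIRSParamsDestroy, declared above.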
cusolverStatus_t CUSOLVERAPI cusolverDnIRSParamsSetMaxItersInner( + cusolverDnIRSParams_t params, + cusolver_int_t maxiters_inner); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSParamsGetMaxIters( + cusolverDnIRSParams_t params, + cusolver_int_t * maxiters); + + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSParamsEnableFallback(cusolverDnIRSParams_t params); + + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSParamsDisableFallback(cusolverDnIRSParams_t params); + + // ============================================================================= + // cusolverDnIRSInfos prototypes + // ============================================================================= + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSInfosDestroy(cusolverDnIRSInfos_t infos); + + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSInfosCreate(cusolverDnIRSInfos_t *infos_ptr); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSInfosGetNiters( + cusolverDnIRSInfos_t infos, + cusolver_int_t * niters); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSInfosGetOuterNiters( + cusolverDnIRSInfos_t infos, + cusolver_int_t * outer_niters); + + cusolverStatus_t CUSOLVERAPI + cusolverDnIRSInfosRequestResidual(cusolverDnIRSInfos_t infos); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSInfosGetResidualHistory( + cusolverDnIRSInfos_t infos, + void ** residual_history); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSInfosGetMaxIters( + cusolverDnIRSInfos_t infos, + cusolver_int_t * maxiters); + + //============================================================ + // IRS functions API + //============================================================ + + /*******************************************************************************/ /* + * [ZZ, ZC, ZK, ZE, ZY, CC, CK, CE, CY, DD, DS, DH, DB, DX, SS, SH, SB, SX]gesv + * users API Prototypes */ + /*******************************************************************************/ + cusolverStatus_t CUSOLVERAPI cusolverDnZZgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZCgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZKgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZEgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZYgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + 
cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCCgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCEgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCKgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCYgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDDgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDSgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDHgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDBgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDXgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t 
CUSOLVERAPI cusolverDnSSgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnSHgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnSBgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnSXgesv( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + /*******************************************************************************/ + + /*******************************************************************************/ /* + * [ZZ, ZC, ZK, ZE, ZY, CC, CK, CE, CY, DD, DS, DH, DB, DX, SS, SH, SB, SX]gesv_bufferSize + * users API Prototypes */ + /*******************************************************************************/ + cusolverStatus_t CUSOLVERAPI cusolverDnZZgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZCgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZKgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZEgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZYgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI 
cusolverDnCCgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnCKgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnCEgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnCYgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDDgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDSgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDHgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDBgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDXgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnSSgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnSHgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI 
cusolverDnSBgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnSXgesv_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + cusolver_int_t * dipiv, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + /*******************************************************************************/ + + /*******************************************************************************/ /* + * [ZZ, ZC, ZK, ZE, ZY, CC, CK, CE, CY, DD, DS, DH, DB, DX, SS, SH, SB, SX]gels + * users API Prototypes */ + /*******************************************************************************/ + cusolverStatus_t CUSOLVERAPI cusolverDnZZgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZCgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZKgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZEgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZYgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCCgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCKgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * 
iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCEgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCYgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDDgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDSgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDHgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDBgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDXgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnSSgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnSHgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnSBgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + 
cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnSXgels( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * iter, + cusolver_int_t * d_info); + /*******************************************************************************/ + + /*******************************************************************************/ /* + * [ZZ, ZC, ZK, ZE, ZY, CC, CK, CE, CY, DD, DS, DH, DB, DX, SS, SH, SB, SX]gels_bufferSize + * API prototypes */ + /*******************************************************************************/ + cusolverStatus_t CUSOLVERAPI cusolverDnZZgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZCgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZKgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZEgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnZYgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuDoubleComplex * dA, + cusolver_int_t ldda, + cuDoubleComplex * dB, + cusolver_int_t lddb, + cuDoubleComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnCCgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnCKgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnCEgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnCYgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + 
cusolver_int_t nrhs, + cuComplex * dA, + cusolver_int_t ldda, + cuComplex * dB, + cusolver_int_t lddb, + cuComplex * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDDgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDSgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDHgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDBgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnDXgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + double * dA, + cusolver_int_t ldda, + double * dB, + cusolver_int_t lddb, + double * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnSSgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnSHgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnSBgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnSXgels_bufferSize( + cusolverDnHandle_t handle, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + float * dA, + cusolver_int_t ldda, + float * dB, + cusolver_int_t lddb, + float * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t * lwork_bytes); + /*******************************************************************************/ + + /*******************************************************************************/ /* + * expert users API for IRS Prototypes + * */ + /*******************************************************************************/ + cusolverStatus_t CUSOLVERAPI cusolverDnIRSXgesv( + cusolverDnHandle_t handle, + cusolverDnIRSParams_t gesv_irs_params, + cusolverDnIRSInfos_t gesv_irs_infos, + cusolver_int_t n, + cusolver_int_t nrhs, + void * dA, + cusolver_int_t ldda, + void 
* dB, + cusolver_int_t lddb, + void * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * niters, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSXgesv_bufferSize( + cusolverDnHandle_t handle, + cusolverDnIRSParams_t params, + cusolver_int_t n, + cusolver_int_t nrhs, + size_t * lwork_bytes); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSXgels( + cusolverDnHandle_t handle, + cusolverDnIRSParams_t gels_irs_params, + cusolverDnIRSInfos_t gels_irs_infos, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + void * dA, + cusolver_int_t ldda, + void * dB, + cusolver_int_t lddb, + void * dX, + cusolver_int_t lddx, + void * dWorkspace, + size_t lwork_bytes, + cusolver_int_t * niters, + cusolver_int_t * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnIRSXgels_bufferSize( + cusolverDnHandle_t handle, + cusolverDnIRSParams_t params, + cusolver_int_t m, + cusolver_int_t n, + cusolver_int_t nrhs, + size_t * lwork_bytes); + /*******************************************************************************/ + + /* Cholesky factorization and its solver */ + cusolverStatus_t CUSOLVERAPI cusolverDnSpotrf_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDpotrf_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCpotrf_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZpotrf_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSpotrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * Workspace, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDpotrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * Workspace, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCpotrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + cuComplex * Workspace, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZpotrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * Workspace, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnSpotrs( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, + const float * A, + int lda, + float * B, + int ldb, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDpotrs( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, + const double * A, + int lda, + double * B, + int ldb, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCpotrs( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, + const cuComplex * A, + int lda, + cuComplex * B, + int ldb, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZpotrs( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, + const cuDoubleComplex *A, + int lda, + cuDoubleComplex * B, + int ldb, + int * devInfo); + + /* batched Cholesky factorization and its solver */ + cusolverStatus_t CUSOLVERAPI cusolverDnSpotrfBatched( + cusolverDnHandle_t 
handle, + cublasFillMode_t uplo, + int n, + float * Aarray[], + int lda, + int * infoArray, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDpotrfBatched( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * Aarray[], + int lda, + int * infoArray, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCpotrfBatched( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * Aarray[], + int lda, + int * infoArray, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZpotrfBatched( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * Aarray[], + int lda, + int * infoArray, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnSpotrsBatched( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, /* only support rhs = 1*/ + float * A[], + int lda, + float * B[], + int ldb, + int * d_info, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDpotrsBatched( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, /* only support rhs = 1*/ + double * A[], + int lda, + double * B[], + int ldb, + int * d_info, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCpotrsBatched( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, /* only support rhs = 1*/ + cuComplex * A[], + int lda, + cuComplex * B[], + int ldb, + int * d_info, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZpotrsBatched( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, /* only support rhs = 1*/ + cuDoubleComplex * A[], + int lda, + cuDoubleComplex * B[], + int ldb, + int * d_info, + int batchSize); + + /* s.p.d. matrix inversion (POTRI) and auxiliary routines (TRTRI and LAUUM) */ + cusolverStatus_t CUSOLVERAPI cusolverDnSpotri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDpotri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCpotri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZpotri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSpotri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDpotri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCpotri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + cuComplex * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZpotri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnXtrtri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + cublasDiagType_t diag, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI 
cusolverDnXtrtri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + cublasDiagType_t diag, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * devInfo); + + /* lauum, auxiliar routine for s.p.d matrix inversion */ + cusolverStatus_t CUSOLVERAPI cusolverDnSlauum_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDlauum_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnClauum_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZlauum_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSlauum( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDlauum( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnClauum( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + cuComplex * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZlauum( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * work, + int lwork, + int * devInfo); + + /* LU Factorization */ + cusolverStatus_t CUSOLVERAPI cusolverDnSgetrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + float * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgetrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + double * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgetrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + cuComplex * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgetrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + cuDoubleComplex * A, + int lda, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgetrf( + cusolverDnHandle_t handle, + int m, + int n, + float * A, + int lda, + float * Workspace, + int * devIpiv, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgetrf( + cusolverDnHandle_t handle, + int m, + int n, + double * A, + int lda, + double * Workspace, + int * devIpiv, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgetrf( + cusolverDnHandle_t handle, + int m, + int n, + cuComplex * A, + int lda, + cuComplex * Workspace, + int * devIpiv, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgetrf( + cusolverDnHandle_t handle, + int m, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * Workspace, + int * devIpiv, + int * devInfo); + + /* Row pivoting */ + cusolverStatus_t CUSOLVERAPI cusolverDnSlaswp( + cusolverDnHandle_t handle, + int n, + float * A, + int lda, + int k1, + int k2, + const int * devIpiv, + int incx); + + cusolverStatus_t CUSOLVERAPI cusolverDnDlaswp( + cusolverDnHandle_t handle, + int n, + double * A, + int lda, + int k1, + int k2, + const int * devIpiv, + int incx); + + 
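For orientation, a minimal host-side sketch of the dense Cholesky workflow these prototypes imply: buffer-size query, in-place factorization, then triangular solve. This is an illustrative sketch, not part of the vendored header; the function and variable names below (solve_spd, d_A, d_B, n, nrhs) are assumptions for the example, and status checking is elided for brevity.

    #include <cuda_runtime.h>
    #include <cusolverDn.h>

    /* Solve A * X = B for s.p.d. A (n x n, column-major, device-resident),
     * using the legacy path declared above:
     * cusolverDnDpotrf_bufferSize -> cusolverDnDpotrf -> cusolverDnDpotrs. */
    static int solve_spd(int n, int nrhs, double *d_A, int lda, double *d_B, int ldb)
    {
        cusolverDnHandle_t handle;
        int lwork = 0, h_info = 0;
        int *d_info = NULL;
        double *d_work = NULL;

        cusolverDnCreate(&handle);                 /* status checks elided */
        cudaMalloc((void **)&d_info, sizeof(int));

        /* 1. Query the workspace size required by the factorization. */
        cusolverDnDpotrf_bufferSize(handle, CUBLAS_FILL_MODE_LOWER, n, d_A, lda, &lwork);
        cudaMalloc((void **)&d_work, sizeof(double) * lwork);

        /* 2. Factor A = L * L**T in place; devInfo > 0 flags a matrix
         * that is not positive definite. */
        cusolverDnDpotrf(handle, CUBLAS_FILL_MODE_LOWER, n, d_A, lda, d_work, lwork, d_info);

        /* 3. Overwrite B with the solution X using the factor. */
        cusolverDnDpotrs(handle, CUBLAS_FILL_MODE_LOWER, n, nrhs, d_A, lda, d_B, ldb, d_info);

        cudaMemcpy(&h_info, d_info, sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(d_work);
        cudaFree(d_info);
        cusolverDnDestroy(handle);
        return h_info;                             /* 0 on success */
    }

The same query/factor/solve shape recurs across the header: every legacy routine pairs with a _bufferSize query, and errors are reported through a device-side info scalar rather than the returned cusolverStatus_t.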
cusolverStatus_t CUSOLVERAPI cusolverDnClaswp( + cusolverDnHandle_t handle, + int n, + cuComplex * A, + int lda, + int k1, + int k2, + const int * devIpiv, + int incx); + + cusolverStatus_t CUSOLVERAPI cusolverDnZlaswp( + cusolverDnHandle_t handle, + int n, + cuDoubleComplex * A, + int lda, + int k1, + int k2, + const int * devIpiv, + int incx); + + /* LU solve */ + cusolverStatus_t CUSOLVERAPI cusolverDnSgetrs( + cusolverDnHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const float * A, + int lda, + const int * devIpiv, + float * B, + int ldb, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgetrs( + cusolverDnHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const double * A, + int lda, + const int * devIpiv, + double * B, + int ldb, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgetrs( + cusolverDnHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const cuComplex * A, + int lda, + const int * devIpiv, + cuComplex * B, + int ldb, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgetrs( + cusolverDnHandle_t handle, + cublasOperation_t trans, + int n, + int nrhs, + const cuDoubleComplex *A, + int lda, + const int * devIpiv, + cuDoubleComplex * B, + int ldb, + int * devInfo); + + /* QR factorization */ + cusolverStatus_t CUSOLVERAPI cusolverDnSgeqrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + float * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgeqrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + double * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgeqrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + cuComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgeqrf_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + cuDoubleComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgeqrf( + cusolverDnHandle_t handle, + int m, + int n, + float * A, + int lda, + float * TAU, + float * Workspace, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgeqrf( + cusolverDnHandle_t handle, + int m, + int n, + double * A, + int lda, + double * TAU, + double * Workspace, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgeqrf( + cusolverDnHandle_t handle, + int m, + int n, + cuComplex * A, + int lda, + cuComplex * TAU, + cuComplex * Workspace, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgeqrf( + cusolverDnHandle_t handle, + int m, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * TAU, + cuDoubleComplex * Workspace, + int Lwork, + int * devInfo); + + /* generate unitary matrix Q from QR factorization */ + cusolverStatus_t CUSOLVERAPI cusolverDnSorgqr_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int k, + const float * A, + int lda, + const float * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDorgqr_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int k, + const double * A, + int lda, + const double * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCungqr_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int k, + const cuComplex * A, + int lda, + const cuComplex * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZungqr_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int k, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *tau, + int * lwork); + + 
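The LU path just declared follows the same pattern; a minimal sketch, again illustrative rather than part of the vendored header (solve_lu and its arguments are assumed names, status checks elided):

    #include <cuda_runtime.h>
    #include <cusolverDn.h>

    /* Solve A * X = B for general square A (n x n, column-major,
     * device-resident) via
     * cusolverDnDgetrf_bufferSize -> cusolverDnDgetrf -> cusolverDnDgetrs. */
    static int solve_lu(int n, int nrhs, double *d_A, int lda, double *d_B, int ldb)
    {
        cusolverDnHandle_t handle;
        int lwork = 0, h_info = 0;
        int *d_ipiv = NULL, *d_info = NULL;
        double *d_work = NULL;

        cusolverDnCreate(&handle);
        cudaMalloc((void **)&d_ipiv, sizeof(int) * n);
        cudaMalloc((void **)&d_info, sizeof(int));

        /* 1. Workspace query, then allocation. */
        cusolverDnDgetrf_bufferSize(handle, n, n, d_A, lda, &lwork);
        cudaMalloc((void **)&d_work, sizeof(double) * lwork);

        /* 2. P * A = L * U with partial pivoting; pivot indices land in
         * d_ipiv (note: legacy getrf takes no lwork argument). */
        cusolverDnDgetrf(handle, n, n, d_A, lda, d_work, d_ipiv, d_info);

        /* 3. Forward/back substitution: B is overwritten with X. */
        cusolverDnDgetrs(handle, CUBLAS_OP_N, n, nrhs, d_A, lda, d_ipiv, d_B, ldb, d_info);

        cudaMemcpy(&h_info, d_info, sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(d_work);
        cudaFree(d_ipiv);
        cudaFree(d_info);
        cusolverDnDestroy(handle);
        return h_info;    /* 0 on success; > 0 from getrf flags a singular U */
    }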
cusolverStatus_t CUSOLVERAPI cusolverDnSorgqr( + cusolverDnHandle_t handle, + int m, + int n, + int k, + float * A, + int lda, + const float * tau, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDorgqr( + cusolverDnHandle_t handle, + int m, + int n, + int k, + double * A, + int lda, + const double * tau, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCungqr( + cusolverDnHandle_t handle, + int m, + int n, + int k, + cuComplex * A, + int lda, + const cuComplex * tau, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZungqr( + cusolverDnHandle_t handle, + int m, + int n, + int k, + cuDoubleComplex * A, + int lda, + const cuDoubleComplex *tau, + cuDoubleComplex * work, + int lwork, + int * info); + + /* compute Q**T*b in solve min||A*x = b|| */ + cusolverStatus_t CUSOLVERAPI cusolverDnSormqr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const float * A, + int lda, + const float * tau, + const float * C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDormqr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const double * A, + int lda, + const double * tau, + const double * C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCunmqr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const cuComplex * A, + int lda, + const cuComplex * tau, + const cuComplex * C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZunmqr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *tau, + const cuDoubleComplex *C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSormqr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const float * A, + int lda, + const float * tau, + float * C, + int ldc, + float * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDormqr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const double * A, + int lda, + const double * tau, + double * C, + int ldc, + double * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCunmqr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const cuComplex * A, + int lda, + const cuComplex * tau, + cuComplex * C, + int ldc, + cuComplex * work, + int lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZunmqr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasOperation_t trans, + int m, + int n, + int k, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *tau, + cuDoubleComplex * C, + int ldc, + cuDoubleComplex * work, + int lwork, + int * devInfo); + + /* L*D*L**T,U*D*U**T factorization */ + cusolverStatus_t CUSOLVERAPI cusolverDnSsytrf_bufferSize( + cusolverDnHandle_t handle, + int n, + float * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsytrf_bufferSize( + cusolverDnHandle_t handle, + int n, + double * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCsytrf_bufferSize( + 
cusolverDnHandle_t handle, + int n, + cuComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZsytrf_bufferSize( + cusolverDnHandle_t handle, + int n, + cuDoubleComplex * A, + int lda, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsytrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + int * ipiv, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsytrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + int * ipiv, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCsytrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + int * ipiv, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZsytrf( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + int * ipiv, + cuDoubleComplex * work, + int lwork, + int * info); + + /* Symmetric indefinite solve (SYTRS) */ + cusolverStatus_t CUSOLVERAPI cusolverDnXsytrs_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t nrhs, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + const int64_t * ipiv, + cudaDataType dataTypeB, + void * B, + int64_t ldb, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXsytrs( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int64_t n, + int64_t nrhs, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + const int64_t * ipiv, + cudaDataType dataTypeB, + void * B, + int64_t ldb, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info); + + /* Symmetric indefinite inversion (sytri) */ + cusolverStatus_t CUSOLVERAPI cusolverDnSsytri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + const int * ipiv, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsytri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + const int * ipiv, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCsytri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + const int * ipiv, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZsytri_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + const int * ipiv, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsytri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + const int * ipiv, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsytri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + const int * ipiv, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCsytri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + const int * ipiv, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZsytri( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + const int * ipiv, + cuDoubleComplex * work, + int lwork, + int * info); + + /* bidiagonal factorization */ + cusolverStatus_t CUSOLVERAPI 
cusolverDnSgebrd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgebrd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgebrd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgebrd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * Lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgebrd( + cusolverDnHandle_t handle, + int m, + int n, + float * A, + int lda, + float * D, + float * E, + float * TAUQ, + float * TAUP, + float * Work, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgebrd( + cusolverDnHandle_t handle, + int m, + int n, + double * A, + int lda, + double * D, + double * E, + double * TAUQ, + double * TAUP, + double * Work, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgebrd( + cusolverDnHandle_t handle, + int m, + int n, + cuComplex * A, + int lda, + float * D, + float * E, + cuComplex * TAUQ, + cuComplex * TAUP, + cuComplex * Work, + int Lwork, + int * devInfo); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgebrd( + cusolverDnHandle_t handle, + int m, + int n, + cuDoubleComplex * A, + int lda, + double * D, + double * E, + cuDoubleComplex * TAUQ, + cuDoubleComplex * TAUP, + cuDoubleComplex * Work, + int Lwork, + int * devInfo); + + /* generates one of the unitary matrices Q or P**T determined by GEBRD*/ + cusolverStatus_t CUSOLVERAPI cusolverDnSorgbr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + const float * A, + int lda, + const float * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDorgbr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + const double * A, + int lda, + const double * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCungbr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + const cuComplex * A, + int lda, + const cuComplex * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZungbr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSorgbr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + float * A, + int lda, + const float * tau, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDorgbr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + double * A, + int lda, + const double * tau, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCungbr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + cuComplex * A, + int lda, + const cuComplex * tau, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZungbr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + int m, + int n, + int k, + cuDoubleComplex * A, + int lda, + const cuDoubleComplex *tau, + cuDoubleComplex * work, + int lwork, + int * info); + + /* tridiagonal factorization */ + cusolverStatus_t CUSOLVERAPI cusolverDnSsytrd_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * d, + const float * e, + const float * 
tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsytrd_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * d, + const double * e, + const double * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnChetrd_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const float * d, + const float * e, + const cuComplex * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhetrd_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const double * d, + const double * e, + const cuDoubleComplex *tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsytrd( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * d, + float * e, + float * tau, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsytrd( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * d, + double * e, + double * tau, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnChetrd( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + float * d, + float * e, + cuComplex * tau, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhetrd( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + double * d, + double * e, + cuDoubleComplex * tau, + cuDoubleComplex * work, + int lwork, + int * info); + + /* generate unitary Q comes from sytrd */ + cusolverStatus_t CUSOLVERAPI cusolverDnSorgtr_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDorgtr_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCungtr_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const cuComplex * tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZungtr_bufferSize( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *tau, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSorgtr( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + const float * tau, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDorgtr( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + const double * tau, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCungtr( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + const cuComplex * tau, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZungtr( + cusolverDnHandle_t handle, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + const cuDoubleComplex *tau, + cuDoubleComplex * work, + int lwork, + int * info); + + /* compute op(Q)*C or C*op(Q) where Q comes from sytrd */ + cusolverStatus_t CUSOLVERAPI cusolverDnSormtr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t 
side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + const float * A, + int lda, + const float * tau, + const float * C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDormtr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + const double * A, + int lda, + const double * tau, + const double * C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCunmtr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + const cuComplex * A, + int lda, + const cuComplex * tau, + const cuComplex * C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZunmtr_bufferSize( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *tau, + const cuDoubleComplex *C, + int ldc, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSormtr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + float * A, + int lda, + float * tau, + float * C, + int ldc, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDormtr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + double * A, + int lda, + double * tau, + double * C, + int ldc, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCunmtr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + cuComplex * A, + int lda, + cuComplex * tau, + cuComplex * C, + int ldc, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZunmtr( + cusolverDnHandle_t handle, + cublasSideMode_t side, + cublasFillMode_t uplo, + cublasOperation_t trans, + int m, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * tau, + cuDoubleComplex * C, + int ldc, + cuDoubleComplex * work, + int lwork, + int * info); + + /* singular value decomposition, A = U * Sigma * V^H */ + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvd_bufferSize( + cusolverDnHandle_t handle, + int m, + int n, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvd( + cusolverDnHandle_t handle, + signed char jobu, + signed char jobvt, + int m, + int n, + float * A, + int lda, + float * S, + float * U, + int ldu, + float * VT, + int ldvt, + float * work, + int lwork, + float * rwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvd( + cusolverDnHandle_t handle, + signed char jobu, + signed char jobvt, + int m, + int n, + double * A, + int lda, + double * S, + double * U, + int ldu, + double * VT, + int ldvt, + double * work, + int lwork, + double * rwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvd( + cusolverDnHandle_t handle, + signed char jobu, + signed char jobvt, + int m, + int n, + cuComplex * A, + int 
lda, + float * S, + cuComplex * U, + int ldu, + cuComplex * VT, + int ldvt, + cuComplex * work, + int lwork, + float * rwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvd( + cusolverDnHandle_t handle, + signed char jobu, + signed char jobvt, + int m, + int n, + cuDoubleComplex * A, + int lda, + double * S, + cuDoubleComplex * U, + int ldu, + cuDoubleComplex * VT, + int ldvt, + cuDoubleComplex * work, + int lwork, + double * rwork, + int * info); + + /* standard symmetric eigenvalue solver, A*x = lambda*x, by divide-and-conquer + */ + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevd( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * W, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevd( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * W, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevd( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + float * W, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevd( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + double * W, + cuDoubleComplex * work, + int lwork, + int * info); + + /* standard selective symmetric eigenvalue solver, A*x = lambda*x, by + * divide-and-conquer */ + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + float vl, + float vu, + int il, + int iu, + int * meig, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + double vl, + double vu, + int il, + int iu, + int * meig, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + float vl, + float vu, + int il, + int iu, + int * meig, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const 
cuDoubleComplex *A, + int lda, + double vl, + double vu, + int il, + int iu, + int * meig, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevdx( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float vl, + float vu, + int il, + int iu, + int * meig, + float * W, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevdx( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double vl, + double vu, + int il, + int iu, + int * meig, + double * W, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevdx( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + float vl, + float vu, + int il, + int iu, + int * meig, + float * W, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevdx( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + double vl, + double vu, + int il, + int iu, + int * meig, + double * W, + cuDoubleComplex * work, + int lwork, + int * info); + + /* selective generalized symmetric eigenvalue solver, A*x = lambda*B*x, by + * divide-and-conquer */ + cusolverStatus_t CUSOLVERAPI cusolverDnSsygvdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * B, + int ldb, + float vl, + float vu, + int il, + int iu, + int * meig, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsygvdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * B, + int ldb, + double vl, + double vu, + int il, + int iu, + int * meig, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnChegvdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const cuComplex * B, + int ldb, + float vl, + float vu, + int il, + int iu, + int * meig, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhegvdx_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *B, + int ldb, + double vl, + double vu, + int il, + int iu, + int * meig, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsygvdx( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * B, + int ldb, + float vl, + float vu, + int il, + int iu, + int * meig, + float * W, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsygvdx( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + 
double * B, + int ldb, + double vl, + double vu, + int il, + int iu, + int * meig, + double * W, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnChegvdx( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + cuComplex * B, + int ldb, + float vl, + float vu, + int il, + int iu, + int * meig, + float * W, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhegvdx( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * B, + int ldb, + double vl, + double vu, + int il, + int iu, + int * meig, + double * W, + cuDoubleComplex * work, + int lwork, + int * info); + + /* generalized symmetric eigenvalue solver, A*x = lambda*B*x, by + * divide-and-conquer */ + cusolverStatus_t CUSOLVERAPI cusolverDnSsygvd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * B, + int ldb, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsygvd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * B, + int ldb, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnChegvd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const cuComplex * B, + int ldb, + const float * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhegvd_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *B, + int ldb, + const double * W, + int * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsygvd( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * B, + int ldb, + float * W, + float * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsygvd( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * B, + int ldb, + double * W, + double * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnChegvd( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + cuComplex * B, + int ldb, + float * W, + cuComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhegvd( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * B, + int ldb, + double * W, + cuDoubleComplex * work, + int lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverDnCreateSyevjInfo(syevjInfo_t *info); + + cusolverStatus_t CUSOLVERAPI cusolverDnDestroySyevjInfo(syevjInfo_t info); + + cusolverStatus_t CUSOLVERAPI + cusolverDnXsyevjSetTolerance(syevjInfo_t info, double tolerance); + + cusolverStatus_t CUSOLVERAPI + 
cusolverDnXsyevjSetMaxSweeps(syevjInfo_t info, int max_sweeps); + + cusolverStatus_t CUSOLVERAPI + cusolverDnXsyevjSetSortEig(syevjInfo_t info, int sort_eig); + + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevjGetResidual( + cusolverDnHandle_t handle, + syevjInfo_t info, + double * residual); + + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevjGetSweeps( + cusolverDnHandle_t handle, + syevjInfo_t info, + int * executed_sweeps); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * W, + int * lwork, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * W, + int * lwork, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const float * W, + int * lwork, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const double * W, + int * lwork, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * W, + float * work, + int lwork, + int * info, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * W, + double * work, + int lwork, + int * info, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + float * W, + cuComplex * work, + int lwork, + int * info, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + double * W, + cuDoubleComplex * work, + int lwork, + int * info, + syevjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * W, + int * lwork, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * W, + int * lwork, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const float * W, + int * lwork, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const double * W, + int * lwork, + syevjInfo_t 
params); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsyevj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * W, + float * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsyevj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * W, + double * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnCheevj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + float * W, + cuComplex * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnZheevj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + double * W, + cuDoubleComplex * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsygvj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const float * A, + int lda, + const float * B, + int ldb, + const float * W, + int * lwork, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsygvj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const double * A, + int lda, + const double * B, + int ldb, + const double * W, + int * lwork, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnChegvj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuComplex * A, + int lda, + const cuComplex * B, + int ldb, + const float * W, + int * lwork, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhegvj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + const cuDoubleComplex *A, + int lda, + const cuDoubleComplex *B, + int ldb, + const double * W, + int * lwork, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnSsygvj( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + float * A, + int lda, + float * B, + int ldb, + float * W, + float * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnDsygvj( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + double * A, + int lda, + double * B, + int ldb, + double * W, + double * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnChegvj( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuComplex * A, + int lda, + cuComplex * B, + int ldb, + float * W, + cuComplex * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnZhegvj( + cusolverDnHandle_t handle, + cusolverEigType_t itype, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int n, + cuDoubleComplex * A, + int lda, + cuDoubleComplex * B, + int ldb, + double * W, + cuDoubleComplex * work, + int lwork, + int * info, + syevjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnCreateGesvdjInfo(gesvdjInfo_t *info); + + cusolverStatus_t 
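/*
 * Usage sketch (illustrative) for the Jacobi symmetric eigensolver (syevj)
 * declared above: the syevjInfo_t descriptor carries the stopping criteria
 * and, after the solve, the achieved residual and sweep count. Assumes
 * device buffers d_A, d_W and d_info; error checking is omitted:
 *
 *   syevjInfo_t syevj_params;
 *   int lwork, executed_sweeps;
 *   double *d_work, residual;
 *   cusolverDnCreateSyevjInfo(&syevj_params);
 *   cusolverDnXsyevjSetTolerance(syevj_params, 1.e-10);  // convergence tolerance
 *   cusolverDnXsyevjSetMaxSweeps(syevj_params, 15);      // cap on Jacobi sweeps
 *   cusolverDnDsyevj_bufferSize(handle, CUSOLVER_EIG_MODE_VECTOR,
 *       CUBLAS_FILL_MODE_LOWER, n, d_A, lda, d_W, &lwork, syevj_params);
 *   cudaMalloc((void **)&d_work, sizeof(double) * lwork);
 *   cusolverDnDsyevj(handle, CUSOLVER_EIG_MODE_VECTOR,
 *       CUBLAS_FILL_MODE_LOWER, n, d_A, lda, d_W, d_work, lwork,
 *       d_info, syevj_params);
 *   cusolverDnXsyevjGetResidual(handle, syevj_params, &residual);
 *   cusolverDnXsyevjGetSweeps(handle, syevj_params, &executed_sweeps);
 *   cusolverDnDestroySyevjInfo(syevj_params);
 */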
CUSOLVERAPI cusolverDnDestroyGesvdjInfo(gesvdjInfo_t info); + + cusolverStatus_t CUSOLVERAPI + cusolverDnXgesvdjSetTolerance(gesvdjInfo_t info, double tolerance); + + cusolverStatus_t CUSOLVERAPI + cusolverDnXgesvdjSetMaxSweeps(gesvdjInfo_t info, int max_sweeps); + + cusolverStatus_t CUSOLVERAPI + cusolverDnXgesvdjSetSortEig(gesvdjInfo_t info, int sort_svd); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvdjGetResidual( + cusolverDnHandle_t handle, + gesvdjInfo_t info, + double * residual); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvdjGetSweeps( + cusolverDnHandle_t handle, + gesvdjInfo_t info, + int * executed_sweeps); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvdjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + const float * A, + int lda, + const float * S, + const float * U, + int ldu, + const float * V, + int ldv, + int * lwork, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvdjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + const double * A, + int lda, + const double * S, + const double * U, + int ldu, + const double * V, + int ldv, + int * lwork, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvdjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + const cuComplex * A, + int lda, + const float * S, + const cuComplex * U, + int ldu, + const cuComplex * V, + int ldv, + int * lwork, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvdjBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + const cuDoubleComplex *A, + int lda, + const double * S, + const cuDoubleComplex *U, + int ldu, + const cuDoubleComplex *V, + int ldv, + int * lwork, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvdjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + float * A, + int lda, + float * S, + float * U, + int ldu, + float * V, + int ldv, + float * work, + int lwork, + int * info, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvdjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + double * A, + int lda, + double * S, + double * U, + int ldu, + double * V, + int ldv, + double * work, + int lwork, + int * info, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvdjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + cuComplex * A, + int lda, + float * S, + cuComplex * U, + int ldu, + cuComplex * V, + int ldv, + cuComplex * work, + int lwork, + int * info, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvdjBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int m, + int n, + cuDoubleComplex * A, + int lda, + double * S, + cuDoubleComplex * U, + int ldu, + cuDoubleComplex * V, + int ldv, + cuDoubleComplex * work, + int lwork, + int * info, + gesvdjInfo_t params, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvdj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + const float * A, + int lda, + const float * S, + const float * U, + int ldu, + const float * V, + int ldv, + int * lwork, + gesvdjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvdj_bufferSize( + cusolverDnHandle_t 
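/*
 * Usage sketch (illustrative) for the Jacobi-based SVD (gesvdj): create the
 * gesvdjInfo_t descriptor, optionally tune it, query the workspace, then
 * factor. Assumes device buffers d_A (m-by-n), d_S, d_U, d_V and d_info;
 * econ = 0 requests full-size U and V:
 *
 *   gesvdjInfo_t gesvdj_params;
 *   int lwork;
 *   double *d_work;
 *   cusolverDnCreateGesvdjInfo(&gesvdj_params);
 *   cusolverDnXgesvdjSetTolerance(gesvdj_params, 1.e-7);
 *   cusolverDnXgesvdjSetMaxSweeps(gesvdj_params, 100);
 *   cusolverDnDgesvdj_bufferSize(handle, CUSOLVER_EIG_MODE_VECTOR, 0, m, n,
 *       d_A, lda, d_S, d_U, ldu, d_V, ldv, &lwork, gesvdj_params);
 *   cudaMalloc((void **)&d_work, sizeof(double) * lwork);
 *   cusolverDnDgesvdj(handle, CUSOLVER_EIG_MODE_VECTOR, 0, m, n, d_A, lda,
 *       d_S, d_U, ldu, d_V, ldv, d_work, lwork, d_info, gesvdj_params);
 *   cusolverDnDestroyGesvdjInfo(gesvdj_params);
 */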
handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + const double * A, + int lda, + const double * S, + const double * U, + int ldu, + const double * V, + int ldv, + int * lwork, + gesvdjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvdj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + const cuComplex * A, + int lda, + const float * S, + const cuComplex * U, + int ldu, + const cuComplex * V, + int ldv, + int * lwork, + gesvdjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvdj_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + const cuDoubleComplex *A, + int lda, + const double * S, + const cuDoubleComplex *U, + int ldu, + const cuDoubleComplex *V, + int ldv, + int * lwork, + gesvdjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvdj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + float * A, + int lda, + float * S, + float * U, + int ldu, + float * V, + int ldv, + float * work, + int lwork, + int * info, + gesvdjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvdj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + double * A, + int lda, + double * S, + double * U, + int ldu, + double * V, + int ldv, + double * work, + int lwork, + int * info, + gesvdjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvdj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + cuComplex * A, + int lda, + float * S, + cuComplex * U, + int ldu, + cuComplex * V, + int ldv, + cuComplex * work, + int lwork, + int * info, + gesvdjInfo_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvdj( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int econ, + int m, + int n, + cuDoubleComplex * A, + int lda, + double * S, + cuDoubleComplex * U, + int ldu, + cuDoubleComplex * V, + int ldv, + cuDoubleComplex * work, + int lwork, + int * info, + gesvdjInfo_t params); + + /* batched approximate SVD */ + + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvdaStridedBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const float * d_A, + int lda, + long long int strideA, + const float * d_S, + long long int strideS, + const float * d_U, + int ldu, + long long int strideU, + const float * d_V, + int ldv, + long long int strideV, + int * lwork, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvdaStridedBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const double * d_A, + int lda, + long long int strideA, + const double * d_S, + long long int strideS, + const double * d_U, + int ldu, + long long int strideU, + const double * d_V, + int ldv, + long long int strideV, + int * lwork, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvdaStridedBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const cuComplex * d_A, + int lda, + long long int strideA, + const float * d_S, + long long int strideS, + const cuComplex * d_U, + int ldu, + long long int strideU, + const cuComplex * d_V, + int ldv, + long long int strideV, + int * lwork, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvdaStridedBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const cuDoubleComplex *d_A, + int lda, 
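/*
 * Usage sketch (illustrative) for the batched approximate SVD above:
 * batchSize m-by-n matrices stored at fixed strides in d_A; the leading
 * `rank` singular values/vectors of each matrix are approximated, and
 * h_R_nrmF (a host array of batchSize doubles) receives the residual norm
 * of each approximation. Error checking is omitted:
 *
 *   int lwork;
 *   float *d_work;
 *   cusolverDnSgesvdaStridedBatched_bufferSize(handle,
 *       CUSOLVER_EIG_MODE_VECTOR, rank, m, n, d_A, lda, strideA,
 *       d_S, strideS, d_U, ldu, strideU, d_V, ldv, strideV,
 *       &lwork, batchSize);
 *   cudaMalloc((void **)&d_work, sizeof(float) * lwork);
 *   cusolverDnSgesvdaStridedBatched(handle, CUSOLVER_EIG_MODE_VECTOR,
 *       rank, m, n, d_A, lda, strideA, d_S, strideS, d_U, ldu, strideU,
 *       d_V, ldv, strideV, d_work, lwork, d_info, h_R_nrmF, batchSize);
 */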
+ long long int strideA, + const double * d_S, + long long int strideS, + const cuDoubleComplex *d_U, + int ldu, + long long int strideU, + const cuDoubleComplex *d_V, + int ldv, + long long int strideV, + int * lwork, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnSgesvdaStridedBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const float * d_A, + int lda, + long long int strideA, + float * d_S, + long long int strideS, + float * d_U, + int ldu, + long long int strideU, + float * d_V, + int ldv, + long long int strideV, + float * d_work, + int lwork, + int * d_info, + double * h_R_nrmF, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnDgesvdaStridedBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const double * d_A, + int lda, + long long int strideA, + double * d_S, + long long int strideS, + double * d_U, + int ldu, + long long int strideU, + double * d_V, + int ldv, + long long int strideV, + double * d_work, + int lwork, + int * d_info, + double * h_R_nrmF, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnCgesvdaStridedBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const cuComplex * d_A, + int lda, + long long int strideA, + float * d_S, + long long int strideS, + cuComplex * d_U, + int ldu, + long long int strideU, + cuComplex * d_V, + int ldv, + long long int strideV, + cuComplex * d_work, + int lwork, + int * d_info, + double * h_R_nrmF, + int batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnZgesvdaStridedBatched( + cusolverDnHandle_t handle, + cusolverEigMode_t jobz, + int rank, + int m, + int n, + const cuDoubleComplex *d_A, + int lda, + long long int strideA, + double * d_S, + long long int strideS, + cuDoubleComplex * d_U, + int ldu, + long long int strideU, + cuDoubleComplex * d_V, + int ldv, + long long int strideV, + cuDoubleComplex * d_work, + int lwork, + int * d_info, + double * h_R_nrmF, + int batchSize); + + cusolverStatus_t CUSOLVERAPI + cusolverDnCreateParams(cusolverDnParams_t *params); + + cusolverStatus_t CUSOLVERAPI + cusolverDnDestroyParams(cusolverDnParams_t params); + + cusolverStatus_t CUSOLVERAPI cusolverDnSetAdvOptions( + cusolverDnParams_t params, + cusolverDnFunction_t function, + cusolverAlgMode_t algo); + + /* 64-bit API for POTRF */ + CUSOLVER_DEPRECATED(cusolverDnXpotrf_bufferSize) + cusolverStatus_t CUSOLVERAPI cusolverDnPotrf_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType computeType, + size_t * workspaceInBytes); + + CUSOLVER_DEPRECATED(cusolverDnXpotrf) + cusolverStatus_t CUSOLVERAPI cusolverDnPotrf( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType computeType, + void * pBuffer, + size_t workspaceInBytes, + int * info); + + /* 64-bit API for POTRS */ + CUSOLVER_DEPRECATED(cusolverDnXpotrs) + cusolverStatus_t CUSOLVERAPI cusolverDnPotrs( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasFillMode_t uplo, + int64_t n, + int64_t nrhs, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeB, + void * B, + int64_t ldb, + int * info); + + /* 64-bit API for GEQRF */ + CUSOLVER_DEPRECATED(cusolverDnXgeqrf_bufferSize) + cusolverStatus_t CUSOLVERAPI cusolverDnGeqrf_bufferSize( + 
cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeTau, + const void * tau, + cudaDataType computeType, + size_t * workspaceInBytes); + + CUSOLVER_DEPRECATED(cusolverDnXgeqrf) + cusolverStatus_t CUSOLVERAPI cusolverDnGeqrf( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeTau, + void * tau, + cudaDataType computeType, + void * pBuffer, + size_t workspaceInBytes, + int * info); + + /* 64-bit API for GETRF */ + CUSOLVER_DEPRECATED(cusolverDnXgetrf_bufferSize) + cusolverStatus_t CUSOLVERAPI cusolverDnGetrf_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType computeType, + size_t * workspaceInBytes); + + CUSOLVER_DEPRECATED(cusolverDnXgetrf) + cusolverStatus_t CUSOLVERAPI cusolverDnGetrf( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + int64_t * ipiv, + cudaDataType computeType, + void * pBuffer, + size_t workspaceInBytes, + int * info); + + /* 64-bit API for GETRS */ + CUSOLVER_DEPRECATED(cusolverDnXgetrs) + cusolverStatus_t CUSOLVERAPI cusolverDnGetrs( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasOperation_t trans, + int64_t n, + int64_t nrhs, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + const int64_t * ipiv, + cudaDataType dataTypeB, + void * B, + int64_t ldb, + int * info); + + /* 64-bit API for SYEVD */ + CUSOLVER_DEPRECATED(cusolverDnXsyevd_bufferSize) + cusolverStatus_t CUSOLVERAPI cusolverDnSyevd_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeW, + const void * W, + cudaDataType computeType, + size_t * workspaceInBytes); + + CUSOLVER_DEPRECATED(cusolverDnXsyevd) + cusolverStatus_t CUSOLVERAPI cusolverDnSyevd( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeW, + void * W, + cudaDataType computeType, + void * pBuffer, + size_t workspaceInBytes, + int * info); + + /* 64-bit API for SYEVDX */ + CUSOLVER_DEPRECATED(cusolverDnXsyevdx_bufferSize) + cusolverStatus_t CUSOLVERAPI cusolverDnSyevdx_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + void * vl, + void * vu, + int64_t il, + int64_t iu, + int64_t * h_meig, + cudaDataType dataTypeW, + const void * W, + cudaDataType computeType, + size_t * workspaceInBytes); + + CUSOLVER_DEPRECATED(cusolverDnXsyevdx) + cusolverStatus_t CUSOLVERAPI cusolverDnSyevdx( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + void * vl, + void * vu, + int64_t il, + int64_t iu, + int64_t * meig64, + cudaDataType dataTypeW, + void * W, + cudaDataType computeType, + void * pBuffer, + size_t workspaceInBytes, + int * info); + + /* 64-bit API for GESVD */ + 
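/*
 * Usage sketch (illustrative) for the legacy 64-bit GESVD entry points
 * declared below. The pattern is shared by this whole deprecated family:
 * create a cusolverDnParams_t, query one device workspace size in bytes,
 * then call the solver. Assumes double-precision device buffers d_A, d_S,
 * d_U, d_VT and d_info; 'A' requests all columns of U and rows of VT:
 *
 *   cusolverDnParams_t params;
 *   size_t workspaceInBytes;
 *   void *d_work;
 *   cusolverDnCreateParams(&params);
 *   cusolverDnGesvd_bufferSize(handle, params, 'A', 'A', m, n,
 *       CUDA_R_64F, d_A, lda, CUDA_R_64F, d_S, CUDA_R_64F, d_U, ldu,
 *       CUDA_R_64F, d_VT, ldvt, CUDA_R_64F, &workspaceInBytes);
 *   cudaMalloc(&d_work, workspaceInBytes);
 *   cusolverDnGesvd(handle, params, 'A', 'A', m, n, CUDA_R_64F, d_A, lda,
 *       CUDA_R_64F, d_S, CUDA_R_64F, d_U, ldu, CUDA_R_64F, d_VT, ldvt,
 *       CUDA_R_64F, d_work, workspaceInBytes, d_info);
 *   cusolverDnDestroyParams(params);
 */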
CUSOLVER_DEPRECATED(cusolverDnXgesvd_bufferSize) + cusolverStatus_t CUSOLVERAPI cusolverDnGesvd_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + signed char jobu, + signed char jobvt, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeS, + const void * S, + cudaDataType dataTypeU, + const void * U, + int64_t ldu, + cudaDataType dataTypeVT, + const void * VT, + int64_t ldvt, + cudaDataType computeType, + size_t * workspaceInBytes); + + CUSOLVER_DEPRECATED(cusolverDnXgesvd) + cusolverStatus_t CUSOLVERAPI cusolverDnGesvd( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + signed char jobu, + signed char jobvt, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeS, + void * S, + cudaDataType dataTypeU, + void * U, + int64_t ldu, + cudaDataType dataTypeVT, + void * VT, + int64_t ldvt, + cudaDataType computeType, + void * pBuffer, + size_t workspaceInBytes, + int * info); + + /* + * new 64-bit API + */ + /* 64-bit API for POTRF */ + cusolverStatus_t CUSOLVERAPI cusolverDnXpotrf_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXpotrf( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info); + + /* 64-bit API for POTRS */ + cusolverStatus_t CUSOLVERAPI cusolverDnXpotrs( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasFillMode_t uplo, + int64_t n, + int64_t nrhs, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeB, + void * B, + int64_t ldb, + int * info); + + /* 64-bit API for GEQRF */ + cusolverStatus_t CUSOLVERAPI cusolverDnXgeqrf_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeTau, + const void * tau, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgeqrf( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeTau, + void * tau, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info); + + /* 64-bit API for GETRF */ + cusolverStatus_t CUSOLVERAPI cusolverDnXgetrf_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgetrf( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + int64_t * ipiv, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info); + + /* 
64-bit API for GETRS */ + cusolverStatus_t CUSOLVERAPI cusolverDnXgetrs( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cublasOperation_t trans, + int64_t n, + int64_t nrhs, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + const int64_t * ipiv, + cudaDataType dataTypeB, + void * B, + int64_t ldb, + int * info); + + /* 64-bit API for SYEVD */ + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevd_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeW, + const void * W, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevd( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeW, + void * W, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info); + + /* 64-bit API for batched SYEVD */ + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevBatched_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeW, + const void * W, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost, + int64_t batchSize); + + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevBatched( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeW, + void * W, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info, + int64_t batchSize); + + /* 64-bit API for SYEVDX */ + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevdx_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + void * vl, + void * vu, + int64_t il, + int64_t iu, + int64_t * h_meig, + cudaDataType dataTypeW, + const void * W, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXsyevdx( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + cusolverEigRange_t range, + cublasFillMode_t uplo, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + void * vl, + void * vu, + int64_t il, + int64_t iu, + int64_t * meig64, + cudaDataType dataTypeW, + void * W, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info); + + /* 64-bit API for GEEV */ + cusolverStatus_t CUSOLVERAPI cusolverDnXgeev_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobvl, + cusolverEigMode_t jobvr, + int64_t n, + cudaDataType dataTypeA, + const void* A, + int64_t lda, + cudaDataType dataTypeW, + const void* W, + cudaDataType dataTypeVL, + const void* VL, + int64_t ldvl, + cudaDataType dataTypeVR, + const void* VR, + 
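/*
 * Usage sketch (illustrative) for the newer cusolverDnX* 64-bit family,
 * shown here with Xsyevd as declared above. Unlike the deprecated variants,
 * these routines take two workspaces, one on the device and one on the
 * host. Assumes device buffers d_A, d_W and d_info:
 *
 *   cusolverDnParams_t params;
 *   size_t dBytes, hBytes;
 *   void *d_buf, *h_buf;
 *   cusolverDnCreateParams(&params);
 *   cusolverDnXsyevd_bufferSize(handle, params, CUSOLVER_EIG_MODE_VECTOR,
 *       CUBLAS_FILL_MODE_LOWER, n, CUDA_R_64F, d_A, lda, CUDA_R_64F, d_W,
 *       CUDA_R_64F, &dBytes, &hBytes);
 *   cudaMalloc(&d_buf, dBytes);
 *   h_buf = malloc(hBytes);   // may be 0 bytes; guard in real code
 *   cusolverDnXsyevd(handle, params, CUSOLVER_EIG_MODE_VECTOR,
 *       CUBLAS_FILL_MODE_LOWER, n, CUDA_R_64F, d_A, lda, CUDA_R_64F, d_W,
 *       CUDA_R_64F, d_buf, dBytes, h_buf, hBytes, d_info);
 */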
int64_t ldvr, + cudaDataType computeType, + size_t* workspaceInBytesOnDevice, + size_t* workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgeev( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobvl, + cusolverEigMode_t jobvr, + int64_t n, + cudaDataType dataTypeA, + void* A, + int64_t lda, + cudaDataType dataTypeW, + void* W, + cudaDataType dataTypeVL, + void* VL, + int64_t ldvl, + cudaDataType dataTypeVR, + void* VR, + int64_t ldvr, + cudaDataType computeType, + void* bufferOnDevice, + size_t workspaceInBytesOnDevice, + void* bufferOnHost, + size_t workspaceInBytesOnHost, + int* info); + + /* 64-bit API for GESVD */ + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvd_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + signed char jobu, + signed char jobvt, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeS, + const void * S, + cudaDataType dataTypeU, + const void * U, + int64_t ldu, + cudaDataType dataTypeVT, + const void * VT, + int64_t ldvt, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvd( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + signed char jobu, + signed char jobvt, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeS, + void * S, + cudaDataType dataTypeU, + void * U, + int64_t ldu, + cudaDataType dataTypeVT, + void * VT, + int64_t ldvt, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * info); + + /* 64-bit API for GESVDP */ + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvdp_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + int econ, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeS, + const void * S, + cudaDataType dataTypeU, + const void * U, + int64_t ldu, + cudaDataType dataTypeV, + const void * V, + int64_t ldv, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvdp( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverEigMode_t jobz, + int econ, + int64_t m, + int64_t n, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeS, + void * S, + cudaDataType dataTypeU, + void * U, + int64_t ldu, + cudaDataType dataTypeV, + void * V, + int64_t ldv, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * d_info, + double * h_err_sigma); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvdr_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + signed char jobu, + signed char jobv, + int64_t m, + int64_t n, + int64_t k, + int64_t p, + int64_t niters, + cudaDataType dataTypeA, + const void * A, + int64_t lda, + cudaDataType dataTypeSrand, + const void * Srand, + cudaDataType dataTypeUrand, + const void * Urand, + int64_t ldUrand, + cudaDataType dataTypeVrand, + const void * Vrand, + int64_t ldVrand, + cudaDataType computeType, + size_t * workspaceInBytesOnDevice, + size_t * workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXgesvdr( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + signed char jobu, + signed char jobv, + 
int64_t m, + int64_t n, + int64_t k, + int64_t p, + int64_t niters, + cudaDataType dataTypeA, + void * A, + int64_t lda, + cudaDataType dataTypeSrand, + void * Srand, + cudaDataType dataTypeUrand, + void * Urand, + int64_t ldUrand, + cudaDataType dataTypeVrand, + void * Vrand, + int64_t ldVrand, + cudaDataType computeType, + void * bufferOnDevice, + size_t workspaceInBytesOnDevice, + void * bufferOnHost, + size_t workspaceInBytesOnHost, + int * d_info); + + cusolverStatus_t CUSOLVERAPI cusolverDnXlarft_bufferSize( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverDirectMode_t direct, + cusolverStorevMode_t storev, + int64_t n, + int64_t k, + cudaDataType dataTypeV, + const void *V, + int64_t ldv, + cudaDataType dataTypeTau, + const void *tau, + cudaDataType dataTypeT, + void *T, + int64_t ldt, + cudaDataType computeType, + size_t *workspaceInBytesOnDevice, + size_t *workspaceInBytesOnHost); + + cusolverStatus_t CUSOLVERAPI cusolverDnXlarft( + cusolverDnHandle_t handle, + cusolverDnParams_t params, + cusolverDirectMode_t direct, + cusolverStorevMode_t storev, + int64_t n, + int64_t k, + cudaDataType dataTypeV, + const void *V, + int64_t ldv, + cudaDataType dataTypeTau, + const void *tau, + cudaDataType dataTypeT, + void *T, + int64_t ldt, + cudaDataType computeType, + void *bufferOnDevice, + size_t workspaceInBytesOnDevice, + void *bufferOnHost, + size_t workspaceInBytesOnHost); + + typedef void (*cusolverDnLoggerCallback_t)( + int logLevel, + const char *functionName, + const char *message); + + cusolverStatus_t CUSOLVERAPI + cusolverDnLoggerSetCallback(cusolverDnLoggerCallback_t callback); + + cusolverStatus_t CUSOLVERAPI cusolverDnLoggerSetFile(FILE *file); + + cusolverStatus_t CUSOLVERAPI cusolverDnLoggerOpenFile(const char *logFile); + + cusolverStatus_t CUSOLVERAPI cusolverDnLoggerSetLevel(int level); + + cusolverStatus_t CUSOLVERAPI cusolverDnLoggerSetMask(int mask); + + cusolverStatus_t CUSOLVERAPI cusolverDnLoggerForceDisable(); + + #if defined(__cplusplus) +} + #endif /* __cplusplus */ + +#endif /* !defined(CUDENSE_H_) */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverMg.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverMg.h new file mode 100644 index 0000000000000000000000000000000000000000..7702191f7253d66cf998016f6ae9f14149fbbb0b --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverMg.h @@ -0,0 +1,318 @@ +/* + * Copyright 2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(CUSOLVERMG_H_) + #define CUSOLVERMG_H_ + + #include <stdint.h> + #include "cusolverDn.h" + + #if defined(__cplusplus) +extern "C" { + #endif /* __cplusplus */ + + struct cusolverMgContext; + typedef struct cusolverMgContext *cusolverMgHandle_t; + + /** + * \brief This enum decides how 1D device Ids (or process ranks) get mapped to + * a 2D grid. + */ + typedef enum { + + CUDALIBMG_GRID_MAPPING_ROW_MAJOR = 1, + CUDALIBMG_GRID_MAPPING_COL_MAJOR = 0 + + } cusolverMgGridMapping_t; + + /** \brief Opaque structure of the distributed grid */ + typedef void *cudaLibMgGrid_t; + /** \brief Opaque structure of the distributed matrix descriptor */ + typedef void *cudaLibMgMatrixDesc_t; + + cusolverStatus_t CUSOLVERAPI cusolverMgCreate(cusolverMgHandle_t *handle); + + cusolverStatus_t CUSOLVERAPI cusolverMgDestroy(cusolverMgHandle_t handle); + + cusolverStatus_t CUSOLVERAPI cusolverMgDeviceSelect( + cusolverMgHandle_t handle, + int nbDevices, + int deviceId[]); + + /** + * \brief Allocates resources related to the shared memory device grid. + * \param[out] grid the opaque data structure that holds the grid + * \param[in] numRowDevices number of devices in the row + * \param[in] numColDevices number of devices in the column + * \param[in] deviceId This array of size numRowDevices * numColDevices stores the + * device-ids of the 2D grid; each entry must correspond to a valid + * GPU or to -1 (denoting CPU). + * \param[in] mapping whether the 2D grid is in row/column major + * \returns the status code + */ + cusolverStatus_t CUSOLVERAPI cusolverMgCreateDeviceGrid( + cudaLibMgGrid_t * grid, + int32_t numRowDevices, + int32_t numColDevices, + const int32_t deviceId[], + cusolverMgGridMapping_t mapping); + + /** + * \brief Releases the allocated resources related to the distributed grid.
+ * \param[in] grid the opaque data structure that holds the distributed grid + * \returns the status code + */ + cusolverStatus_t CUSOLVERAPI cusolverMgDestroyGrid(cudaLibMgGrid_t grid); + + /** + * \brief Allocates resources related to the distributed matrix descriptor. + * \param[out] desc the opaque data structure that holds the descriptor + * \param[in] numRows number of total rows + * \param[in] numCols number of total columns + * \param[in] rowBlockSize row block size + * \param[in] colBlockSize column block size + * \param[in] dataType the data type of each element in cudaDataType + * \param[in] grid the opaque data structure of the distributed grid + * \returns the status code + */ + cusolverStatus_t CUSOLVERAPI cusolverMgCreateMatrixDesc( + cudaLibMgMatrixDesc_t *desc, + int64_t numRows, + int64_t numCols, + int64_t rowBlockSize, + int64_t colBlockSize, + cudaDataType dataType, + const cudaLibMgGrid_t grid); + + /** + * \brief Releases the allocated resources related to the distributed matrix + * descriptor. + * \param[in] desc the opaque data structure that holds the descriptor + * \returns the status code + */ + cusolverStatus_t CUSOLVERAPI + cusolverMgDestroyMatrixDesc(cudaLibMgMatrixDesc_t desc); + + cusolverStatus_t CUSOLVERAPI cusolverMgSyevd_bufferSize( + cusolverMgHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int N, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + void * W, + cudaDataType dataTypeW, + cudaDataType computeType, + int64_t * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverMgSyevd( + cusolverMgHandle_t handle, + cusolverEigMode_t jobz, + cublasFillMode_t uplo, + int N, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + void * W, + cudaDataType dataTypeW, + cudaDataType computeType, + void * array_d_work[], + int64_t lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverMgGetrf_bufferSize( + cusolverMgHandle_t handle, + int M, + int N, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + int * array_d_IPIV[], + cudaDataType computeType, + int64_t * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverMgGetrf( + cusolverMgHandle_t handle, + int M, + int N, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + int * array_d_IPIV[], + cudaDataType computeType, + void * array_d_work[], + int64_t lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverMgGetrs_bufferSize( + cusolverMgHandle_t handle, + cublasOperation_t TRANS, + int N, + int NRHS, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + int * array_d_IPIV[], + void * array_d_B[], + int IB, + int JB, + cudaLibMgMatrixDesc_t descrB, + cudaDataType computeType, + int64_t * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverMgGetrs( + cusolverMgHandle_t handle, + cublasOperation_t TRANS, + int N, + int NRHS, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + int * array_d_IPIV[], + void * array_d_B[], + int IB, + int JB, + cudaLibMgMatrixDesc_t descrB, + cudaDataType computeType, + void * array_d_work[], + int64_t lwork, + int * info); + + cusolverStatus_t CUSOLVERAPI cusolverMgPotrf_bufferSize( + cusolverMgHandle_t handle, + cublasFillMode_t uplo, + int N, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + cudaDataType computeType, + int64_t * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverMgPotrf( + cusolverMgHandle_t handle, + cublasFillMode_t uplo, + int N, + void * array_d_A[], + int IA, + int JA, +
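/*
 * Usage sketch (illustrative) of the multi-GPU setup implied by the API
 * above: select devices, build a 1-by-nbDevices grid, describe the
 * distributed matrix, then call a solver such as cusolverMgPotrf. The
 * names nbDevices, deviceId, N and T_A (an assumed column block size)
 * are placeholders; error checking is omitted:
 *
 *   cusolverMgHandle_t handle;
 *   cudaLibMgGrid_t grid;
 *   cudaLibMgMatrixDesc_t descrA;
 *   cusolverMgCreate(&handle);
 *   cusolverMgDeviceSelect(handle, nbDevices, deviceId);
 *   cusolverMgCreateDeviceGrid(&grid, 1, nbDevices, deviceId,
 *       CUDALIBMG_GRID_MAPPING_COL_MAJOR);
 *   cusolverMgCreateMatrixDesc(&descrA, N, N, N, T_A, CUDA_R_64F, grid);
 */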
cudaLibMgMatrixDesc_t descrA, + cudaDataType computeType, + void * array_d_work[], + int64_t lwork, + int * h_info); + + cusolverStatus_t CUSOLVERAPI cusolverMgPotrs_bufferSize( + cusolverMgHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + void * array_d_B[], + int IB, + int JB, + cudaLibMgMatrixDesc_t descrB, + cudaDataType computeType, + int64_t * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverMgPotrs( + cusolverMgHandle_t handle, + cublasFillMode_t uplo, + int n, + int nrhs, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + void * array_d_B[], + int IB, + int JB, + cudaLibMgMatrixDesc_t descrB, + cudaDataType computeType, + void * array_d_work[], + int64_t lwork, + int * h_info); + + cusolverStatus_t CUSOLVERAPI cusolverMgPotri_bufferSize( + cusolverMgHandle_t handle, + cublasFillMode_t uplo, + int N, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + cudaDataType computeType, + int64_t * lwork); + + cusolverStatus_t CUSOLVERAPI cusolverMgPotri( + cusolverMgHandle_t handle, + cublasFillMode_t uplo, + int N, + void * array_d_A[], + int IA, + int JA, + cudaLibMgMatrixDesc_t descrA, + cudaDataType computeType, + void * array_d_work[], + int64_t lwork, + int * h_info); + + #if defined(__cplusplus) +} + #endif /* __cplusplus */ + +#endif // CUSOLVERMG_H_ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverRf.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverRf.h new file mode 100644 index 0000000000000000000000000000000000000000..c74e9ca6bb34a8d1214c10450c45e2599417636f --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverRf.h @@ -0,0 +1,339 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(CUSOLVERRF_H_) + #define CUSOLVERRF_H_ + + #include "driver_types.h" + #include "cuComplex.h" + #include "cusolver_common.h" + + #if defined(__cplusplus) +extern "C" { + #endif /* __cplusplus */ + + /* CUSOLVERRF mode */ + typedef enum { + CUSOLVERRF_RESET_VALUES_FAST_MODE_OFF = 0, // default + CUSOLVERRF_RESET_VALUES_FAST_MODE_ON = 1 + } cusolverRfResetValuesFastMode_t; + + /* CUSOLVERRF matrix format */ + typedef enum { + CUSOLVERRF_MATRIX_FORMAT_CSR = 0, // default + CUSOLVERRF_MATRIX_FORMAT_CSC = 1 + } cusolverRfMatrixFormat_t; + + /* CUSOLVERRF unit diagonal */ + typedef enum { + CUSOLVERRF_UNIT_DIAGONAL_STORED_L = 0, // default + CUSOLVERRF_UNIT_DIAGONAL_STORED_U = 1, + CUSOLVERRF_UNIT_DIAGONAL_ASSUMED_L = 2, + CUSOLVERRF_UNIT_DIAGONAL_ASSUMED_U = 3 + } cusolverRfUnitDiagonal_t; + + /* CUSOLVERRF factorization algorithm */ + typedef enum { + CUSOLVERRF_FACTORIZATION_ALG0 = 0, // default + CUSOLVERRF_FACTORIZATION_ALG1 = 1, + CUSOLVERRF_FACTORIZATION_ALG2 = 2, + } cusolverRfFactorization_t; + + /* CUSOLVERRF triangular solve algorithm */ + typedef enum { + CUSOLVERRF_TRIANGULAR_SOLVE_ALG1 = 1, // default + CUSOLVERRF_TRIANGULAR_SOLVE_ALG2 = 2, + CUSOLVERRF_TRIANGULAR_SOLVE_ALG3 = 3 + } cusolverRfTriangularSolve_t; + + /* CUSOLVERRF numeric boost report */ + typedef enum { + CUSOLVERRF_NUMERIC_BOOST_NOT_USED = 0, // default + CUSOLVERRF_NUMERIC_BOOST_USED = 1 + } cusolverRfNumericBoostReport_t; + + /* Opaque structure holding CUSOLVERRF library common */ + struct cusolverRfCommon; + typedef struct cusolverRfCommon* cusolverRfHandle_t; + + /* CUSOLVERRF create (allocate memory) and destroy (free memory) in the handle + */ + cusolverStatus_t CUSOLVERAPI cusolverRfCreate(cusolverRfHandle_t* handle); + cusolverStatus_t CUSOLVERAPI cusolverRfDestroy(cusolverRfHandle_t handle); + + /* CUSOLVERRF set and get input format */ + cusolverStatus_t CUSOLVERAPI cusolverRfGetMatrixFormat( + cusolverRfHandle_t handle, + cusolverRfMatrixFormat_t* format, + cusolverRfUnitDiagonal_t* diag); + + cusolverStatus_t CUSOLVERAPI cusolverRfSetMatrixFormat( + cusolverRfHandle_t handle, + cusolverRfMatrixFormat_t format, + cusolverRfUnitDiagonal_t diag); + + /* CUSOLVERRF set and get numeric properties */ + cusolverStatus_t CUSOLVERAPI 
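/*
 * Usage sketch (illustrative) of the intended re-factorization flow, using
 * the routines declared below: set up once from a host LU factorization
 * (e.g. produced by a CPU sparse solver), analyze once, then repeatedly
 * refresh the values, refactor and solve as the matrix values change while
 * the sparsity pattern stays fixed. Error checking is omitted:
 *
 *   cusolverRfHandle_t rf;
 *   cusolverRfCreate(&rf);
 *   cusolverRfSetupHost(n, nnzA, h_csrRowPtrA, h_csrColIndA, h_csrValA,
 *       nnzL, h_csrRowPtrL, h_csrColIndL, h_csrValL,
 *       nnzU, h_csrRowPtrU, h_csrColIndU, h_csrValU, h_P, h_Q, rf);
 *   cusolverRfAnalyze(rf);
 *   for (;;) {   // each new matrix with the same sparsity pattern
 *       cusolverRfResetValues(n, nnzA, d_csrRowPtrA, d_csrColIndA,
 *           d_csrValA, d_P, d_Q, rf);
 *       cusolverRfRefactor(rf);
 *       cusolverRfSolve(rf, d_P, d_Q, 1, d_Temp, n, d_X, n);
 *   }
 */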
cusolverRfSetNumericProperties( + cusolverRfHandle_t handle, + double zero, + double boost); + + cusolverStatus_t CUSOLVERAPI cusolverRfGetNumericProperties( + cusolverRfHandle_t handle, + double* zero, + double* boost); + + cusolverStatus_t CUSOLVERAPI cusolverRfGetNumericBoostReport( + cusolverRfHandle_t handle, + cusolverRfNumericBoostReport_t* report); + + /* CUSOLVERRF choose the triangular solve algorithm */ + cusolverStatus_t CUSOLVERAPI cusolverRfSetAlgs( + cusolverRfHandle_t handle, + cusolverRfFactorization_t factAlg, + cusolverRfTriangularSolve_t solveAlg); + + cusolverStatus_t CUSOLVERAPI cusolverRfGetAlgs( + cusolverRfHandle_t handle, + cusolverRfFactorization_t* factAlg, + cusolverRfTriangularSolve_t* solveAlg); + + /* CUSOLVERRF set and get fast mode */ + cusolverStatus_t CUSOLVERAPI cusolverRfGetResetValuesFastMode( + cusolverRfHandle_t handle, + cusolverRfResetValuesFastMode_t* fastMode); + + cusolverStatus_t CUSOLVERAPI cusolverRfSetResetValuesFastMode( + cusolverRfHandle_t handle, + cusolverRfResetValuesFastMode_t fastMode); + + /*** Non-Batched Routines ***/ + /* CUSOLVERRF setup of internal structures from host or device memory */ + cusolverStatus_t CUSOLVERAPI + cusolverRfSetupHost(/* Input (in the host memory) */ + int n, + int nnzA, + int* h_csrRowPtrA, + int* h_csrColIndA, + double* h_csrValA, + int nnzL, + int* h_csrRowPtrL, + int* h_csrColIndL, + double* h_csrValL, + int nnzU, + int* h_csrRowPtrU, + int* h_csrColIndU, + double* h_csrValU, + int* h_P, + int* h_Q, + /* Output */ + cusolverRfHandle_t handle); + + cusolverStatus_t CUSOLVERAPI + cusolverRfSetupDevice(/* Input (in the device memory) */ + int n, + int nnzA, + int* csrRowPtrA, + int* csrColIndA, + double* csrValA, + int nnzL, + int* csrRowPtrL, + int* csrColIndL, + double* csrValL, + int nnzU, + int* csrRowPtrU, + int* csrColIndU, + double* csrValU, + int* P, + int* Q, + /* Output */ + cusolverRfHandle_t handle); + + /* CUSOLVERRF update the matrix values (assuming the reordering, pivoting + and consequently the sparsity pattern of L and U did not change), + and zero out the remaining values. 
*/ + cusolverStatus_t CUSOLVERAPI + cusolverRfResetValues(/* Input (in the device memory) */ + int n, + int nnzA, + int* csrRowPtrA, + int* csrColIndA, + double* csrValA, + int* P, + int* Q, + /* Output */ + cusolverRfHandle_t handle); + + /* CUSOLVERRF analysis (for parallelism) */ + cusolverStatus_t CUSOLVERAPI cusolverRfAnalyze(cusolverRfHandle_t handle); + + /* CUSOLVERRF re-factorization (for parallelism) */ + cusolverStatus_t CUSOLVERAPI cusolverRfRefactor(cusolverRfHandle_t handle); + + /* CUSOLVERRF extraction: Get L & U packed into a single matrix M */ + cusolverStatus_t CUSOLVERAPI + cusolverRfAccessBundledFactorsDevice(/* Input */ + cusolverRfHandle_t handle, + /* Output (in the host memory) */ + int* nnzM, + /* Output (in the device memory) */ + int** Mp, + int** Mi, + double** Mx); + + cusolverStatus_t CUSOLVERAPI + cusolverRfExtractBundledFactorsHost(/* Input */ + cusolverRfHandle_t handle, + /* Output (in the host memory) */ + int* h_nnzM, + int** h_Mp, + int** h_Mi, + double** h_Mx); + + /* CUSOLVERRF extraction: Get L & U individually */ + cusolverStatus_t CUSOLVERAPI + cusolverRfExtractSplitFactorsHost(/* Input */ + cusolverRfHandle_t handle, + /* Output (in the host memory) */ + int* h_nnzL, + int** h_csrRowPtrL, + int** h_csrColIndL, + double** h_csrValL, + int* h_nnzU, + int** h_csrRowPtrU, + int** h_csrColIndU, + double** h_csrValU); + + /* CUSOLVERRF (forward and backward triangular) solves */ + cusolverStatus_t CUSOLVERAPI + cusolverRfSolve(/* Input (in the device memory) */ + cusolverRfHandle_t handle, + int* P, + int* Q, + int nrhs, // only nrhs=1 is supported + double* Temp, // of size ldt*nrhs (ldt>=n) + int ldt, + /* Input/Output (in the device memory) */ + double* XF, + /* Input */ + int ldxf); + + /*** Batched Routines ***/ + /* CUSOLVERRF-batch setup of internal structures from host */ + cusolverStatus_t CUSOLVERAPI + cusolverRfBatchSetupHost(/* Input (in the host memory)*/ + int batchSize, + int n, + int nnzA, + int* h_csrRowPtrA, + int* h_csrColIndA, + double* h_csrValA_array[], + int nnzL, + int* h_csrRowPtrL, + int* h_csrColIndL, + double* h_csrValL, + int nnzU, + int* h_csrRowPtrU, + int* h_csrColIndU, + double* h_csrValU, + int* h_P, + int* h_Q, + /* Output (in the device memory) */ + cusolverRfHandle_t handle); + + /* CUSOLVERRF-batch update the matrix values (assuming the reordering, + pivoting and consequently the sparsity pattern of L and U did not change), + and zero out the remaining values. 
*/ + cusolverStatus_t CUSOLVERAPI + cusolverRfBatchResetValues(/* Input (in the device memory) */ + int batchSize, + int n, + int nnzA, + int* csrRowPtrA, + int* csrColIndA, + double* csrValA_array[], + int* P, + int* Q, + /* Output */ + cusolverRfHandle_t handle); + + /* CUSOLVERRF-batch analysis (for parallelism) */ + cusolverStatus_t CUSOLVERAPI + cusolverRfBatchAnalyze(cusolverRfHandle_t handle); + + /* CUSOLVERRF-batch re-factorization (for parallelism) */ + cusolverStatus_t CUSOLVERAPI + cusolverRfBatchRefactor(cusolverRfHandle_t handle); + + /* CUSOLVERRF-batch (forward and backward triangular) solves */ + cusolverStatus_t CUSOLVERAPI + cusolverRfBatchSolve(/* Input (in the device memory) */ + cusolverRfHandle_t handle, + int* P, + int* Q, + int nrhs, // only nrhs=1 is supported + double* Temp, // of size 2*batchSize*(n*nrhs) + int ldt, // only ldt=n is supported + /* Input/Output (in the device memory) */ + double* XF_array[], + /* Input */ + int ldxf); + + /* CUSOLVERRF-batch obtain the position of zero pivot */ + cusolverStatus_t CUSOLVERAPI + cusolverRfBatchZeroPivot(/* Input */ + cusolverRfHandle_t handle, + /* Output (in the host memory) */ + int* position); + + #if defined(__cplusplus) +} + #endif /* __cplusplus */ + +#endif /* CUSOLVERRF_H_ */ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverSp.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverSp.h new file mode 100644 index 0000000000000000000000000000000000000000..a00a2fac14664090a116bae89fe34f97d8e41f9c --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverSp.h @@ -0,0 +1,923 @@ +/* + * Copyright 2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(CUSOLVERSP_H_) + #define CUSOLVERSP_H_ + + #include "cusparse.h" + #include "cublas_v2.h" + #include "cusolver_common.h" + + #if defined(__cplusplus) +extern "C" { + #endif /* __cplusplus */ + + struct cusolverSpContext; + typedef struct cusolverSpContext *cusolverSpHandle_t; + + struct csrqrInfo; + typedef struct csrqrInfo *csrqrInfo_t; + + cusolverStatus_t CUSOLVERAPI cusolverSpCreate(cusolverSpHandle_t *handle); + cusolverStatus_t CUSOLVERAPI cusolverSpDestroy(cusolverSpHandle_t handle); + cusolverStatus_t CUSOLVERAPI + cusolverSpSetStream(cusolverSpHandle_t handle, cudaStream_t streamId); + cusolverStatus_t CUSOLVERAPI + cusolverSpGetStream(cusolverSpHandle_t handle, cudaStream_t *streamId); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrissymHost( + cusolverSpHandle_t handle, + int m, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrEndPtrA, + const int * csrColIndA, + int * issym); + + /* -------- GPU linear solver by LU factorization + * solve A*x = b, A can be singular + * [ls] stands for linear solve + * [v] stands for vector + * [lu] stands for LU factorization + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvluHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const float * b, + float tol, + int reorder, + float * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvluHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const double * b, + double tol, + int reorder, + double * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvluHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuComplex * b, + float tol, + int reorder, + cuComplex * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvluHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuDoubleComplex * b, + double tol, + int reorder, + cuDoubleComplex * x, + int * singularity); + + /* -------- GPU linear solver by QR factorization + * solve A*x = b, A can be singular + * [ls] stands for linear solve + * [v] stands for vector + * [qr] stands for QR factorization + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvqr( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const float * b, + float tol, + int reorder, + float * 
x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvqr( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const double * b, + double tol, + int reorder, + double * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvqr( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const cuComplex * b, + float tol, + int reorder, + cuComplex * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvqr( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const cuDoubleComplex * b, + double tol, + int reorder, + cuDoubleComplex * x, + int * singularity); + + /* -------- CPU linear solver by QR factorization + * solve A*x = b, A can be singular + * [ls] stands for linear solve + * [v] stands for vector + * [qr] stands for QR factorization + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvqrHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const float * b, + float tol, + int reorder, + float * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvqrHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const double * b, + double tol, + int reorder, + double * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvqrHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuComplex * b, + float tol, + int reorder, + cuComplex * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvqrHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuDoubleComplex * b, + double tol, + int reorder, + cuDoubleComplex * x, + int * singularity); + + /* -------- CPU linear solver by Cholesky factorization + * solve A*x = b, A can be singular + * [ls] stands for linear solve + * [v] stands for vector + * [chol] stands for Cholesky factorization + * + * Only works for symmetric positive definite matrix. + * The upper part of A is ignored. 
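+ * Equivalently, only the lower triangular part of A (diagonal included) is read, so the CSR input may hold either the full symmetric matrix or just its lower triangle.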
+ */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvcholHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const float * b, + float tol, + int reorder, + float * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvcholHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const double * b, + double tol, + int reorder, + double * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvcholHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const cuComplex * b, + float tol, + int reorder, + cuComplex * x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvcholHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const cuDoubleComplex * b, + double tol, + int reorder, + cuDoubleComplex * x, + int * singularity); + + /* -------- GPU linear solver by Cholesky factorization + * solve A*x = b, A can be singular + * [ls] stands for linear solve + * [v] stands for vector + * [chol] stands for Cholesky factorization + * + * Only works for symmetric positive definite matrix. + * The upper part of A is ignored. + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsvchol( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const float * b, + float tol, + int reorder, + // output + float *x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsvchol( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const double * b, + double tol, + int reorder, + // output + double *x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsvchol( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const cuComplex * b, + float tol, + int reorder, + // output + cuComplex *x, + int * singularity); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsvchol( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + const cuDoubleComplex * b, + double tol, + int reorder, + // output + cuDoubleComplex *x, + int * singularity); + + /* ----------- CPU least square solver by QR factorization + * solve min|b - A*x| + * [lsq] stands for least square + * [v] stands for vector + * [qr] stands for QR factorization + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrlsqvqrHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const float * b, + float tol, + int * rankA, + float * x, + int * p, + float * min_norm); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrlsqvqrHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * 
csrColIndA, + const double * b, + double tol, + int * rankA, + double * x, + int * p, + double * min_norm); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrlsqvqrHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuComplex * b, + float tol, + int * rankA, + cuComplex * x, + int * p, + float * min_norm); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrlsqvqrHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuDoubleComplex * b, + double tol, + int * rankA, + cuDoubleComplex * x, + int * p, + double * min_norm); + + /* --------- CPU eigenvalue solver by shift inverse + * solve A*x = lambda * x + * where lambda is the eigenvalue nearest mu0. + * [eig] stands for eigenvalue solver + * [si] stands for shift-inverse + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsreigvsiHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + float mu0, + const float * x0, + int maxite, + float tol, + float * mu, + float * x); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsreigvsiHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + double mu0, + const double * x0, + int maxite, + double tol, + double * mu, + double * x); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsreigvsiHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuComplex mu0, + const cuComplex * x0, + int maxite, + float tol, + cuComplex * mu, + cuComplex * x); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsreigvsiHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuDoubleComplex mu0, + const cuDoubleComplex * x0, + int maxite, + double tol, + cuDoubleComplex * mu, + cuDoubleComplex * x); + + /* --------- GPU eigenvalue solver by shift inverse + * solve A*x = lambda * x + * where lambda is the eigenvalue nearest mu0. 
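+ * (Shift-inverse here is the textbook scheme: starting from x0, repeatedly solve (A - mu0*I)*y = x and normalize, for at most maxite iterations or until the eps tolerance is reached.)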
+ * [eig] stands for eigenvalue solver + * [si] stands for shift-inverse + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsreigvsi( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + float mu0, + const float * x0, + int maxite, + float eps, + float * mu, + float * x); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsreigvsi( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + double mu0, + const double * x0, + int maxite, + double eps, + double * mu, + double * x); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsreigvsi( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuComplex mu0, + const cuComplex * x0, + int maxite, + float eps, + cuComplex * mu, + cuComplex * x); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsreigvsi( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuDoubleComplex mu0, + const cuDoubleComplex * x0, + int maxite, + double eps, + cuDoubleComplex * mu, + cuDoubleComplex * x); + + // ----------- enclosed eigenvalues + + cusolverStatus_t CUSOLVERAPI cusolverSpScsreigsHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuComplex left_bottom_corner, + cuComplex right_upper_corner, + int * num_eigs); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsreigsHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuDoubleComplex left_bottom_corner, + cuDoubleComplex right_upper_corner, + int * num_eigs); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsreigsHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuComplex left_bottom_corner, + cuComplex right_upper_corner, + int * num_eigs); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsreigsHost( + cusolverSpHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuDoubleComplex left_bottom_corner, + cuDoubleComplex right_upper_corner, + int * num_eigs); + + /* --------- CPU symrcm + * Symmetric reverse Cuthill McKee permutation + * + */ + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrsymrcmHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + int * p); + + /* --------- CPU symmdq + * Symmetric minimum degree algorithm by quotient graph + * + */ + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrsymmdqHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + int * p); + + /* --------- CPU symmdq + * Symmetric Approximate minimum degree algorithm by quotient graph + * + */ + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrsymamdHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + int * p); 
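[Editorial note: the three reordering routines above each return a host permutation vector p intended to reduce fill-in before factorization; p can then be applied with cusolverSpXcsrpermHost, declared further below. The following is a minimal host-side sketch of the symrcm path, not part of this header; the 4x4 symmetric pattern, variable names, and omitted error checks are illustrative assumptions.

    #include <stdio.h>
    #include <cusolverSp.h>

    int main(void)
    {
        /* Symmetric 4x4 sparsity pattern in zero-based CSR; values are
           not needed for a purely symbolic reordering. */
        int n = 4, nnzA = 8;
        int csrRowPtrA[] = {0, 2, 4, 6, 8};
        int csrColIndA[] = {0, 3, 1, 2, 1, 2, 0, 3};
        int p[4]; /* receives the permutation */

        cusolverSpHandle_t handle;
        cusparseMatDescr_t descrA;
        cusolverSpCreate(&handle);
        cusparseCreateMatDescr(&descrA); /* defaults: general, zero-based */

        /* Reverse Cuthill-McKee: row/column i of the reordered matrix
           corresponds to row/column p[i] of A. */
        cusolverSpXcsrsymrcmHost(handle, n, nnzA, descrA,
                                 csrRowPtrA, csrColIndA, p);

        for (int i = 0; i < n; ++i)
            printf("p[%d] = %d\n", i, p[i]);

        cusparseDestroyMatDescr(descrA);
        cusolverSpDestroy(handle);
        return 0;
    }

The same call shape works for the symmdq and symamd variants, which differ only in the reordering heuristic applied to the pattern.]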
+ + /* --------- CPU metis + * symmetric reordering + */ + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrmetisndHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + const int64_t * options, + int * p); + + /* --------- CPU zfd + * Zero free diagonal reordering + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrzfdHost( + cusolverSpHandle_t handle, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + int * P, + int * numnz); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrzfdHost( + cusolverSpHandle_t handle, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + int * P, + int * numnz); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrzfdHost( + cusolverSpHandle_t handle, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + int * P, + int * numnz); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrzfdHost( + cusolverSpHandle_t handle, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + int * P, + int * numnz); + + /* --------- CPU permuation + * P*A*Q^T + * + */ + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrperm_bufferSizeHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + const int * p, + const int * q, + size_t * bufferSizeInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrpermHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + int * csrRowPtrA, + int * csrColIndA, + const int * p, + const int * q, + int * map, + void * pBuffer); + + /* + * Low-level API: Batched QR + * + */ + + cusolverStatus_t CUSOLVERAPI cusolverSpCreateCsrqrInfo(csrqrInfo_t *info); + + cusolverStatus_t CUSOLVERAPI cusolverSpDestroyCsrqrInfo(csrqrInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrqrAnalysisBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrBufferInfoBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrVal, + const int * csrRowPtr, + const int * csrColInd, + int batchSize, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrBufferInfoBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrVal, + const int * csrRowPtr, + const int * csrColInd, + int batchSize, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrBufferInfoBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + int batchSize, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrBufferInfoBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const 
cuDoubleComplex * csrVal, + const int * csrRowPtr, + const int * csrColInd, + int batchSize, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrsvBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const float * b, + float * x, + int batchSize, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrsvBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const double * b, + double * x, + int batchSize, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrsvBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuComplex * b, + cuComplex * x, + int batchSize, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrsvBatched( + cusolverSpHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + const cuDoubleComplex * b, + cuDoubleComplex * x, + int batchSize, + csrqrInfo_t info, + void * pBuffer); + + #if defined(__cplusplus) +} + #endif /* __cplusplus */ + +#endif // define CUSOLVERSP_H_ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h new file mode 100644 index 0000000000000000000000000000000000000000..e660bb87ea5d89cc1d430dce6c50df006d796809 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h @@ -0,0 +1,1107 @@ +/* + * Copyright 2015 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(CUSOLVERSP_LOWLEVEL_PREVIEW_H_) + #define CUSOLVERSP_LOWLEVEL_PREVIEW_H_ + + #include "cusolverSp.h" + + #if defined(__cplusplus) +extern "C" { + #endif /* __cplusplus */ + + struct csrluInfoHost; + typedef struct csrluInfoHost *csrluInfoHost_t; + + struct csrqrInfoHost; + typedef struct csrqrInfoHost *csrqrInfoHost_t; + + struct csrcholInfoHost; + typedef struct csrcholInfoHost *csrcholInfoHost_t; + + struct csrcholInfo; + typedef struct csrcholInfo *csrcholInfo_t; + + /* + * Low level API for CPU LU + * + */ + cusolverStatus_t CUSOLVERAPI + cusolverSpCreateCsrluInfoHost(csrluInfoHost_t *info); + + cusolverStatus_t CUSOLVERAPI + cusolverSpDestroyCsrluInfoHost(csrluInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrluAnalysisHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrluBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrluFactorHost( + cusolverSpHandle_t handle, 
+ int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + float pivot_threshold, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluFactorHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + double pivot_threshold, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluFactorHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + float pivot_threshold, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluFactorHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrluInfoHost_t info, + double pivot_threshold, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrluZeroPivotHost( + cusolverSpHandle_t handle, + csrluInfoHost_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluZeroPivotHost( + cusolverSpHandle_t handle, + csrluInfoHost_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluZeroPivotHost( + cusolverSpHandle_t handle, + csrluInfoHost_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluZeroPivotHost( + cusolverSpHandle_t handle, + csrluInfoHost_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrluSolveHost( + cusolverSpHandle_t handle, + int n, + const float * b, + float * x, + csrluInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluSolveHost( + cusolverSpHandle_t handle, + int n, + const double * b, + double * x, + csrluInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluSolveHost( + cusolverSpHandle_t handle, + int n, + const cuComplex * b, + cuComplex * x, + csrluInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluSolveHost( + cusolverSpHandle_t handle, + int n, + const cuDoubleComplex *b, + cuDoubleComplex * x, + csrluInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrluNnzHost( + cusolverSpHandle_t handle, + int * nnzLRef, + int * nnzURef, + csrluInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrluExtractHost( + cusolverSpHandle_t handle, + int * P, + int * Q, + const cusparseMatDescr_t descrL, + float * csrValL, + int * csrRowPtrL, + int * csrColIndL, + const cusparseMatDescr_t descrU, + float * csrValU, + int * csrRowPtrU, + int * csrColIndU, + csrluInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrluExtractHost( + cusolverSpHandle_t handle, + int * P, + int * Q, + const cusparseMatDescr_t descrL, + double * csrValL, + int * csrRowPtrL, + int * csrColIndL, + const cusparseMatDescr_t descrU, + double * csrValU, + int * csrRowPtrU, + int * csrColIndU, + csrluInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrluExtractHost( + cusolverSpHandle_t handle, + int * P, + int * Q, + const cusparseMatDescr_t descrL, + cuComplex * csrValL, + int * csrRowPtrL, + int * csrColIndL, + const cusparseMatDescr_t descrU, + cuComplex * csrValU, + int * csrRowPtrU, + int * csrColIndU, 
+ csrluInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrluExtractHost( + cusolverSpHandle_t handle, + int * P, + int * Q, + const cusparseMatDescr_t descrL, + cuDoubleComplex * csrValL, + int * csrRowPtrL, + int * csrColIndL, + const cusparseMatDescr_t descrU, + cuDoubleComplex * csrValU, + int * csrRowPtrU, + int * csrColIndU, + csrluInfoHost_t info, + void * pBuffer); + + /* + * Low level API for CPU QR + * + */ + cusolverStatus_t CUSOLVERAPI + cusolverSpCreateCsrqrInfoHost(csrqrInfoHost_t *info); + + cusolverStatus_t CUSOLVERAPI + cusolverSpDestroyCsrqrInfoHost(csrqrInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrqrAnalysisHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrBufferInfoHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrBufferInfoHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrBufferInfoHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrBufferInfoHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSetupHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + float mu, + csrqrInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSetupHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + double mu, + csrqrInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSetupHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuComplex mu, + csrqrInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSetupHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuDoubleComplex mu, + csrqrInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrFactorHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + float * b, + float * x, + csrqrInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrFactorHost( + cusolverSpHandle_t handle, + int m, 
+ int n, + int nnzA, + double * b, + double * x, + csrqrInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrFactorHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + cuComplex * b, + cuComplex * x, + csrqrInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrFactorHost( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + cuDoubleComplex * b, + cuDoubleComplex * x, + csrqrInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrZeroPivotHost( + cusolverSpHandle_t handle, + csrqrInfoHost_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrZeroPivotHost( + cusolverSpHandle_t handle, + csrqrInfoHost_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrZeroPivotHost( + cusolverSpHandle_t handle, + csrqrInfoHost_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrZeroPivotHost( + cusolverSpHandle_t handle, + csrqrInfoHost_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSolveHost( + cusolverSpHandle_t handle, + int m, + int n, + float * b, + float * x, + csrqrInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSolveHost( + cusolverSpHandle_t handle, + int m, + int n, + double * b, + double * x, + csrqrInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSolveHost( + cusolverSpHandle_t handle, + int m, + int n, + cuComplex * b, + cuComplex * x, + csrqrInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSolveHost( + cusolverSpHandle_t handle, + int m, + int n, + cuDoubleComplex * b, + cuDoubleComplex * x, + csrqrInfoHost_t info, + void * pBuffer); + + /* + * Low level API for GPU QR + * + */ + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrqrAnalysis( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrBufferInfo( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrBufferInfo( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrBufferInfo( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrBufferInfo( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrqrInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSetup( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const 
int * csrRowPtrA, + const int * csrColIndA, + float mu, + csrqrInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSetup( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + double mu, + csrqrInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSetup( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuComplex mu, + csrqrInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSetup( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + cuDoubleComplex mu, + csrqrInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrFactor( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + float * b, + float * x, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrFactor( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + double * b, + double * x, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrFactor( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + cuComplex * b, + cuComplex * x, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrFactor( + cusolverSpHandle_t handle, + int m, + int n, + int nnzA, + cuDoubleComplex * b, + cuDoubleComplex * x, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrZeroPivot( + cusolverSpHandle_t handle, + csrqrInfo_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrZeroPivot( + cusolverSpHandle_t handle, + csrqrInfo_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrZeroPivot( + cusolverSpHandle_t handle, + csrqrInfo_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrZeroPivot( + cusolverSpHandle_t handle, + csrqrInfo_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrqrSolve( + cusolverSpHandle_t handle, + int m, + int n, + float * b, + float * x, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrqrSolve( + cusolverSpHandle_t handle, + int m, + int n, + double * b, + double * x, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrqrSolve( + cusolverSpHandle_t handle, + int m, + int n, + cuComplex * b, + cuComplex * x, + csrqrInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrqrSolve( + cusolverSpHandle_t handle, + int m, + int n, + cuDoubleComplex * b, + cuDoubleComplex * x, + csrqrInfo_t info, + void * pBuffer); + + /* + * Low level API for CPU Cholesky + * + */ + cusolverStatus_t CUSOLVERAPI + cusolverSpCreateCsrcholInfoHost(csrcholInfoHost_t *info); + + cusolverStatus_t CUSOLVERAPI + cusolverSpDestroyCsrcholInfoHost(csrcholInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrcholAnalysisHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + 
const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholBufferInfoHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholFactorHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholFactorHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholFactorHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholFactorHost( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholZeroPivotHost( + cusolverSpHandle_t handle, + csrcholInfoHost_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholZeroPivotHost( + cusolverSpHandle_t handle, + csrcholInfoHost_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholZeroPivotHost( + cusolverSpHandle_t handle, + csrcholInfoHost_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholZeroPivotHost( + cusolverSpHandle_t handle, + csrcholInfoHost_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholSolveHost( + cusolverSpHandle_t handle, + int n, + const float * b, + float * x, + csrcholInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholSolveHost( + cusolverSpHandle_t handle, + int n, + const double * b, + double * x, + csrcholInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholSolveHost( + cusolverSpHandle_t handle, + int n, + const cuComplex * b, + cuComplex * x, + csrcholInfoHost_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholSolveHost( + cusolverSpHandle_t handle, + int n, + const cuDoubleComplex *b, + cuDoubleComplex * x, + csrcholInfoHost_t info, + void * pBuffer); + + 
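[Editorial note: taken together, the CPU Cholesky declarations above form a fixed pipeline: create the opaque info object, run the symbolic analysis once per sparsity pattern, query buffer sizes, factor, check for a near-zero pivot, then solve. The sketch below is an illustration, not part of this header; the 3x3 SPD matrix, the 1e-12 tolerance, and the minimal error handling are assumptions.

    #include <stdio.h>
    #include <stdlib.h>
    #include <cusolverSp_LOWLEVEL_PREVIEW.h>

    int main(void)
    {
        /* A = [4 1 0; 1 4 1; 0 1 4] in zero-based CSR; solve A*x = b. */
        int    n = 3, nnzA = 7;
        int    csrRowPtrA[] = {0, 2, 5, 7};
        int    csrColIndA[] = {0, 1, 0, 1, 2, 1, 2};
        double csrValA[]    = {4, 1, 1, 4, 1, 1, 4};
        double b[] = {1, 2, 3}, x[3];

        cusolverSpHandle_t handle;
        cusparseMatDescr_t descrA;
        csrcholInfoHost_t  info;
        cusolverSpCreate(&handle);
        cusparseCreateMatDescr(&descrA); /* defaults: general, zero-based */
        cusolverSpCreateCsrcholInfoHost(&info);

        /* Symbolic analysis depends only on the sparsity pattern. */
        cusolverSpXcsrcholAnalysisHost(handle, n, nnzA, descrA,
                                       csrRowPtrA, csrColIndA, info);

        size_t internalBytes = 0, workspaceBytes = 0;
        cusolverSpDcsrcholBufferInfoHost(handle, n, nnzA, descrA, csrValA,
                                         csrRowPtrA, csrColIndA, info,
                                         &internalBytes, &workspaceBytes);
        void *buffer = malloc(workspaceBytes);

        cusolverSpDcsrcholFactorHost(handle, n, nnzA, descrA, csrValA,
                                     csrRowPtrA, csrColIndA, info, buffer);

        /* position stays -1 when no pivot fell below tol, i.e. the
           factorization succeeded. */
        int position = -1;
        cusolverSpDcsrcholZeroPivotHost(handle, info, 1e-12, &position);
        if (position >= 0) {
            fprintf(stderr, "not positive definite near row %d\n", position);
            return 1;
        }

        cusolverSpDcsrcholSolveHost(handle, n, b, x, info, buffer);
        printf("x = [%g, %g, %g]\n", x[0], x[1], x[2]);

        free(buffer);
        cusolverSpDestroyCsrcholInfoHost(info);
        cusparseDestroyMatDescr(descrA);
        cusolverSpDestroy(handle);
        return 0;
    }

Because the analysis and workspace depend only on the pattern, repeated factor/solve cycles over matrices with identical structure can skip the symbolic step, the apparent motivation for this low-level interface over the one-shot csrlsvchol drivers.]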
/* + * Low level API for GPU Cholesky + * + */ + cusolverStatus_t CUSOLVERAPI cusolverSpCreateCsrcholInfo(csrcholInfo_t *info); + + cusolverStatus_t CUSOLVERAPI cusolverSpDestroyCsrcholInfo(csrcholInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpXcsrcholAnalysis( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholBufferInfo( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholBufferInfo( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholBufferInfo( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholBufferInfo( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + size_t * internalDataInBytes, + size_t * workspaceInBytes); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholFactor( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholFactor( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholFactor( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholFactor( + cusolverSpHandle_t handle, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrcholInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholZeroPivot( + cusolverSpHandle_t handle, + csrcholInfo_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholZeroPivot( + cusolverSpHandle_t handle, + csrcholInfo_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholZeroPivot( + cusolverSpHandle_t handle, + csrcholInfo_t info, + float tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholZeroPivot( + cusolverSpHandle_t handle, + csrcholInfo_t info, + double tol, + int * position); + + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholSolve( + cusolverSpHandle_t handle, + int n, + const float * b, + float * x, + csrcholInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholSolve( 
+ cusolverSpHandle_t handle, + int n, + const double * b, + double * x, + csrcholInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholSolve( + cusolverSpHandle_t handle, + int n, + const cuComplex * b, + cuComplex * x, + csrcholInfo_t info, + void * pBuffer); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholSolve( + cusolverSpHandle_t handle, + int n, + const cuDoubleComplex *b, + cuDoubleComplex * x, + csrcholInfo_t info, + void * pBuffer); + + /* + * "diag" is a device array of size N. + * cusolverSpcsrcholDiag returns diag(L) to "diag" where A(P,P) = L*L**T + * "diag" can estimate det(A) because det(A(P,P)) = det(A) = det(L)^2 if A = + * L*L**T. + * + * cusolverSpcsrcholDiag must be called after cusolverSpcsrcholFactor. + * otherwise "diag" is wrong. + */ + cusolverStatus_t CUSOLVERAPI cusolverSpScsrcholDiag( + cusolverSpHandle_t handle, + csrcholInfo_t info, + float * diag); + + cusolverStatus_t CUSOLVERAPI cusolverSpDcsrcholDiag( + cusolverSpHandle_t handle, + csrcholInfo_t info, + double * diag); + + cusolverStatus_t CUSOLVERAPI cusolverSpCcsrcholDiag( + cusolverSpHandle_t handle, + csrcholInfo_t info, + float * diag); + + cusolverStatus_t CUSOLVERAPI cusolverSpZcsrcholDiag( + cusolverSpHandle_t handle, + csrcholInfo_t info, + double * diag); + + #if defined(__cplusplus) +} + #endif /* __cplusplus */ + +#endif // CUSOLVERSP_LOWLEVEL_PREVIEW_H_ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolver_common.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolver_common.h new file mode 100644 index 0000000000000000000000000000000000000000..13762a357dd2470efc03a8f373299020b84c6091 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/include/cusolver_common.h @@ -0,0 +1,261 @@ +/* + * Copyright 2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. 
+ * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(CUSOLVER_COMMON_H_) + #define CUSOLVER_COMMON_H_ + + #include "library_types.h" + + #ifndef CUSOLVERAPI + #ifdef _WIN32 + #define CUSOLVERAPI __stdcall + #else + #define CUSOLVERAPI + #endif + #endif + + #if defined(_MSC_VER) +typedef __int64 int64_t; + #else + #include + #endif + +typedef int cusolver_int_t; + + #define CUSOLVER_VER_MAJOR 11 + #define CUSOLVER_VER_MINOR 7 + #define CUSOLVER_VER_PATCH 1 + #define CUSOLVER_VER_BUILD 2 + #define CUSOLVER_VERSION \ + (CUSOLVER_VER_MAJOR * 1000 + CUSOLVER_VER_MINOR * 100 + CUSOLVER_VER_PATCH) + +//------------------------------------------------------------------------------ + + #if !defined(_MSC_VER) + #define CUSOLVER_CPP_VERSION __cplusplus + #elif _MSC_FULL_VER >= 190024210 // Visual Studio 2015 Update 3 + #define CUSOLVER_CPP_VERSION _MSVC_LANG + #else + #define CUSOLVER_CPP_VERSION 0 + #endif + +//------------------------------------------------------------------------------ + + #if !defined(DISABLE_CUSOLVER_DEPRECATED) + + #if CUSOLVER_CPP_VERSION >= 201402L + + #define CUSOLVER_DEPRECATED(new_func) \ + [[deprecated("please use " #new_func " instead")]] + + #elif defined(_MSC_VER) + + #define CUSOLVER_DEPRECATED(new_func) \ + __declspec(deprecated("please use " #new_func " instead")) + + #elif defined(__INTEL_COMPILER) || defined(__clang__) || \ + (defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) + + #define CUSOLVER_DEPRECATED(new_func) \ + __attribute__((deprecated("please use " #new_func " instead"))) + + #elif defined(__GNUC__) || defined(__xlc__) + + #define CUSOLVER_DEPRECATED(new_func) __attribute__((deprecated)) + + #else + + #define CUSOLVER_DEPRECATED(new_func) + + #endif // defined(__cplusplus) && __cplusplus >= 201402L + //------------------------------------------------------------------------------ + + #if CUSOLVER_CPP_VERSION >= 201703L + + #define CUSOLVER_DEPRECATED_ENUM(new_enum) \ + [[deprecated("please use " #new_enum " instead")]] + + #elif defined(__clang__) || \ + (defined(__GNUC__) && __GNUC__ >= 6 && !defined(__PGI)) + + #define CUSOLVER_DEPRECATED_ENUM(new_enum) \ + __attribute__((deprecated("please use " #new_enum " instead"))) + + #else + + #define CUSOLVER_DEPRECATED_ENUM(new_enum) + + #endif // defined(__cplusplus) && __cplusplus >= 201402L + + #else // defined(DISABLE_CUSOLVER_DEPRECATED) + + #define CUSOLVER_DEPRECATED(new_func) + #define CUSOLVER_DEPRECATED_ENUM(new_enum) + + #endif // !defined(DISABLE_CUSOLVER_DEPRECATED) + + #undef CUSOLVER_CPP_VERSION + + #if defined(__cplusplus) +extern "C" { + #endif /* __cplusplus */ + + typedef enum { + CUSOLVER_STATUS_SUCCESS = 0, + CUSOLVER_STATUS_NOT_INITIALIZED = 1, + CUSOLVER_STATUS_ALLOC_FAILED = 2, + 
CUSOLVER_STATUS_INVALID_VALUE = 3, + CUSOLVER_STATUS_ARCH_MISMATCH = 4, + CUSOLVER_STATUS_MAPPING_ERROR = 5, + CUSOLVER_STATUS_EXECUTION_FAILED = 6, + CUSOLVER_STATUS_INTERNAL_ERROR = 7, + CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED = 8, + CUSOLVER_STATUS_NOT_SUPPORTED = 9, + CUSOLVER_STATUS_ZERO_PIVOT = 10, + CUSOLVER_STATUS_INVALID_LICENSE = 11, + CUSOLVER_STATUS_IRS_PARAMS_NOT_INITIALIZED = 12, + CUSOLVER_STATUS_IRS_PARAMS_INVALID = 13, + CUSOLVER_STATUS_IRS_PARAMS_INVALID_PREC = 14, + CUSOLVER_STATUS_IRS_PARAMS_INVALID_REFINE = 15, + CUSOLVER_STATUS_IRS_PARAMS_INVALID_MAXITER = 16, + CUSOLVER_STATUS_IRS_INTERNAL_ERROR = 20, + CUSOLVER_STATUS_IRS_NOT_SUPPORTED = 21, + CUSOLVER_STATUS_IRS_OUT_OF_RANGE = 22, + CUSOLVER_STATUS_IRS_NRHS_NOT_SUPPORTED_FOR_REFINE_GMRES = 23, + CUSOLVER_STATUS_IRS_INFOS_NOT_INITIALIZED = 25, + CUSOLVER_STATUS_IRS_INFOS_NOT_DESTROYED = 26, + CUSOLVER_STATUS_IRS_MATRIX_SINGULAR = 30, + CUSOLVER_STATUS_INVALID_WORKSPACE = 31 + } cusolverStatus_t; + + typedef enum { + CUSOLVER_EIG_TYPE_1 = 1, + CUSOLVER_EIG_TYPE_2 = 2, + CUSOLVER_EIG_TYPE_3 = 3 + } cusolverEigType_t; + + typedef enum { + CUSOLVER_EIG_MODE_NOVECTOR = 0, + CUSOLVER_EIG_MODE_VECTOR = 1 + } cusolverEigMode_t; + + typedef enum { + CUSOLVER_EIG_RANGE_ALL = 1001, + CUSOLVER_EIG_RANGE_I = 1002, + CUSOLVER_EIG_RANGE_V = 1003, + } cusolverEigRange_t; + + typedef enum { + CUSOLVER_INF_NORM = 104, + CUSOLVER_MAX_NORM = 105, + CUSOLVER_ONE_NORM = 106, + CUSOLVER_FRO_NORM = 107, + } cusolverNorm_t; + + typedef enum { + CUSOLVER_IRS_REFINE_NOT_SET = 1100, + CUSOLVER_IRS_REFINE_NONE = 1101, + CUSOLVER_IRS_REFINE_CLASSICAL = 1102, + CUSOLVER_IRS_REFINE_CLASSICAL_GMRES = 1103, + CUSOLVER_IRS_REFINE_GMRES = 1104, + CUSOLVER_IRS_REFINE_GMRES_GMRES = 1105, + CUSOLVER_IRS_REFINE_GMRES_NOPCOND = 1106, + + CUSOLVER_PREC_DD = 1150, + CUSOLVER_PREC_SS = 1151, + CUSOLVER_PREC_SHT = 1152, + + } cusolverIRSRefinement_t; + + typedef enum { + CUSOLVER_R_8I = 1201, + CUSOLVER_R_8U = 1202, + CUSOLVER_R_64F = 1203, + CUSOLVER_R_32F = 1204, + CUSOLVER_R_16F = 1205, + CUSOLVER_R_16BF = 1206, + CUSOLVER_R_TF32 = 1207, + CUSOLVER_R_AP = 1208, + CUSOLVER_C_8I = 1211, + CUSOLVER_C_8U = 1212, + CUSOLVER_C_64F = 1213, + CUSOLVER_C_32F = 1214, + CUSOLVER_C_16F = 1215, + CUSOLVER_C_16BF = 1216, + CUSOLVER_C_TF32 = 1217, + CUSOLVER_C_AP = 1218, + } cusolverPrecType_t; + + typedef enum { + CUSOLVER_ALG_0 = 0, /* default algorithm */ + CUSOLVER_ALG_1 = 1, + CUSOLVER_ALG_2 = 2 + } cusolverAlgMode_t; + + typedef enum { + CUBLAS_STOREV_COLUMNWISE = 0, + CUBLAS_STOREV_ROWWISE = 1 + } cusolverStorevMode_t; + + typedef enum { + CUBLAS_DIRECT_FORWARD = 0, + CUBLAS_DIRECT_BACKWARD = 1 + } cusolverDirectMode_t; + + cusolverStatus_t CUSOLVERAPI + cusolverGetProperty(libraryPropertyType type, int *value); + + cusolverStatus_t CUSOLVERAPI cusolverGetVersion(int *version); + + #if defined(__cplusplus) +} + #endif /* __cplusplus */ + +#endif // CUSOLVER_COMMON_H_ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d69ba6d9e7dac0b5cb351fff27a594983d97caf9 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolver.so.11 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolver.so.11 new file mode 100644 index 0000000000000000000000000000000000000000..2726df605733c64fa35c59c8873cc7657f99a2a0 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolver.so.11 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56b5fe97ad73b016206e5f940f108c938b87ed8ed63416c75ac3ac5c71f1fb8a +size 153126304 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 new file mode 100644 index 0000000000000000000000000000000000000000..cce74030a341af2ad657a098c298d11c8bc176e7 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe476eece4d00e501e839d9b6fad574219b98df1300b6b9866af0db1c6ad2577 +size 90190968 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ffd63d050eb70c2d010253476f5cf53c9284e04 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9477fe9ba706f5676b88b798404283749150c22d Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/cusparse.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/cusparse.h new file mode 100644 index 0000000000000000000000000000000000000000..53220a0c3845cc8b8099fcd8908a16f89403b38e --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/cusparse.h @@ -0,0 +1,6107 @@ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ +#if !defined(CUSPARSE_H_) +#define CUSPARSE_H_ + +#include <cuComplex.h> // cuComplex +#include <cuda_runtime_api.h> // cudaStream_t +#include <library_types.h> // CUDA_R_32F +#include <stdint.h> // int64_t +#include <stdio.h> // FILE* + +#if defined(__cplusplus) +# include <cuda_fp16.h> // __half +#endif // defined(__cplusplus) + +//############################################################################## +//# CUSPARSE VERSION INFORMATION +//############################################################################## + +#define CUSPARSE_VER_MAJOR 12 +#define CUSPARSE_VER_MINOR 5 +#define CUSPARSE_VER_PATCH 4 +#define CUSPARSE_VER_BUILD 2 +#define CUSPARSE_VERSION (CUSPARSE_VER_MAJOR * 1000 + \ + CUSPARSE_VER_MINOR * 100 + \ + CUSPARSE_VER_PATCH) + +// ############################################################################# +// # BASIC MACROS +// ############################################################################# + +#if !defined(CUSPARSEAPI) +# if defined(_WIN32) +# define CUSPARSEAPI __stdcall +# else +# define CUSPARSEAPI +# endif +#endif + +//------------------------------------------------------------------------------ + +#if !defined(_MSC_VER) +# define CUSPARSE_CPP_VERSION __cplusplus +#elif _MSC_FULL_VER >= 190024210 // Visual Studio 2015 Update 3 +# define CUSPARSE_CPP_VERSION _MSVC_LANG +#else +# define CUSPARSE_CPP_VERSION 0 +#endif + +// ############################################################################# +// # CUSPARSE_DEPRECATED MACRO +// ############################################################################# + +#if !defined(DISABLE_CUSPARSE_DEPRECATED) + +# if CUSPARSE_CPP_VERSION >= 201402L + +# define CUSPARSE_DEPRECATED_REPLACE_WITH(new_func) \ + [[deprecated("please use " #new_func " instead")]] + +# define CUSPARSE_DEPRECATED \ + [[deprecated("The routine will be removed in the next major release")]] + +# define CUSPARSE_DEPRECATED_TYPE \ + [[deprecated("The type will be removed in the next major release")]] + +# define CUSPARSE_DEPRECATED_TYPE_MSVC + +# elif defined(_MSC_VER) + +# define CUSPARSE_DEPRECATED_REPLACE_WITH(new_func) \ + __declspec(deprecated("please use " #new_func " instead")) + +# define CUSPARSE_DEPRECATED \ + __declspec(deprecated( \ + "The routine will be removed in the next major release")) + +# define CUSPARSE_DEPRECATED_TYPE + +# define CUSPARSE_DEPRECATED_TYPE_MSVC \ + __declspec(deprecated( \ + "The type will be removed in the next major release")) + +# elif defined(__INTEL_COMPILER) || defined(__clang__) || \ + (defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) + +# define CUSPARSE_DEPRECATED_REPLACE_WITH(new_func) \ + __attribute__((deprecated("please use " #new_func " instead"))) + +# define CUSPARSE_DEPRECATED \ + __attribute__((deprecated( \ + "The routine will be removed in the next major release"))) + +# define CUSPARSE_DEPRECATED_TYPE \ + __attribute__((deprecated( \ + "The type will be removed in the next major release"))) + +# define CUSPARSE_DEPRECATED_TYPE_MSVC + +# elif defined(__GNUC__) + +# define CUSPARSE_DEPRECATED_REPLACE_WITH(new_func) \ + __attribute__((deprecated)) + +# define CUSPARSE_DEPRECATED __attribute__((deprecated)) +# define CUSPARSE_DEPRECATED_TYPE __attribute__((deprecated)) +# define CUSPARSE_DEPRECATED_TYPE_MSVC + +# else + +# define CUSPARSE_DEPRECATED_REPLACE_WITH(new_func) +# define CUSPARSE_DEPRECATED +# define CUSPARSE_DEPRECATED_TYPE +# define CUSPARSE_DEPRECATED_TYPE_MSVC + +# endif // defined(__cplusplus) && __cplusplus >= 201402L +//------------------------------------------------------------------------------ + +# if
CUSPARSE_CPP_VERSION >= 201703L + +# define CUSPARSE_DEPRECATED_ENUM_REPLACE_WITH(new_enum) \ + [[deprecated("please use " #new_enum " instead")]] + +# define CUSPARSE_DEPRECATED_ENUM \ + [[deprecated("The enum will be removed in the next major release")]] + +# elif defined(__clang__) || \ + (defined(__GNUC__) && __GNUC__ >= 6 && !defined(__PGI)) + +# define CUSPARSE_DEPRECATED_ENUM_REPLACE_WITH(new_enum) \ + __attribute__((deprecated("please use " #new_enum " instead"))) + +# define CUSPARSE_DEPRECATED_ENUM \ + __attribute__((deprecated( \ + "The enum will be removed in the next major release"))) + +# else + +# define CUSPARSE_DEPRECATED_ENUM_REPLACE_WITH(new_enum) +# define CUSPARSE_DEPRECATED_ENUM + +# endif // defined(__cplusplus) && __cplusplus >= 201402L + +#else // defined(DISABLE_CUSPARSE_DEPRECATED) + +# define CUSPARSE_DEPRECATED_REPLACE_WITH(new_func) +# define CUSPARSE_DEPRECATED +# define CUSPARSE_DEPRECATED_TYPE +# define CUSPARSE_DEPRECATED_TYPE_MSVC +# define CUSPARSE_DEPRECATED_ENUM_REPLACE_WITH(new_enum) +# define CUSPARSE_DEPRECATED_ENUM + +#endif // !defined(DISABLE_CUSPARSE_DEPRECATED) + +#undef CUSPARSE_CPP_VERSION + +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) +extern "C" { +#endif // defined(__cplusplus) + +//############################################################################## +//# OPAQUE DATA STRUCTURES +//############################################################################## + +struct cusparseContext; +typedef struct cusparseContext* cusparseHandle_t; + +struct cusparseMatDescr; +typedef struct cusparseMatDescr* cusparseMatDescr_t; + +struct bsrsv2Info; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct bsrsv2Info* bsrsv2Info_t CUSPARSE_DEPRECATED_TYPE; + +struct bsrsm2Info; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct bsrsm2Info* bsrsm2Info_t CUSPARSE_DEPRECATED_TYPE; + +struct csric02Info; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct csric02Info* csric02Info_t CUSPARSE_DEPRECATED_TYPE; + +struct bsric02Info; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct bsric02Info* bsric02Info_t CUSPARSE_DEPRECATED_TYPE; + +struct csrilu02Info; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct csrilu02Info* csrilu02Info_t CUSPARSE_DEPRECATED_TYPE; + +struct bsrilu02Info; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct bsrilu02Info* bsrilu02Info_t CUSPARSE_DEPRECATED_TYPE; + +struct csru2csrInfo; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct csru2csrInfo* csru2csrInfo_t CUSPARSE_DEPRECATED_TYPE; + +struct cusparseColorInfo; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct cusparseColorInfo* cusparseColorInfo_t CUSPARSE_DEPRECATED_TYPE; + +struct pruneInfo; +typedef CUSPARSE_DEPRECATED_TYPE_MSVC +struct pruneInfo* pruneInfo_t CUSPARSE_DEPRECATED_TYPE; + +//############################################################################## +//# ENUMERATORS +//############################################################################## + +typedef enum { + CUSPARSE_STATUS_SUCCESS = 0, + CUSPARSE_STATUS_NOT_INITIALIZED = 1, + CUSPARSE_STATUS_ALLOC_FAILED = 2, + CUSPARSE_STATUS_INVALID_VALUE = 3, + CUSPARSE_STATUS_ARCH_MISMATCH = 4, + CUSPARSE_STATUS_MAPPING_ERROR = 5, + CUSPARSE_STATUS_EXECUTION_FAILED = 6, + CUSPARSE_STATUS_INTERNAL_ERROR = 7, + CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED = 8, + CUSPARSE_STATUS_ZERO_PIVOT = 9, + CUSPARSE_STATUS_NOT_SUPPORTED = 10, + CUSPARSE_STATUS_INSUFFICIENT_RESOURCES = 11 +} cusparseStatus_t; + +typedef enum { + CUSPARSE_POINTER_MODE_HOST = 0, + 
CUSPARSE_POINTER_MODE_DEVICE = 1 +} cusparsePointerMode_t; + +typedef enum { + CUSPARSE_ACTION_SYMBOLIC = 0, + CUSPARSE_ACTION_NUMERIC = 1 +} cusparseAction_t; + +typedef enum { + CUSPARSE_MATRIX_TYPE_GENERAL = 0, + CUSPARSE_MATRIX_TYPE_SYMMETRIC = 1, + CUSPARSE_MATRIX_TYPE_HERMITIAN = 2, + CUSPARSE_MATRIX_TYPE_TRIANGULAR = 3 +} cusparseMatrixType_t; + +typedef enum { + CUSPARSE_FILL_MODE_LOWER = 0, + CUSPARSE_FILL_MODE_UPPER = 1 +} cusparseFillMode_t; + +typedef enum { + CUSPARSE_DIAG_TYPE_NON_UNIT = 0, + CUSPARSE_DIAG_TYPE_UNIT = 1 +} cusparseDiagType_t; + +typedef enum { + CUSPARSE_INDEX_BASE_ZERO = 0, + CUSPARSE_INDEX_BASE_ONE = 1 +} cusparseIndexBase_t; + +typedef enum { + CUSPARSE_OPERATION_NON_TRANSPOSE = 0, + CUSPARSE_OPERATION_TRANSPOSE = 1, + CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE = 2 +} cusparseOperation_t; + +typedef enum { + CUSPARSE_DIRECTION_ROW = 0, + CUSPARSE_DIRECTION_COLUMN = 1 +} cusparseDirection_t; + +typedef enum { + CUSPARSE_SOLVE_POLICY_NO_LEVEL = 0, + CUSPARSE_SOLVE_POLICY_USE_LEVEL = 1 +} cusparseSolvePolicy_t CUSPARSE_DEPRECATED_TYPE; + +typedef enum { + CUSPARSE_COLOR_ALG0 = 0, // default + CUSPARSE_COLOR_ALG1 = 1 +} cusparseColorAlg_t CUSPARSE_DEPRECATED_TYPE; + +//############################################################################## +//# INITIALIZATION AND MANAGEMENT ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseCreate(cusparseHandle_t* handle); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroy(cusparseHandle_t handle); + +cusparseStatus_t CUSPARSEAPI +cusparseGetVersion(cusparseHandle_t handle, + int* version); + +cusparseStatus_t CUSPARSEAPI +cusparseGetProperty(libraryPropertyType type, + int* value); + +const char* CUSPARSEAPI +cusparseGetErrorName(cusparseStatus_t status); + +const char* CUSPARSEAPI +cusparseGetErrorString(cusparseStatus_t status); + +cusparseStatus_t CUSPARSEAPI +cusparseSetStream(cusparseHandle_t handle, + cudaStream_t streamId); + +cusparseStatus_t CUSPARSEAPI +cusparseGetStream(cusparseHandle_t handle, + cudaStream_t* streamId); + +cusparseStatus_t CUSPARSEAPI +cusparseGetPointerMode(cusparseHandle_t handle, + cusparsePointerMode_t* mode); + +cusparseStatus_t CUSPARSEAPI +cusparseSetPointerMode(cusparseHandle_t handle, + cusparsePointerMode_t mode); + +//############################################################################## +//# LOGGING APIs +//############################################################################## + +typedef void (*cusparseLoggerCallback_t)(int logLevel, + const char* functionName, + const char* message); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetCallback(cusparseLoggerCallback_t callback); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetFile(FILE* file); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerOpenFile(const char* logFile); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetLevel(int level); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetMask(int mask); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerForceDisable(void); + +//############################################################################## +//# HELPER ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseCreateMatDescr(cusparseMatDescr_t* descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyMatDescr(cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatType(cusparseMatDescr_t descrA, + cusparseMatrixType_t type); + 
+cusparseMatrixType_t CUSPARSEAPI +cusparseGetMatType(const cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatFillMode(cusparseMatDescr_t descrA, + cusparseFillMode_t fillMode); + +cusparseFillMode_t CUSPARSEAPI +cusparseGetMatFillMode(const cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatDiagType(cusparseMatDescr_t descrA, + cusparseDiagType_t diagType); + +cusparseDiagType_t CUSPARSEAPI +cusparseGetMatDiagType(const cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatIndexBase(cusparseMatDescr_t descrA, + cusparseIndexBase_t base); + +cusparseIndexBase_t CUSPARSEAPI +cusparseGetMatIndexBase(const cusparseMatDescr_t descrA); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsric02Info(csric02Info_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyCsric02Info(csric02Info_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsric02Info(bsric02Info_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsric02Info(bsric02Info_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsrilu02Info(csrilu02Info_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyCsrilu02Info(csrilu02Info_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsrilu02Info(bsrilu02Info_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsrilu02Info(bsrilu02Info_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsrsv2Info(bsrsv2Info_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsrsv2Info(bsrsv2Info_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsrsm2Info(bsrsm2Info_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsrsm2Info(bsrsm2Info_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsru2csrInfo(csru2csrInfo_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyCsru2csrInfo(csru2csrInfo_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateColorInfo(cusparseColorInfo_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyColorInfo(cusparseColorInfo_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreatePruneInfo(pruneInfo_t* info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDestroyPruneInfo(pruneInfo_t info); + +//############################################################################## +//# SPARSE LEVEL 2 ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseSgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const float* alpha, + const float* A, + int lda, + int nnz, + const float* xVal, + const int* xInd, + const float* beta, + float* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const double* alpha, + const double* A, + int lda, + int nnz, + const double* xVal, + const int* xInd, + const double* beta, + double* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI 
+cusparseDgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + int nnz, + const cuComplex* xVal, + const int* xInd, + const cuComplex* beta, + cuComplex* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + int nnz, + const cuDoubleComplex* xVal, + const int* xInd, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const float* x, + const float* beta, + float* y); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const double* x, + const double* beta, + double* y); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuComplex* x, + const cuComplex* beta, + cuComplex* y); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuDoubleComplex* x, + const cuDoubleComplex* beta, + cuDoubleComplex* y); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const float* x, + const float* beta, + float* y); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* 
bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const double* x, + const double* beta, + double* y); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuComplex* x, + const cuComplex* beta, + cuComplex* y); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuDoubleComplex* x, + const cuDoubleComplex* beta, + cuDoubleComplex* y); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXbsrsv2_zeroPivot(cusparseHandle_t handle, + bsrsv2Info_t info, + int* position); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, 
+ size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const float* f, + float* x, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const double* f, + double* x, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* 
bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const cuComplex* f, + cuComplex* x, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const cuDoubleComplex* f, + cuDoubleComplex* x, + cusparseSolvePolicy_t policy, + void* pBuffer); + +//############################################################################## +//# SPARSE LEVEL 3 ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const float* B, + const int ldb, + const float* beta, + float* C, + int ldc); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const double* B, + const int ldb, + const double* beta, + double* C, + int ldc); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const cuComplex* B, + const int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const cuDoubleComplex* B, + const int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXbsrsm2_zeroPivot(cusparseHandle_t handle, + bsrsm2Info_t info, + int* position); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, 
+ const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + 
+CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const float* B, + int ldb, + float* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const double* B, + int ldb, + double* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const cuComplex* B, + int ldb, + cuComplex* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +//############################################################################## +//# PRECONDITIONERS +//############################################################################## + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_numericBoost(cusparseHandle_t handle, + csrilu02Info_t info, + int enable_boost, + double* tol, + float* boost_val); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_numericBoost(cusparseHandle_t handle, + csrilu02Info_t info, + int enable_boost, + double* tol, + double* boost_val); + 
+CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_numericBoost(cusparseHandle_t handle, + csrilu02Info_t info, + int enable_boost, + double* tol, + cuComplex* boost_val); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_numericBoost(cusparseHandle_t handle, + csrilu02Info_t info, + int enable_boost, + double* tol, + cuDoubleComplex* boost_val); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXcsrilu02_zeroPivot(cusparseHandle_t handle, + csrilu02Info_t info, + int* position); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, 
+ cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + float* boost_val); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + double* boost_val); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + cuComplex* boost_val); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + cuDoubleComplex* boost_val); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXbsrilu02_zeroPivot(cusparseHandle_t handle, + bsrilu02Info_t info, + int* position); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + 
bsrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + 
cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXcsric02_zeroPivot(cusparseHandle_t handle, + csric02Info_t info, + int* position); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + 
const cusparseMatDescr_t descrA, + cuComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXbsric02_zeroPivot(cusparseHandle_t handle, + bsric02Info_t info, + int* position); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + 
+CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + 
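
/* [Editor's sketch, not part of the vendored header] Each factorization helper above is declared twice: a _bufferSize variant that returns the workspace size through an int, and a _bufferSizeExt variant that returns it through a size_t. The Ext form is the safer choice, since an int would overflow for workspaces of 2 GiB or more. A short sketch of the Ext query on the BSR incomplete-Cholesky path; cusparseCreateBsric02Info is assumed from its declaration elsewhere in this header, and CUSPARSE_DIRECTION_ROW is an assumed block layout. */

#include <cusparse.h>
#include <cuda_runtime.h>

static void* bsric02_workspace(cusparseHandle_t handle, int mb, int nnzb,
                               cusparseMatDescr_t descrA, float* d_bsrVal,
                               const int* d_bsrRowPtr, const int* d_bsrColInd,
                               int blockDim, bsric02Info_t* info)
{
    cusparseCreateBsric02Info(info);

    size_t bufSize = 0;   /* size_t, so sizes of 2 GiB or more do not overflow */
    cusparseSbsric02_bufferSizeExt(handle, CUSPARSE_DIRECTION_ROW, mb, nnzb,
                                   descrA, d_bsrVal, d_bsrRowPtr,
                                   d_bsrColInd, blockDim, *info, &bufSize);

    void* d_buf = NULL;
    cudaMalloc(&d_buf, bufSize);   /* reused by the analysis and solve phases */
    return d_buf;
}
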
+CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + const float* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const double* du, + const double* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + float* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const double* du, + double* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + cuComplex* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + cuDoubleComplex* B, + int ldb, + void*
pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + const float* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const double* du, + const double* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + float* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const double* du, + double* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + cuComplex* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + cuDoubleComplex* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const float* dl, + const float* d, + const float* du, + const float* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const double* dl, + const double* d, + const double* du, + const double* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const float* dl, + const float* d, + const float* du, + float* x, + int batchCount, + int batchStride, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const double* dl, + const double* d, + const double* du, + double* x, + int batchCount, + int batchStride, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + cuComplex* x, + int batchCount, + int batchStride, + void* pBuffer); + 
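
/* [Editor's sketch, not part of the vendored header] gtsv2StridedBatch solves batchCount independent m-by-m tridiagonal systems whose lower/main/upper diagonals (dl, d, du) and right-hand sides (x) are packed back to back with a fixed batchStride of at least m elements; the solve overwrites x. A minimal double-precision sketch of the query-then-solve sequence, with error checking elided. */

#include <cusparse.h>
#include <cuda_runtime.h>

/* On exit, d_x + i*batchStride holds the solution of the i-th system. */
static void tridiag_batch_solve(cusparseHandle_t handle, int m,
                                const double* d_dl, const double* d_d,
                                const double* d_du, double* d_x,
                                int batchCount, int batchStride)
{
    size_t bufSize = 0;
    cusparseDgtsv2StridedBatch_bufferSizeExt(handle, m, d_dl, d_d, d_du,
                                             d_x, batchCount, batchStride,
                                             &bufSize);
    void* d_buf = NULL;
    cudaMalloc(&d_buf, bufSize);

    cusparseDgtsv2StridedBatch(handle, m, d_dl, d_d, d_du, d_x,
                               batchCount, batchStride, d_buf);
    cudaFree(d_buf);
}
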
+cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + cuDoubleComplex* x, + int batchCount, + int batchStride, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const float* dl, + const float* d, + const float* du, + const float* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const double* dl, + const double* d, + const double* du, + const double* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + float* dl, + float* d, + float* du, + float* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + double* dl, + double* d, + double* du, + double* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuComplex* dl, + cuComplex* d, + cuComplex* du, + cuComplex* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuDoubleComplex* dl, + cuDoubleComplex* d, + cuDoubleComplex* du, + cuDoubleComplex* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const float* ds, + const float* dl, + const float* d, + const float* du, + const float* dw, + const float* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const double* ds, + const double* dl, + const double* d, + const double* du, + const double* dw, + const double* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuComplex* ds, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* dw, + const cuComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuDoubleComplex* ds, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* dw, + const cuDoubleComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + float* ds, + float* dl, + float* d, + float* du, + float* dw, + float* x, + 
int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + double* ds, + double* dl, + double* d, + double* du, + double* dw, + double* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuComplex* ds, + cuComplex* dl, + cuComplex* d, + cuComplex* du, + cuComplex* dw, + cuComplex* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuDoubleComplex* ds, + cuDoubleComplex* dl, + cuDoubleComplex* d, + cuDoubleComplex* du, + cuDoubleComplex* dw, + cuDoubleComplex* x, + int batchCount, + void* pBuffer); + +//############################################################################## +//# EXTRA ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseScsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const float* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const double* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const cuComplex* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuDoubleComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuDoubleComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const cuDoubleComplex* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsrgeam2Nnz(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + int nnzA, + const int* 
csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrB, + int nnzB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* workspace); + +cusparseStatus_t CUSPARSEAPI +cusparseScsrgeam2(cusparseHandle_t handle, + int m, + int n, + const float* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const float* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrgeam2(cusparseHandle_t handle, + int m, + int n, + const double* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const double* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrgeam2(cusparseHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + cuComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrgeam2(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuDoubleComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuDoubleComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + cuDoubleComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +//############################################################################## +//# SPARSE MATRIX REORDERING +//############################################################################## + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsrcolor(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsrcolor(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsrcolor(cusparseHandle_t handle, + int m, + int nnz, + 
const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsrcolor(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +//############################################################################## +//# SPARSE FORMAT CONVERSION +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseSnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +cusparseStatus_t CUSPARSEAPI +cusparseDnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +cusparseStatus_t CUSPARSEAPI +cusparseCnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +cusparseStatus_t CUSPARSEAPI +cusparseZnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +//############################################################################## +//# SPARSE FORMAT CONVERSION +//############################################################################## + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + float tol); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + double tol); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + cuComplex tol); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + cuDoubleComplex tol); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + float* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + float tol); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* 
csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + double* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + double tol); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + cuComplex* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + cuComplex tol); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + cuDoubleComplex* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + cuDoubleComplex tol); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoo2csr(cusparseHandle_t handle, + const int* cooRowInd, + int nnz, + int m, + int* csrSortedRowPtr, + cusparseIndexBase_t idxBase); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsr2coo(cusparseHandle_t handle, + const int* csrSortedRowPtr, + int nnz, + int m, + int* cooRowInd, + cusparseIndexBase_t idxBase); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXcsr2bsrNnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + int* bsrSortedRowPtrC, + int* nnzTotalDevHostPtr); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + float* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + double* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuDoubleComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const 
cusparseMatDescr_t descrC, + float* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuDoubleComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI 
+cusparseSgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + float* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + double* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + cuComplex* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + cuDoubleComplex* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseXgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + cuComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + cuDoubleComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* 
csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsr2gebsrNnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + int* bsrSortedRowPtrC, + int rowBlockDim, + int colBlockDim, + int* nnzTotalDevHostPtr, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + float* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int 
m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + double* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + cuComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + cuDoubleComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI 
+cusparseCgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseXgebsr2gebsrNnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + int* bsrSortedRowPtrC, + int rowBlockDimC, + int colBlockDimC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + float* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + double* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + cuComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + cuDoubleComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +//############################################################################## +//# SPARSE MATRIX SORTING +//############################################################################## + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCreateIdentityPermutation(cusparseHandle_t handle, + int n, + int* p); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoosort_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + const int* 
cooRowsA, + const int* cooColsA, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoosortByRow(cusparseHandle_t handle, + int m, + int n, + int nnz, + int* cooRowsA, + int* cooColsA, + int* P, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoosortByColumn(cusparseHandle_t handle, + int m, + int n, + int nnz, + int* cooRowsA, + int* cooColsA, + int* P, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsrsort_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + const int* csrRowPtrA, + const int* csrColIndA, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsrsort(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const int* csrRowPtrA, + int* csrColIndA, + int* P, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXcscsort_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + const int* cscColPtrA, + const int* cscRowIndA, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcscsort(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const int* cscColPtrA, + int* cscRowIndA, + int* P, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + float* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + double* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + cuComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + cuDoubleComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + float* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + double* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseScsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + float* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t 
info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + double* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + const __half* threshold, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + const float* threshold, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + const double* threshold, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrNnz(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + const __half* threshold, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrNnz(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + const float* threshold, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrNnz(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + const double* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csr(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + const __half* threshold, + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csr(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + const float* threshold, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +CUSPARSE_DEPRECATED 
+cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csr(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + const double* threshold, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const __half* threshold, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* threshold, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* threshold, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrNnz(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const __half* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrNnz(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csrNnz(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csr(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const __half* threshold, + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csr(cusparseHandle_t handle, + int m, + 
int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* threshold, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csr(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* threshold, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const 
int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* 
csrSortedColIndA, + float percentage, /* between 0 to 100 */ + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +#endif // defined(__cplusplus) + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +//############################################################################## +//# CSR2CSC +//############################################################################## + +typedef enum { + CUSPARSE_CSR2CSC_ALG_DEFAULT = 1, + CUSPARSE_CSR2CSC_ALG1 = 1 +} cusparseCsr2CscAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseCsr2cscEx2(cusparseHandle_t handle, + int m, + int n, + int nnz, + const void* csrVal, + const int* csrRowPtr, + const int* csrColInd, + void* cscVal, + int* cscColPtr, + int* cscRowInd, + cudaDataType valType, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + cusparseCsr2CscAlg_t alg, + void* buffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCsr2cscEx2_bufferSize(cusparseHandle_t handle, + int m, + int n, + int nnz, + const void* csrVal, + const int* csrRowPtr, + const int* csrColInd, + void* cscVal, + int* cscColPtr, + int* cscRowInd, + cudaDataType valType, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + cusparseCsr2CscAlg_t alg, + size_t* bufferSize); + +// ############################################################################# +// # GENERIC APIs - Enumerators and Opaque Data Structures +// ############################################################################# + +typedef enum { + CUSPARSE_FORMAT_CSR = 1, ///< Compressed Sparse Row (CSR) + CUSPARSE_FORMAT_CSC = 2, ///< Compressed Sparse Column (CSC) + CUSPARSE_FORMAT_COO = 3, ///< Coordinate (COO) - Structure of Arrays + CUSPARSE_FORMAT_BLOCKED_ELL = 5, ///< Blocked ELL + CUSPARSE_FORMAT_BSR = 6, ///< Blocked Compressed Sparse Row (BSR) + CUSPARSE_FORMAT_SLICED_ELLPACK = 7 ///< Sliced ELL +} cusparseFormat_t; + +typedef enum { + CUSPARSE_ORDER_COL = 1, ///< Column-Major Order - Matrix memory layout + CUSPARSE_ORDER_ROW = 2 ///< Row-Major Order - Matrix memory layout +} cusparseOrder_t; + +typedef enum { + CUSPARSE_INDEX_16U = 1, ///< 16-bit unsigned integer for matrix/vector + ///< indices + CUSPARSE_INDEX_32I = 2, ///< 32-bit signed integer for matrix/vector indices + CUSPARSE_INDEX_64I = 3 ///< 64-bit signed integer for matrix/vector indices +} cusparseIndexType_t; + +//------------------------------------------------------------------------------ + +struct cusparseSpVecDescr; +struct cusparseDnVecDescr; +struct cusparseSpMatDescr; +struct cusparseDnMatDescr; + +typedef struct cusparseSpVecDescr* cusparseSpVecDescr_t; +typedef struct cusparseDnVecDescr* 
cusparseDnVecDescr_t; +typedef struct cusparseSpMatDescr* cusparseSpMatDescr_t; +typedef struct cusparseDnMatDescr* cusparseDnMatDescr_t; + +typedef struct cusparseSpVecDescr const* cusparseConstSpVecDescr_t; +typedef struct cusparseDnVecDescr const* cusparseConstDnVecDescr_t; +typedef struct cusparseSpMatDescr const* cusparseConstSpMatDescr_t; +typedef struct cusparseDnMatDescr const* cusparseConstDnMatDescr_t; + +// ############################################################################# +// # SPARSE VECTOR DESCRIPTOR +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseCreateSpVec(cusparseSpVecDescr_t* spVecDescr, + int64_t size, + int64_t nnz, + void* indices, + void* values, + cusparseIndexType_t idxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstSpVec(cusparseConstSpVecDescr_t* spVecDescr, + int64_t size, + int64_t nnz, + const void* indices, + const void* values, + cusparseIndexType_t idxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroySpVec(cusparseConstSpVecDescr_t spVecDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecGet(cusparseSpVecDescr_t spVecDescr, + int64_t* size, + int64_t* nnz, + void** indices, + void** values, + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstSpVecGet(cusparseConstSpVecDescr_t spVecDescr, + int64_t* size, + int64_t* nnz, + const void** indices, + const void** values, + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecGetIndexBase(cusparseConstSpVecDescr_t spVecDescr, + cusparseIndexBase_t* idxBase); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecGetValues(cusparseSpVecDescr_t spVecDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstSpVecGetValues(cusparseConstSpVecDescr_t spVecDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecSetValues(cusparseSpVecDescr_t spVecDescr, + void* values); + +// ############################################################################# +// # DENSE VECTOR DESCRIPTOR +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseCreateDnVec(cusparseDnVecDescr_t* dnVecDescr, + int64_t size, + void* values, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstDnVec(cusparseConstDnVecDescr_t* dnVecDescr, + int64_t size, + const void* values, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyDnVec(cusparseConstDnVecDescr_t dnVecDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseDnVecGet(cusparseDnVecDescr_t dnVecDescr, + int64_t* size, + void** values, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnVecGet(cusparseConstDnVecDescr_t dnVecDescr, + int64_t* size, + const void** values, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseDnVecGetValues(cusparseDnVecDescr_t dnVecDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnVecGetValues(cusparseConstDnVecDescr_t dnVecDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseDnVecSetValues(cusparseDnVecDescr_t dnVecDescr, + void* values); + +// ############################################################################# +// # SPARSE MATRIX DESCRIPTOR +// 
############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseDestroySpMat(cusparseConstSpMatDescr_t spMatDescr); + + cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetFormat(cusparseConstSpMatDescr_t spMatDescr, + cusparseFormat_t* format); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetIndexBase(cusparseConstSpMatDescr_t spMatDescr, + cusparseIndexBase_t* idxBase); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetValues(cusparseSpMatDescr_t spMatDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstSpMatGetValues(cusparseConstSpMatDescr_t spMatDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatSetValues(cusparseSpMatDescr_t spMatDescr, + void* values); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetSize(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetStridedBatch(cusparseConstSpMatDescr_t spMatDescr, + int* batchCount); + +cusparseStatus_t CUSPARSEAPI +cusparseCooSetStridedBatch(cusparseSpMatDescr_t spMatDescr, + int batchCount, + int64_t batchStride); + +cusparseStatus_t CUSPARSEAPI +cusparseCsrSetStridedBatch(cusparseSpMatDescr_t spMatDescr, + int batchCount, + int64_t offsetsBatchStride, + int64_t columnsValuesBatchStride); + +cusparseStatus_t CUSPARSEAPI +cusparseBsrSetStridedBatch(cusparseSpMatDescr_t spMatDescr, + int batchCount, + int64_t offsetsBatchStride, + int64_t columnsBatchStride, + int64_t ValuesBatchStride); + +typedef enum { + CUSPARSE_SPMAT_FILL_MODE, + CUSPARSE_SPMAT_DIAG_TYPE +} cusparseSpMatAttribute_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetAttribute(cusparseConstSpMatDescr_t spMatDescr, + cusparseSpMatAttribute_t attribute, + void* data, + size_t dataSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatSetAttribute(cusparseSpMatDescr_t spMatDescr, + cusparseSpMatAttribute_t attribute, + void* data, + size_t dataSize); + +//------------------------------------------------------------------------------ +// ### CSR ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsr(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + void* csrRowOffsets, + void* csrColInd, + void* csrValues, + cusparseIndexType_t csrRowOffsetsType, + cusparseIndexType_t csrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstCsr(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + const void* csrRowOffsets, + const void* csrColInd, + const void* csrValues, + cusparseIndexType_t csrRowOffsetsType, + cusparseIndexType_t csrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsc(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + void* cscColOffsets, + void* cscRowInd, + void* cscValues, + cusparseIndexType_t cscColOffsetsType, + cusparseIndexType_t cscRowIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstCsc(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + const void* cscColOffsets, + const void* cscRowInd, + const void* cscValues, + cusparseIndexType_t cscColOffsetsType, + cusparseIndexType_t cscRowIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCsrGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* 
cols, + int64_t* nnz, + void** csrRowOffsets, + void** csrColInd, + void** csrValues, + cusparseIndexType_t* csrRowOffsetsType, + cusparseIndexType_t* csrColIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstCsrGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + const void** csrRowOffsets, + const void** csrColInd, + const void** csrValues, + cusparseIndexType_t* csrRowOffsetsType, + cusparseIndexType_t* csrColIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCscGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + void** cscColOffsets, + void** cscRowInd, + void** cscValues, + cusparseIndexType_t* cscColOffsetsType, + cusparseIndexType_t* cscRowIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstCscGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + const void** cscColOffsets, + const void** cscRowInd, + const void** cscValues, + cusparseIndexType_t* cscColOffsetsType, + cusparseIndexType_t* cscRowIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCsrSetPointers(cusparseSpMatDescr_t spMatDescr, + void* csrRowOffsets, + void* csrColInd, + void* csrValues); + +cusparseStatus_t CUSPARSEAPI +cusparseCscSetPointers(cusparseSpMatDescr_t spMatDescr, + void* cscColOffsets, + void* cscRowInd, + void* cscValues); + +//------------------------------------------------------------------------------ +// ### BSR ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsr(cusparseSpMatDescr_t* spMatDescr, + int64_t brows, + int64_t bcols, + int64_t bnnz, + int64_t rowBlockSize, + int64_t colBlockSize, + void* bsrRowOffsets, + void* bsrColInd, + void* bsrValues, + cusparseIndexType_t bsrRowOffsetsType, + cusparseIndexType_t bsrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType, + cusparseOrder_t order); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstBsr(cusparseConstSpMatDescr_t* spMatDescr, + int64_t brows, + int64_t bcols, + int64_t bnnz, + int64_t rowBlockDim, + int64_t colBlockDim, + const void* bsrRowOffsets, + const void* bsrColInd, + const void* bsrValues, + cusparseIndexType_t bsrRowOffsetsType, + cusparseIndexType_t bsrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType, + cusparseOrder_t order); + +//------------------------------------------------------------------------------ +// ### COO ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCoo(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + void* cooRowInd, + void* cooColInd, + void* cooValues, + cusparseIndexType_t cooIdxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstCoo(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + const void* cooRowInd, + const void* cooColInd, + const void* cooValues, + cusparseIndexType_t cooIdxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCooGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + void** cooRowInd, // COO row indices + void** cooColInd, // COO column indices + void** cooValues, // COO values + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + 
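Usage sketch: the COO descriptor above, like its CSR/CSC counterparts, is created from raw device pointers plus explicit index and value types. Below is a minimal, illustrative wrapper around cusparseCreateCoo; it is not part of the vendored header, and the helper name and the pre-filled device arrays are assumptions.

#include <cusparse.h>

/* Illustrative sketch only (not part of cusparse.h): build a COO descriptor
 * over device arrays of 32-bit, zero-based indices and float values that
 * the caller has already populated. */
static cusparseStatus_t make_coo_f32(cusparseSpMatDescr_t* mat,
                                     int64_t rows, int64_t cols, int64_t nnz,
                                     int* dRowInd, int* dColInd, float* dVals)
{
    return cusparseCreateCoo(mat, rows, cols, nnz,
                             dRowInd, dColInd, dVals,
                             CUSPARSE_INDEX_32I,       /* index type */
                             CUSPARSE_INDEX_BASE_ZERO, /* index base */
                             CUDA_R_32F);              /* value type */
}

Note that the matching cusparseDestroySpMat releases only the descriptor; the index and value arrays remain owned by the caller.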
+cusparseStatus_t CUSPARSEAPI +cusparseConstCooGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + const void** cooRowInd, // COO row indices + const void** cooColInd, // COO column indices + const void** cooValues, // COO values + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCooSetPointers(cusparseSpMatDescr_t spMatDescr, + void* cooRows, + void* cooColumns, + void* cooValues); + +//------------------------------------------------------------------------------ +// ### BLOCKED ELL ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBlockedEll(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t ellBlockSize, + int64_t ellCols, + void* ellColInd, + void* ellValue, + cusparseIndexType_t ellIdxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstBlockedEll(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t ellBlockSize, + int64_t ellCols, + const void* ellColInd, + const void* ellValue, + cusparseIndexType_t ellIdxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseBlockedEllGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ellBlockSize, + int64_t* ellCols, + void** ellColInd, + void** ellValue, + cusparseIndexType_t* ellIdxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstBlockedEllGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ellBlockSize, + int64_t* ellCols, + const void** ellColInd, + const void** ellValue, + cusparseIndexType_t* ellIdxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +//------------------------------------------------------------------------------ +// ### Sliced ELLPACK ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateSlicedEll(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + int64_t sellValuesSize, + int64_t sliceSize, + void* sellSliceOffsets, + void* sellColInd, + void* sellValues, + cusparseIndexType_t sellSliceOffsetsType, + cusparseIndexType_t sellColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstSlicedEll(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + int64_t sellValuesSize, + int64_t sliceSize, + const void* sellSliceOffsets, + const void* sellColInd, + const void* sellValues, + cusparseIndexType_t sellSliceOffsetsType, + cusparseIndexType_t sellColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +// ############################################################################# +// # DENSE MATRIX DESCRIPTOR +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseCreateDnMat(cusparseDnMatDescr_t* dnMatDescr, + int64_t rows, + int64_t cols, + int64_t ld, + void* values, + cudaDataType valueType, + cusparseOrder_t order); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstDnMat(cusparseConstDnMatDescr_t* dnMatDescr, + int64_t rows, + int64_t cols, + int64_t ld, + const void* values, + cudaDataType valueType, + cusparseOrder_t order); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyDnMat(cusparseConstDnMatDescr_t dnMatDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatGet(cusparseDnMatDescr_t 
dnMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ld, + void** values, + cudaDataType* type, + cusparseOrder_t* order); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnMatGet(cusparseConstDnMatDescr_t dnMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ld, + const void** values, + cudaDataType* type, + cusparseOrder_t* order); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatGetValues(cusparseDnMatDescr_t dnMatDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnMatGetValues(cusparseConstDnMatDescr_t dnMatDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatSetValues(cusparseDnMatDescr_t dnMatDescr, + void* values); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatSetStridedBatch(cusparseDnMatDescr_t dnMatDescr, + int batchCount, + int64_t batchStride); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatGetStridedBatch(cusparseConstDnMatDescr_t dnMatDescr, + int* batchCount, + int64_t* batchStride); + +// ############################################################################# +// # VECTOR-VECTOR OPERATIONS +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseAxpby(cusparseHandle_t handle, + const void* alpha, + cusparseConstSpVecDescr_t vecX, + const void* beta, + cusparseDnVecDescr_t vecY); + +cusparseStatus_t CUSPARSEAPI +cusparseGather(cusparseHandle_t handle, + cusparseConstDnVecDescr_t vecY, + cusparseSpVecDescr_t vecX); + +cusparseStatus_t CUSPARSEAPI +cusparseScatter(cusparseHandle_t handle, + cusparseConstSpVecDescr_t vecX, + cusparseDnVecDescr_t vecY); + +CUSPARSE_DEPRECATED +cusparseStatus_t CUSPARSEAPI +cusparseRot(cusparseHandle_t handle, + const void* c_coeff, + const void* s_coeff, + cusparseSpVecDescr_t vecX, + cusparseDnVecDescr_t vecY); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVV_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opX, + cusparseConstSpVecDescr_t vecX, + cusparseConstDnVecDescr_t vecY, + const void* result, + cudaDataType computeType, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVV(cusparseHandle_t handle, + cusparseOperation_t opX, + cusparseConstSpVecDescr_t vecX, + cusparseConstDnVecDescr_t vecY, + void* result, + cudaDataType computeType, + void* externalBuffer); + +// ############################################################################# +// # SPARSE TO DENSE +// ############################################################################# + +typedef enum { + CUSPARSE_SPARSETODENSE_ALG_DEFAULT = 0 +} cusparseSparseToDenseAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSparseToDense_bufferSize(cusparseHandle_t handle, + cusparseConstSpMatDescr_t matA, + cusparseDnMatDescr_t matB, + cusparseSparseToDenseAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSparseToDense(cusparseHandle_t handle, + cusparseConstSpMatDescr_t matA, + cusparseDnMatDescr_t matB, + cusparseSparseToDenseAlg_t alg, + void* externalBuffer); + +// ############################################################################# +// # DENSE TO SPARSE +// ############################################################################# + +typedef enum { + CUSPARSE_DENSETOSPARSE_ALG_DEFAULT = 0 +} cusparseDenseToSparseAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseDenseToSparse_bufferSize(cusparseHandle_t handle, + cusparseConstDnMatDescr_t matA, + cusparseSpMatDescr_t matB, + cusparseDenseToSparseAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDenseToSparse_analysis(cusparseHandle_t 
handle, + cusparseConstDnMatDescr_t matA, + cusparseSpMatDescr_t matB, + cusparseDenseToSparseAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDenseToSparse_convert(cusparseHandle_t handle, + cusparseConstDnMatDescr_t matA, + cusparseSpMatDescr_t matB, + cusparseDenseToSparseAlg_t alg, + void* externalBuffer); + +// ############################################################################# +// # SPARSE MATRIX-VECTOR MULTIPLICATION +// ############################################################################# + +typedef enum { + CUSPARSE_SPMV_ALG_DEFAULT = 0, + CUSPARSE_SPMV_CSR_ALG1 = 2, + CUSPARSE_SPMV_CSR_ALG2 = 3, + CUSPARSE_SPMV_COO_ALG1 = 1, + CUSPARSE_SPMV_COO_ALG2 = 4, + CUSPARSE_SPMV_SELL_ALG1 = 5 +} cusparseSpMVAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMV(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + const void* beta, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpMVAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMV_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + const void* beta, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpMVAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMV_preprocess(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + const void* beta, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpMVAlg_t alg, + void* externalBuffer); +// ############################################################################# +// # SPARSE TRIANGULAR VECTOR SOLVE +// ############################################################################# + +typedef enum { + CUSPARSE_SPSV_ALG_DEFAULT = 0, +} cusparseSpSVAlg_t; + +typedef enum { + CUSPARSE_SPSV_UPDATE_GENERAL = 0, + CUSPARSE_SPSV_UPDATE_DIAGONAL = 1 +} cusparseSpSVUpdate_t; + +struct cusparseSpSVDescr; +typedef struct cusparseSpSVDescr* cusparseSpSVDescr_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_createDescr(cusparseSpSVDescr_t* descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_destroyDescr(cusparseSpSVDescr_t descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpSVAlg_t alg, + cusparseSpSVDescr_t spsvDescr, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_analysis(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpSVAlg_t alg, + cusparseSpSVDescr_t spsvDescr, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_solve(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpSVAlg_t alg, + cusparseSpSVDescr_t spsvDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_updateMatrix(cusparseHandle_t handle, + cusparseSpSVDescr_t spsvDescr, + void* newValues, + cusparseSpSVUpdate_t updatePart); + + + +// 
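Both the SpMV and SpSV families above follow cuSPARSE's two-phase convention: a _bufferSize query, then the computation against a caller-allocated workspace (SpSV inserts an _analysis step between the two). A hedged sketch of the SpMV path, assuming a valid handle and descriptors created elsewhere; most error handling is omitted.

#include <cusparse.h>
#include <cuda_runtime.h>

/* Illustrative sketch only: y = alpha*A*x + beta*y via the generic SpMV API.
 * Real code should check every returned status. */
static cusparseStatus_t spmv_f32(cusparseHandle_t          handle,
                                 cusparseConstSpMatDescr_t matA,
                                 cusparseConstDnVecDescr_t vecX,
                                 cusparseDnVecDescr_t      vecY,
                                 float alpha, float beta)
{
    size_t bufferSize = 0;
    void*  dBuffer    = NULL;

    /* Phase 1: query the workspace size for the chosen algorithm. */
    cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                            &alpha, matA, vecX, &beta, vecY,
                            CUDA_R_32F, CUSPARSE_SPMV_ALG_DEFAULT,
                            &bufferSize);
    if (cudaMalloc(&dBuffer, bufferSize) != cudaSuccess)
        return CUSPARSE_STATUS_ALLOC_FAILED;

    /* Phase 2: run the multiplication against the caller-allocated buffer. */
    cusparseStatus_t status =
        cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     &alpha, matA, vecX, &beta, vecY,
                     CUDA_R_32F, CUSPARSE_SPMV_ALG_DEFAULT, dBuffer);
    cudaFree(dBuffer);
    return status;
}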
############################################################################# +// # SPARSE TRIANGULAR MATRIX SOLVE +// ############################################################################# + +typedef enum { + CUSPARSE_SPSM_ALG_DEFAULT = 0, +} cusparseSpSMAlg_t; + +typedef enum { + CUSPARSE_SPSM_UPDATE_GENERAL = 0, + CUSPARSE_SPSM_UPDATE_DIAGONAL = 1 +} cusparseSpSMUpdate_t; + +struct cusparseSpSMDescr; +typedef struct cusparseSpSMDescr* cusparseSpSMDescr_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_createDescr(cusparseSpSMDescr_t* descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_destroyDescr(cusparseSpSMDescr_t descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpSMAlg_t alg, + cusparseSpSMDescr_t spsmDescr, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_analysis(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpSMAlg_t alg, + cusparseSpSMDescr_t spsmDescr, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_solve(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpSMAlg_t alg, + cusparseSpSMDescr_t spsmDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_updateMatrix(cusparseHandle_t handle, + cusparseSpSMDescr_t spsmDescr, + void* newValues, + cusparseSpSMUpdate_t updatePart); + +// ############################################################################# +// # SPARSE MATRIX-MATRIX MULTIPLICATION +// ############################################################################# + +typedef enum { + CUSPARSE_SPMM_ALG_DEFAULT = 0, + CUSPARSE_SPMM_COO_ALG1 = 1, + CUSPARSE_SPMM_COO_ALG2 = 2, + CUSPARSE_SPMM_COO_ALG3 = 3, + CUSPARSE_SPMM_COO_ALG4 = 5, + CUSPARSE_SPMM_CSR_ALG1 = 4, + CUSPARSE_SPMM_CSR_ALG2 = 6, + CUSPARSE_SPMM_CSR_ALG3 = 12, + CUSPARSE_SPMM_BLOCKED_ELL_ALG1 = 13, + CUSPARSE_SPMM_BSR_ALG1 = 14 +} cusparseSpMMAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMM_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMM_preprocess(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMM(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMAlg_t alg, + void* externalBuffer); + +// 
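SpMM generalizes the same two-phase pattern to a dense right-hand-side matrix, with an optional _preprocess step that can amortize analysis across repeated calls on the same sparsity pattern. An illustrative sketch computing C = alpha*op(A)*op(B) + beta*C, again assuming the handle and the matA/matB/matC descriptors already exist:

#include <cusparse.h>
#include <cuda_runtime.h>

/* Illustrative sketch only: one SpMM call with the default algorithm.
 * Real code should check every returned status. */
static cusparseStatus_t spmm_f32(cusparseHandle_t          handle,
                                 cusparseConstSpMatDescr_t matA,
                                 cusparseConstDnMatDescr_t matB,
                                 cusparseDnMatDescr_t      matC,
                                 float alpha, float beta)
{
    size_t bufferSize = 0;
    void*  dBuffer    = NULL;

    /* Workspace query for the default algorithm. */
    cusparseSpMM_bufferSize(handle,
                            CUSPARSE_OPERATION_NON_TRANSPOSE,
                            CUSPARSE_OPERATION_NON_TRANSPOSE,
                            &alpha, matA, matB, &beta, matC,
                            CUDA_R_32F, CUSPARSE_SPMM_ALG_DEFAULT,
                            &bufferSize);
    if (cudaMalloc(&dBuffer, bufferSize) != cudaSuccess)
        return CUSPARSE_STATUS_ALLOC_FAILED;

    /* The multiplication itself. */
    cusparseStatus_t status =
        cusparseSpMM(handle,
                     CUSPARSE_OPERATION_NON_TRANSPOSE,
                     CUSPARSE_OPERATION_NON_TRANSPOSE,
                     &alpha, matA, matB, &beta, matC,
                     CUDA_R_32F, CUSPARSE_SPMM_ALG_DEFAULT, dBuffer);
    cudaFree(dBuffer);
    return status;
}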
############################################################################# +// # SPARSE MATRIX - SPARSE MATRIX MULTIPLICATION (SpGEMM) +// ############################################################################# + +typedef enum { + CUSPARSE_SPGEMM_DEFAULT = 0, + CUSPARSE_SPGEMM_CSR_ALG_DETERMINITIC = 1, + CUSPARSE_SPGEMM_CSR_ALG_NONDETERMINITIC = 2, + CUSPARSE_SPGEMM_ALG1 = 3, + CUSPARSE_SPGEMM_ALG2 = 4, + CUSPARSE_SPGEMM_ALG3 = 5 +} cusparseSpGEMMAlg_t; + +struct cusparseSpGEMMDescr; +typedef struct cusparseSpGEMMDescr* cusparseSpGEMMDescr_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_createDescr(cusparseSpGEMMDescr_t* descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_destroyDescr(cusparseSpGEMMDescr_t descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_workEstimation(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize1, + void* externalBuffer1); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_getNumProducts(cusparseSpGEMMDescr_t spgemmDescr, + int64_t* num_prods); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_estimateMemory(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + float chunk_fraction, + size_t* bufferSize3, + void* externalBuffer3, + size_t* bufferSize2); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_compute(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize2, + void* externalBuffer2); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_copy(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr); + +// ############################################################################# +// # SPARSE MATRIX - SPARSE MATRIX MULTIPLICATION (SpGEMM) STRUCTURE REUSE +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_workEstimation(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + cusparseSpMatDescr_t matC, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize1, + void* externalBuffer1); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_nnz(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + cusparseSpMatDescr_t matC, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize2, + void* externalBuffer2, + size_t* bufferSize3, + void* externalBuffer3, + size_t* bufferSize4, + void* 
externalBuffer4); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_copy(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + cusparseSpMatDescr_t matC, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize5, + void* externalBuffer5); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_compute(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr); + +// ############################################################################# +// # SAMPLED DENSE-DENSE MATRIX MULTIPLICATION +// ############################################################################# + +typedef enum { + CUSPARSE_SDDMM_ALG_DEFAULT = 0 +} cusparseSDDMMAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSDDMM_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstDnMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSDDMMAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSDDMM_preprocess(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstDnMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSDDMMAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSDDMM(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstDnMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSDDMMAlg_t alg, + void* externalBuffer); + +// ############################################################################# +// # GENERIC APIs WITH CUSTOM OPERATORS (PREVIEW) +// ############################################################################# + +struct cusparseSpMMOpPlan; +typedef struct cusparseSpMMOpPlan* cusparseSpMMOpPlan_t; + +typedef enum { + CUSPARSE_SPMM_OP_ALG_DEFAULT +} cusparseSpMMOpAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMMOp_createPlan(cusparseHandle_t handle, + cusparseSpMMOpPlan_t* plan, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMOpAlg_t alg, + const void* addOperationNvvmBuffer, + size_t addOperationBufferSize, + const void* mulOperationNvvmBuffer, + size_t mulOperationBufferSize, + const void* epilogueNvvmBuffer, + size_t epilogueBufferSize, + size_t* SpMMWorkspaceSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMMOp(cusparseSpMMOpPlan_t plan, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMMOp_destroyPlan(cusparseSpMMOpPlan_t plan); + +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) +} // extern "C" +#endif // defined(__cplusplus) + +#undef CUSPARSE_DEPRECATED_REPLACE_WITH +#undef CUSPARSE_DEPRECATED +#undef CUSPARSE_DEPRECATED_TYPE +#undef CUSPARSE_DEPRECATED_TYPE_MSVC +#undef CUSPARSE_DEPRECATED_ENUM_REPLACE_WITH +#undef 
CUSPARSE_DEPRECATED_ENUM + +#endif // !defined(CUSPARSE_H_) diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/cusparse_v2.h b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/cusparse_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..f889e1f569d46d1116fe6e302429b3855de43c21 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include/cusparse_v2.h @@ -0,0 +1,54 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ +#if !defined(CUSPARSE_V2_H_) +#define CUSPARSE_V2_H_ + +#include "cusparse.h" + +#endif diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/__init__.py b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-312.pyc b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82eafea284817ed9534ab04f44fccede0d94c3a2 Binary files /dev/null and b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/libcusparse.so.12 b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/libcusparse.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..542e8bf136735913e5729bcce61b2e33d169dcf0 --- /dev/null +++ b/tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/libcusparse.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b721248d43e9a5d190e0edf2e524ce38c4536330135bb7efa8ca379ec8a852f3 +size 293618392
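A small end-to-end check (not part of the diff) that the vendored header and the LFS-tracked library pair up: cusparse_v2.h simply forwards to cusparse.h, so a handle-lifecycle smoke test is enough.

#include <cusparse_v2.h> /* forwards to cusparse.h, as the file above shows */
#include <stdio.h>

int main(void)
{
    cusparseHandle_t handle;
    int version = 0;
    if (cusparseCreate(&handle) != CUSPARSE_STATUS_SUCCESS)
        return 1;
    cusparseGetVersion(handle, &version); /* e.g. 12xxx for a CUDA 12.x build */
    printf("cuSPARSE version: %d\n", version);
    cusparseDestroy(handle);
    return 0;
}

Built against the paths this diff adds, e.g.: cc smoke.c -I tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/include -L tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib -lcusparse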