diff --git a/.gitattributes b/.gitattributes
index c66fef971cdae816003b555365f64c198b911f37..c47d1e14b3b44aca037828f4e4e1bd99ee642d54 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -68,3 +68,4 @@ parrot/lib/libz.so.1 filter=lfs diff=lfs merge=lfs -text
 parrot/lib/libatomic.so filter=lfs diff=lfs merge=lfs -text
 parrot/bin/lzma filter=lfs diff=lfs merge=lfs -text
 parrot/lib/libncursesw.so.6 filter=lfs diff=lfs merge=lfs -text
+parrot/lib/libncurses.so.6.4 filter=lfs diff=lfs merge=lfs -text
diff --git a/parrot/lib/libncurses.so.6.4 b/parrot/lib/libncurses.so.6.4
new file mode 100644
index 0000000000000000000000000000000000000000..0745e17b73f2d5570bc5f8868bab0381bd7da794
--- /dev/null
+++ b/parrot/lib/libncurses.so.6.4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fa4e5e93804d78660b0eef727cdb4211209e1742e4ad3669348022668d90962
+size 271304
diff --git a/parrot/lib/python3.10/idlelib/Icons/idle_32.gif b/parrot/lib/python3.10/idlelib/Icons/idle_32.gif
new file mode 100644
index 0000000000000000000000000000000000000000..89a600532f00e4d206bbc8a59b6baad82fd9dab1
--- /dev/null
+++ b/parrot/lib/python3.10/idlelib/Icons/idle_32.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe70991cfccd1267922e94d91e02e9a58d2d29fd3382a2f4975280b9023cb7b9
+size 1019
diff --git a/parrot/lib/python3.10/idlelib/Icons/python.gif b/parrot/lib/python3.10/idlelib/Icons/python.gif
new file mode 100644
index 0000000000000000000000000000000000000000..2d73d19be477b8af5f50c7712a2ce1be9db9c20d
--- /dev/null
+++ b/parrot/lib/python3.10/idlelib/Icons/python.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:158c31382f8e5b41fded0c2aa9cc66a382928b003cdd8b5b0518836ad9c89377
+size 380
diff --git a/parrot/lib/python3.10/importlib/metadata/__pycache__/_collections.cpython-310.pyc b/parrot/lib/python3.10/importlib/metadata/__pycache__/_collections.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..233beb7785af71c670ba283b97f4c381379f6e41
Binary files /dev/null and b/parrot/lib/python3.10/importlib/metadata/__pycache__/_collections.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/importlib/metadata/__pycache__/_meta.cpython-310.pyc b/parrot/lib/python3.10/importlib/metadata/__pycache__/_meta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f778cfdb5bfdf5ff357d0c95c2f4ba83bd3d46d
Binary files /dev/null and b/parrot/lib/python3.10/importlib/metadata/__pycache__/_meta.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/lib2to3/PatternGrammar3.10.16.final.0.pickle b/parrot/lib/python3.10/lib2to3/PatternGrammar3.10.16.final.0.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..e9de5e3aa351535c40c4d95139afac6eb97e675d
--- /dev/null
+++ b/parrot/lib/python3.10/lib2to3/PatternGrammar3.10.16.final.0.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36ee934395b9209737b13893ddaff05fad8e239c2fdfac29d401d3fceeb30768
+size 1225
diff --git a/parrot/lib/python3.10/site-packages/COPYING b/parrot/lib/python3.10/site-packages/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..c14f01beb96afe6119f936d08a30ee2069bb0fb2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/COPYING
@@ -0,0 +1,29 @@
+Copyright (c) 2011, Stavros Korokithakis
+All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +Neither the name of Stochastic Technologies nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/parrot/lib/python3.10/site-packages/cv2/LICENSE-3RD-PARTY.txt b/parrot/lib/python3.10/site-packages/cv2/LICENSE-3RD-PARTY.txt new file mode 100644 index 0000000000000000000000000000000000000000..0462eee383c3d7d24ad85bf75c4024caa4634e34 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/LICENSE-3RD-PARTY.txt @@ -0,0 +1,3090 @@ +OpenCV library is redistributed within opencv-python package. +This license applies to OpenCV binary in the directory cv2/. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------ +libvpx is redistributed within all opencv-python Linux packages. +This license applies to libvpx binary in the directory cv2/. + +Copyright (c) 2010, The WebM Project authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + * Neither the name of Google, nor the WebM Project, nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +FFmpeg is redistributed within all opencv-python packages. + +Libbluray, libgnutls, libnettle, libhogweed, libintl, libmp3lame, libp11, +librtmp, libsoxr and libtasn1 are redistributed within all opencv-python macOS packages. + +This license applies to the above library binaries in the directory cv2/. + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. 
+ + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". 
The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. 
A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. 
If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + +------------------------------------------------------------------------------ +Qt 5 is redistributed within non-headless opencv-python Linux and macOS packages. +libgmp is redistributed within opencv-python macOS packages. +libidn2 is redistributed within opencv-python macOS packages. +libunistring is redistributed within opencv-python macOS packages. +This license applies to the above binaries in the directory cv2/. + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". 
+ + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. + +------------------------------------------------------------------------------ +bzip2 is redistributed within all opencv-python Linux packages. +This license applies to libbz2 binary in the directory cv2/. + +This program, "bzip2", the associated library "libbzip2", and all +documentation, are copyright (C) 1996-2010 Julian R Seward. All +rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + +4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Julian Seward, jseward@bzip.org +bzip2/libbzip2 version 1.0.6 of 6 September 2010 + +------------------------------------------------------------------------------ +libcrypto and libssl are redistributed within all opencv-python Linux and macOS packages. +libopencore-amrnb and libopencore-amrwb are redistributed within all opencv-python Linux and macOS packages. +This license applies to above binaries in the directory cv2/. + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. 
Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are adhered to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the routines from the library + * being used are not cryptographic related :-). + * 4. 
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publicly available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +------------------------------------------------------------------------------ +libfontconfig is redistributed within all opencv-python macOS packages. +This license applies to libfontconfig binary in the directory cv2/. + +Copyright © 2000,2001,2002,2003,2004,2006,2007 Keith Packard +Copyright © 2005 Patrick Lam +Copyright © 2009 Roozbeh Pournader +Copyright © 2008,2009 Red Hat, Inc. +Copyright © 2008 Danilo Šegan +Copyright © 2012 Google, Inc. + + +Permission to use, copy, modify, distribute, and sell this software and its +documentation for any purpose is hereby granted without fee, provided that +the above copyright notice appear in all copies and that both that +copyright notice and this permission notice appear in supporting +documentation, and that the name of the author(s) not be used in +advertising or publicity pertaining to distribution of the software without +specific, written prior permission. The authors make no +representations about the suitability of this software for any purpose. It +is provided "as is" without express or implied warranty. + +THE AUTHOR(S) DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY SPECIAL, INDIRECT OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + +------------------------------------------------------------------------------ +libfreetype is redistributed within opencv-python Linux and macOS packages. +This license applies to libfreetype binary in the directory cv2/. + + The FreeType Project LICENSE + ---------------------------- + + 2006-Jan-27 + + Copyright 1996-2002, 2006 by + David Turner, Robert Wilhelm, and Werner Lemberg + + + +Introduction +============ + + The FreeType Project is distributed in several archive packages; + some of them may contain, in addition to the FreeType font engine, + various tools and contributions which rely on, or relate to, the + FreeType Project. 
+ + This license applies to all files found in such packages, and + which do not fall under their own explicit license. The license + affects thus the FreeType font engine, the test programs, + documentation and makefiles, at the very least. + + This license was inspired by the BSD, Artistic, and IJG + (Independent JPEG Group) licenses, which all encourage inclusion + and use of free software in commercial and freeware products + alike. As a consequence, its main points are that: + + o We don't promise that this software works. However, we will be + interested in any kind of bug reports. (`as is' distribution) + + o You can use this software for whatever you want, in parts or + full form, without having to pay us. (`royalty-free' usage) + + o You may not pretend that you wrote this software. If you use + it, or only parts of it, in a program, you must acknowledge + somewhere in your documentation that you have used the + FreeType code. (`credits') + + We specifically permit and encourage the inclusion of this + software, with or without modifications, in commercial products. + We disclaim all warranties covering The FreeType Project and + assume no liability related to The FreeType Project. + + + Finally, many people asked us for a preferred form for a + credit/disclaimer to use in compliance with this license. We thus + encourage you to use the following text: + + """ + Portions of this software are copyright © <year> The FreeType + Project (www.freetype.org). All rights reserved. + """ + + Please replace <year> with the value from the FreeType version you + actually use. + + +Legal Terms +=========== + +0. Definitions +-------------- + + Throughout this license, the terms `package', `FreeType Project', + and `FreeType archive' refer to the set of files originally + distributed by the authors (David Turner, Robert Wilhelm, and + Werner Lemberg) as the `FreeType Project', be they named as alpha, + beta or final release. + + `You' refers to the licensee, or person using the project, where + `using' is a generic term including compiling the project's source + code as well as linking it to form a `program' or `executable'. + This program is referred to as `a program using the FreeType + engine'. + + This license applies to all files distributed in the original + FreeType Project, including all source code, binaries and + documentation, unless otherwise stated in the file in its + original, unmodified form as distributed in the original archive. + If you are unsure whether or not a particular file is covered by + this license, you must contact us to verify this. + + The FreeType Project is copyright (C) 1996-2000 by David Turner, + Robert Wilhelm, and Werner Lemberg. All rights reserved except as + specified below. + +1. No Warranty +-------------- + + THE FREETYPE PROJECT IS PROVIDED `AS IS' WITHOUT WARRANTY OF ANY + KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. IN NO EVENT WILL ANY OF THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY DAMAGES CAUSED BY THE USE OR THE INABILITY TO + USE, OF THE FREETYPE PROJECT. + +2.
Redistribution +----------------- + + This license grants a worldwide, royalty-free, perpetual and + irrevocable right and license to use, execute, perform, compile, + display, copy, create derivative works of, distribute and + sublicense the FreeType Project (in both source and object code + forms) and derivative works thereof for any purpose; and to + authorize others to exercise some or all of the rights granted + herein, subject to the following conditions: + + o Redistribution of source code must retain this license file + (`FTL.TXT') unaltered; any additions, deletions or changes to + the original files must be clearly indicated in accompanying + documentation. The copyright notices of the unaltered, + original files must be preserved in all copies of source + files. + + o Redistribution in binary form must provide a disclaimer that + states that the software is based in part of the work of the + FreeType Team, in the distribution documentation. We also + encourage you to put an URL to the FreeType web page in your + documentation, though this isn't mandatory. + + These conditions apply to any software derived from or based on + the FreeType Project, not just the unmodified files. If you use + our work, you must acknowledge us. However, no fee need be paid + to us. + +3. Advertising +-------------- + + Neither the FreeType authors and contributors nor you shall use + the name of the other for commercial, advertising, or promotional + purposes without specific prior written permission. + + We suggest, but do not require, that you use one or more of the + following phrases to refer to this software in your documentation + or advertising materials: `FreeType Project', `FreeType Engine', + `FreeType library', or `FreeType Distribution'. + + As you have not signed this license, you are not required to + accept it. However, as the FreeType Project is copyrighted + material, only this license, or another one contracted with the + authors, grants you the right to use, distribute, and modify it. + Therefore, by using, distributing, or modifying the FreeType + Project, you indicate that you understand and accept all the terms + of this license. + +4. Contacts +----------- + + There are two mailing lists related to FreeType: + + o freetype@nongnu.org + + Discusses general use and applications of FreeType, as well as + future and wanted additions to the library and distribution. + If you are looking for support, start in this list if you + haven't found anything to help you in the documentation. + + o freetype-devel@nongnu.org + + Discusses bugs, as well as engine internals, design issues, + specific licenses, porting, etc. + + Our home page can be found at + + https://www.freetype.org + +------------------------------------------------------------------------------ +libpng is redistributed within all opencv-python Linux and macOS packages. +This license applies to libpng binary in the directory cv2/. + +PNG Reference Library License version 2 +--------------------------------------- + + * Copyright (c) 1995-2019 The PNG Reference Library Authors. + * Copyright (c) 2018-2019 Cosmin Truta. + * Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson. + * Copyright (c) 1996-1997 Andreas Dilger. + * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc. + +The software is supplied "as is", without warranty of any kind, +express or implied, including, without limitation, the warranties +of merchantability, fitness for a particular purpose, title, and +non-infringement. 
In no event shall the Copyright owners, or +anyone distributing the software, be liable for any damages or +other liability, whether in contract, tort or otherwise, arising +from, out of, or in connection with the software, or the use or +other dealings in the software, even if advised of the possibility +of such damage. + +Permission is hereby granted to use, copy, modify, and distribute +this software, or portions hereof, for any purpose, without fee, +subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you + use this software in a product, an acknowledgment in the product + documentation would be appreciated, but is not required. + + 2. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 3. This Copyright notice may not be removed or altered from any + source or altered source distribution. + + +PNG Reference Library License version 1 (for libpng 0.5 through 1.6.35) +----------------------------------------------------------------------- + +libpng versions 1.0.7, July 1, 2000, through 1.6.35, July 15, 2018 are +Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson, are +derived from libpng-1.0.6, and are distributed according to the same +disclaimer and license as libpng-1.0.6 with the following individuals +added to the list of Contributing Authors: + + Simon-Pierre Cadieux + Eric S. Raymond + Mans Rullgard + Cosmin Truta + Gilles Vollant + James Yu + Mandar Sahastrabuddhe + Google Inc. + Vadim Barkov + +and with the following additions to the disclaimer: + + There is no warranty against interference with your enjoyment of + the library or against infringement. There is no warranty that our + efforts or the library will fulfill any of your particular purposes + or needs. This library is provided with all faults, and the entire + risk of satisfactory quality, performance, accuracy, and effort is + with the user. + +Some files in the "contrib" directory and some configure-generated +files that are distributed with libpng have other copyright owners, and +are released under other open source licenses. + +libpng versions 0.97, January 1998, through 1.0.6, March 20, 2000, are +Copyright (c) 1998-2000 Glenn Randers-Pehrson, are derived from +libpng-0.96, and are distributed according to the same disclaimer and +license as libpng-0.96, with the following individuals added to the +list of Contributing Authors: + + Tom Lane + Glenn Randers-Pehrson + Willem van Schaik + +libpng versions 0.89, June 1996, through 0.96, May 1997, are +Copyright (c) 1996-1997 Andreas Dilger, are derived from libpng-0.88, +and are distributed according to the same disclaimer and license as +libpng-0.88, with the following individuals added to the list of +Contributing Authors: + + John Bowler + Kevin Bracey + Sam Bushell + Magnus Holmgren + Greg Roelofs + Tom Tanner + +Some files in the "scripts" directory have other copyright owners, +but are released under this license. + +libpng versions 0.5, May 1995, through 0.88, January 1996, are +Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc. + +For the purposes of this copyright and license, "Contributing Authors" +is defined as the following set of individuals: + + Andreas Dilger + Dave Martindale + Guy Eric Schalnat + Paul Schmidt + Tim Wegner + +The PNG Reference Library is supplied "AS IS". The Contributing +Authors and Group 42, Inc. 
disclaim all warranties, expressed or +implied, including, without limitation, the warranties of +merchantability and of fitness for any purpose. The Contributing +Authors and Group 42, Inc. assume no liability for direct, indirect, +incidental, special, exemplary, or consequential damages, which may +result from the use of the PNG Reference Library, even if advised of +the possibility of such damage. + +Permission is hereby granted to use, copy, modify, and distribute this +source code, or portions hereof, for any purpose, without fee, subject +to the following restrictions: + + 1. The origin of this source code must not be misrepresented. + + 2. Altered versions must be plainly marked as such and must not + be misrepresented as being the original source. + + 3. This Copyright notice may not be removed or altered from any + source or altered source distribution. + +The Contributing Authors and Group 42, Inc. specifically permit, +without fee, and encourage the use of this source code as a component +to supporting the PNG file format in commercial products. If you use +this source code in a product, acknowledgment is not required but would +be appreciated. + +------------------------------------------------------------------------------ +libz is redistributed within all opencv-python Linux packages. +This license applies to libz binary in the directory cv2/. + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +------------------------------------------------------------------------------ +libdav1d is redistributed within opencv-python macOS packages. +This license applies to libdav1d binary in the directory cv2/. + +Copyright © 2018-2019, VideoLAN and dav1d authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +libffi is redistributed within opencv-python macOS packages. +This license applies to libffi binary in the directory cv2/. + +libffi - Copyright (c) 1996-2020 Anthony Green, Red Hat, Inc and others. +See source files for details. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +------------------------------------------------------------------------------ +libogg is redistributed within opencv-python macOS packages. +This license applies to libogg binary in the directory cv2/. + +Copyright (c) 2002, Xiph.org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +libopenjp2 is redistributed within opencv-python macOS packages. +This license applies to libopenjp2 binary in the directory cv2/. + +The copyright in this software is being made available under the 2-clauses +BSD License, included below. This software may be subject to other third +party and contributor rights, including patent rights, and no such rights +are granted under this license. + +Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium +Copyright (c) 2002-2014, Professor Benoit Macq +Copyright (c) 2003-2014, Antonin Descampe +Copyright (c) 2003-2009, Francois-Olivier Devaux +Copyright (c) 2005, Herve Drolon, FreeImage Team +Copyright (c) 2002-2003, Yannick Verschueren +Copyright (c) 2001-2003, David Janssens +Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France +Copyright (c) 2012, CS Systemes d'Information, France + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +libopus is redistributed within opencv-python macOS packages. +This license applies to libopus binary in the directory cv2/. + +Copyright 2001-2011 Xiph.Org, Skype Limited, Octasic, + Jean-Marc Valin, Timothy B. Terriberry, + CSIRO, Gregory Maxwell, Mark Borgerding, + Erik de Castro Lopo + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Opus is subject to the royalty-free patent licenses which are +specified at: + +Xiph.Org Foundation: +https://datatracker.ietf.org/ipr/1524/ + +Microsoft Corporation: +https://datatracker.ietf.org/ipr/1914/ + +Broadcom Corporation: +https://datatracker.ietf.org/ipr/1526/ + +------------------------------------------------------------------------------ +librav1e is redistributed within opencv-python macOS packages. +This license applies to librav1e binary in the directory cv2/. + +BSD 2-Clause License + +Copyright (c) 2017-2020, the rav1e contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +libsnappy is redistributed within opencv-python macOS packages. +This license applies to libsnappy binary in the directory cv2/. + +Copyright 2011, Google Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +libspeex is redistributed within opencv-python macOS packages. +This license applies to libspeex binary in the directory cv2/. + +Copyright 2002-2008 Xiph.org Foundation +Copyright 2002-2008 Jean-Marc Valin +Copyright 2005-2007 Analog Devices Inc. +Copyright 2005-2008 Commonwealth Scientific and Industrial Research + Organisation (CSIRO) +Copyright 1993, 2002, 2006 David Rowe +Copyright 2003 EpicGames +Copyright 1992-1994 Jutta Degener, Carsten Bormann + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +------------------------------------------------------------------------------ +libsrt is redistributed within opencv-python macOS packages. +This license applies to libsrt binary in the directory cv2/. + +/* + * + * Copyright (c) 2001-2017 Cisco Systems, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * Neither the name of the Cisco Systems, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + + Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. + +------------------------------------------------------------------------------ +libtheoradec and libtheoraenc are redistributed within opencv-python macOS packages. +This license applies to libtheoradec and libtheoraenc binaries in the directory cv2/. + + Copyright (C) 2002-2009 Xiph.org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +libwebp and libwebpmux are redistributed within all opencv-python packages. +This license applies to libwebp and libwebpmux binaries in the directory cv2/. + +Copyright (c) 2010, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ + * Neither the name of Google nor the names of its contributors may + be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +libvorbis and libvorbisenc are redistributed within opencv-python macOS packages. +This license applies to libvorbis and libvorbisenc binaries in the directory cv2/. + +Copyright (c) 2002-2020 Xiph.org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +Libxcb utility libraries are redistributed within opencv-python non-headless Linux packages. +This license applies to libxcb related binaries in the directory cv2/. + +Copyright (C) 2001-2006 Bart Massey, Jamey Sharp, and Josh Triplett. +All Rights Reserved. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the names of the authors +or their institutions shall not be used in advertising or +otherwise to promote the sale, use or other dealings in this +Software without prior written authorization from the +authors. + +------------------------------------------------------------------------------ +Libxcb-image is redistributed within opencv-python non-headless Linux packages. +This license applies to libxcb-image binary in the directory cv2/. + +Copyright © 2007-2008 Bart Massey +Copyright © 2008 Julien Danjou +Copyright © 2008 Keith Packard + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the names of the authors or +their institutions shall not be used in advertising or otherwise to +promote the sale, use or other dealings in this Software without +prior written authorization from the authors. + +------------------------------------------------------------------------------ +Libxcb-util is redistributed within opencv-python non-headless Linux packages. +This license applies to libxcb-util binary in the directory cv2/. 
+ +Copyright © 2008 Bart Massey +Copyright © 2008 Ian Osgood +Copyright © 2008 Jamey Sharp +Copyright © 2008 Josh Triplett +Copyright © 2008-2009 Julien Danjou + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the names of the authors or +their institutions shall not be used in advertising or otherwise to +promote the sale, use or other dealings in this Software without +prior written authorization from the authors. + +------------------------------------------------------------------------------ +Libxcb-render-util is redistributed within opencv-python non-headless Linux packages. +This license applies to libxcb-render-util binary in the directory cv2/. + +Copyright © 2000 Keith Packard + +Permission to use, copy, modify, distribute, and sell this software and its +documentation for any purpose is hereby granted without fee, provided that +the above copyright notice appear in all copies and that both that +copyright notice and this permission notice appear in supporting +documentation, and that the name of Keith Packard not be used in +advertising or publicity pertaining to distribution of the software without +specific, written prior permission. Keith Packard makes no +representations about the suitability of this software for any purpose. It +is provided "as is" without express or implied warranty. + +KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + +Copyright © 2006 Jamey Sharp. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the names of the authors or their +institutions shall not be used in advertising or otherwise to promote the +sale, use or other dealings in this Software without prior written +authorization from the authors. + +Copyright © 2006 Ian Osgood + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the names of the authors or their +institutions shall not be used in advertising or otherwise to promote the +sale, use or other dealings in this Software without prior written +authorization from the authors. + +------------------------------------------------------------------------------ +Libxcb-icccm is redistributed within opencv-python non-headless Linux packages. +This license applies to Libxcb-icccm binary in the directory cv2/. + +Copyright © 2008-2011 Arnaud Fontaine +Copyright © 2007-2008 Vincent Torri + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +Except as contained in this notice, the names of the authors or +their institutions shall not be used in advertising or otherwise to +promote the sale, use or other dealings in this Software without +prior written authorization from the authors. + +------------------------------------------------------------------------------ +libXau is redistributed within opencv-python non-headless Linux packages. +This license applies to libXau binary in the directory cv2/. + +Copyright 1988, 1993, 1994, 1998 The Open Group + +Permission to use, copy, modify, distribute, and sell this software and its +documentation for any purpose is hereby granted without fee, provided that +the above copyright notice appear in all copies and that both that +copyright notice and this permission notice appear in supporting +documentation. + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of The Open Group shall not be +used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization from The Open Group. + +------------------------------------------------------------------------------ +Vulkan headers are redistributed within all opencv-python packages. +This license applies to Vulkan headers in the directory 3rdparty/include/vulkan. + +Copyright (c) 2015-2018 The Khronos Group Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +------------------------------------------------------------------------------ +Libjpeg-turbo is redistributed within all opencv-python packages as build option. + +libjpeg-turbo Licenses +====================== + +libjpeg-turbo is covered by three compatible BSD-style open source licenses: + +- The IJG (Independent JPEG Group) License, which is listed in + [README.ijg](README.ijg) + + This license applies to the libjpeg API library and associated programs + (any code inherited from libjpeg, and any modifications to that code.) + +- The Modified (3-clause) BSD License, which is listed below + + This license covers the TurboJPEG API library and associated programs, as + well as the build system. + +- The [zlib License](https://opensource.org/licenses/Zlib) + + This license is a subset of the other two, and it covers the libjpeg-turbo + SIMD extensions. + + +Complying with the libjpeg-turbo Licenses +========================================= + +This section provides a roll-up of the libjpeg-turbo licensing terms, to the +best of our understanding. + +1. 
If you are distributing a modified version of the libjpeg-turbo source, + then: + + 1. You cannot alter or remove any existing copyright or license notices + from the source. + + **Origin** + - Clause 1 of the IJG License + - Clause 1 of the Modified BSD License + - Clauses 1 and 3 of the zlib License + + 2. You must add your own copyright notice to the header of each source + file you modified, so others can tell that you modified that file (if + there is not an existing copyright header in that file, then you can + simply add a notice stating that you modified the file.) + + **Origin** + - Clause 1 of the IJG License + - Clause 2 of the zlib License + + 3. You must include the IJG README file, and you must not alter any of the + copyright or license text in that file. + + **Origin** + - Clause 1 of the IJG License + +2. If you are distributing only libjpeg-turbo binaries without the source, or + if you are distributing an application that statically links with + libjpeg-turbo, then: + + 1. Your product documentation must include a message stating: + + This software is based in part on the work of the Independent JPEG + Group. + + **Origin** + - Clause 2 of the IJG license + + 2. If your binary distribution includes or uses the TurboJPEG API, then + your product documentation must include the text of the Modified BSD + License (see below.) + + **Origin** + - Clause 2 of the Modified BSD License + +3. You cannot use the name of the IJG or The libjpeg-turbo Project or the + contributors thereof in advertising, publicity, etc. + + **Origin** + - IJG License + - Clause 3 of the Modified BSD License + +4. The IJG and The libjpeg-turbo Project do not warrant libjpeg-turbo to be + free of defects, nor do we accept any liability for undesirable + consequences resulting from your use of the software. + + **Origin** + - IJG License + - Modified BSD License + - zlib License + + +The Modified (3-clause) BSD License +=================================== + +Copyright (C)2009-2022 D. R. Commander. All Rights Reserved.
+Copyright (C)2015 Viktor Szathmáry. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +- Neither the name of the libjpeg-turbo Project nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS", +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +Why Three Licenses? +=================== + +The zlib License could have been used instead of the Modified (3-clause) BSD +License, and since the IJG License effectively subsumes the distribution +conditions of the zlib License, this would have effectively placed +libjpeg-turbo binary distributions under the IJG License. However, the IJG +License specifically refers to the Independent JPEG Group and does not extend +attribution and endorsement protections to other entities. Thus, it was +desirable to choose a license that granted us the same protections for new code +that were granted to the IJG for code derived from their software. + +------------------------------------------------------------------------------ +Libspng is redistributed within all opencv-python packages as build option. + +BSD 2-Clause License + +Copyright (c) 2018-2022, Randy +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +QUIRC library is redistributed within all opencv-python packages. + +quirc -- QR-code recognition library +Copyright (C) 2010-2012 Daniel Beer + +Permission to use, copy, modify, and/or distribute this software for +any purpose with or without fee is hereby granted, provided that the +above copyright notice and this permission notice appear in all +copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL +WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE +AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + +------------------------------------------------------------------------------ +Flatbuffers library is redistributed within all opencv-python packages. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------ +Protobuf library is redistributed within all opencv-python packages. + +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +------------------------------------------------------------------------------ +OpenJPEG library is redistributed within all opencv-python packages. + +/* + * The copyright in this software is being made available under the 2-clauses + * BSD License, included below. This software may be subject to other third + * party and contributor rights, including patent rights, and no such rights + * are granted under this license. + * + * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium + * Copyright (c) 2002-2014, Professor Benoit Macq + * Copyright (c) 2003-2014, Antonin Descampe + * Copyright (c) 2003-2009, Francois-Olivier Devaux + * Copyright (c) 2005, Herve Drolon, FreeImage Team + * Copyright (c) 2002-2003, Yannick Verschueren + * Copyright (c) 2001-2003, David Janssens + * Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France + * Copyright (c) 2012, CS Systemes d'Information, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +------------------------------------------------------------------------------ +TIFF library is redistributed within all opencv-python packages. + +Copyright (c) 1988-1997 Sam Leffler +Copyright (c) 1991-1997 Silicon Graphics, Inc. 
+ +Permission to use, copy, modify, distribute, and sell this software and +its documentation for any purpose is hereby granted without fee, provided +that (i) the above copyright notices and this permission notice appear in +all copies of the software and related documentation, and (ii) the names of +Sam Leffler and Silicon Graphics may not be used in any advertising or +publicity relating to the software without the specific, prior written +permission of Sam Leffler and Silicon Graphics. + +THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, +EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY +WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + +IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR +ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, +OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF +LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +OF THIS SOFTWARE. + +------------------------------------------------------------------------------ +OpenEXR library is redistributed within all opencv-python packages. + +Copyright (c) 2006, Industrial Light & Magic, a division of Lucasfilm +Entertainment Company Ltd. Portions contributed and copyright held by +others as indicated. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with + the distribution. + + * Neither the name of Industrial Light & Magic nor the names of + any other contributors to this software may be used to endorse or + promote products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +Intel(R) IPP ICV library statically linked within x86 and x86_64 opencv-python packages. + +Intel(R) Integrated Performance Primitives 2021 Update 10 + +Intel Simplified Software License (Version October 2022) + +Intel(R) Integrated Performance Primitives (Intel(R) IPP) : Copyright (C) 1997 Intel Corporation + +Use and Redistribution. 
You may use and redistribute the software, which is +provided in binary form only, (the "Software"), without modification, +provided the following conditions are met: + +* Redistributions must reproduce the above copyright notice and these + terms of use in the Software and in the documentation and/or other materials + provided with the distribution. +* Neither the name of Intel nor the names of its suppliers may be used to + endorse or promote products derived from this Software without specific + prior written permission. +* No reverse engineering, decompilation, or disassembly of the Software is + permitted, nor any modification or alteration of the Software or its operation + at any time, including during execution. + +No other licenses. Except as provided in the preceding section, Intel grants no +licenses or other rights by implication, estoppel or otherwise to, patent, +copyright, trademark, trade name, service mark or other intellectual property +licenses or rights of Intel. + +Third party software. "Third Party Software" means the files (if any) listed +in the "third-party-software.txt" or other similarly-named text file that may +be included with the Software. Third Party Software, even if included with the +distribution of the Software, may be governed by separate license terms, including +without limitation, third party license terms, open source software notices and +terms, and/or other Intel software license terms. These separate license terms +solely govern Your use of the Third Party Software. + +DISCLAIMER. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT ARE +DISCLAIMED. THIS SOFTWARE IS NOT INTENDED FOR USE IN SYSTEMS OR APPLICATIONS +WHERE FAILURE OF THE SOFTWARE MAY CAUSE PERSONAL INJURY OR DEATH AND YOU AGREE +THAT YOU ARE FULLY RESPONSIBLE FOR ANY CLAIMS, COSTS, DAMAGES, EXPENSES, AND +ATTORNEYS' FEES ARISING OUT OF ANY SUCH USE, EVEN IF ANY CLAIM ALLEGES THAT +INTEL WAS NEGLIGENT REGARDING THE DESIGN OR MANUFACTURE OF THE SOFTWARE. + +LIMITATION OF LIABILITY. IN NO EVENT WILL INTEL BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +No support. Intel may make changes to the Software, at any time without notice, +and is not obligated to support, update or provide training for the Software. + +Termination. Your right to use the Software is terminated in the event of your +breach of this license. + +Feedback. Should you provide Intel with comments, modifications, corrections, +enhancements or other input ("Feedback") related to the Software, Intel will be +free to use, disclose, reproduce, license or otherwise distribute or exploit the +Feedback in its sole discretion without any obligations or restrictions of any +kind, including without limitation, intellectual property rights or licensing +obligations. + +Compliance with laws. You agree to comply with all relevant laws and regulations +governing your use, transfer, import or export (or prohibition thereof) of the +Software. + +Governing law. 
All disputes will be governed by the laws of the United States of +America and the State of Delaware without reference to conflict of law +principles and subject to the exclusive jurisdiction of the state or federal +courts sitting in the State of Delaware, and each party agrees that it submits +to the personal jurisdiction and venue of those courts and waives any +objections. THE UNITED NATIONS CONVENTION ON CONTRACTS FOR THE INTERNATIONAL +SALE OF GOODS (1980) IS SPECIFICALLY EXCLUDED AND WILL NOT APPLY TO THE SOFTWARE. + +------------------------------------------------------------------------------ +Orbbec SDK distributed with arm64 MacOS packages. + +MIT License + +Copyright (c) 2023 OrbbecDeveloper + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/parrot/lib/python3.10/site-packages/cv2/LICENSE.txt b/parrot/lib/python3.10/site-packages/cv2/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..328bf50632a988cf1cc494d557936d84fec16335 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Olli-Pekka Heinisuo + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/parrot/lib/python3.10/site-packages/cv2/__init__.py b/parrot/lib/python3.10/site-packages/cv2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7e148fc9f2b93a2b51b3bc6ec49187dbdcdfc5cb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/__init__.py @@ -0,0 +1,181 @@ +''' +OpenCV Python binary extension loader +''' +import os +import importlib +import sys + +__all__ = [] + +try: + import numpy + import numpy.core.multiarray +except ImportError: + print('OpenCV bindings requires "numpy" package.') + print('Install it via command:') + print(' pip install numpy') + raise + +# TODO +# is_x64 = sys.maxsize > 2**32 + + +def __load_extra_py_code_for_module(base, name, enable_debug_print=False): + module_name = "{}.{}".format(__name__, name) + export_module_name = "{}.{}".format(base, name) + native_module = sys.modules.pop(module_name, None) + try: + py_module = importlib.import_module(module_name) + except ImportError as err: + if enable_debug_print: + print("Can't load Python code for module:", module_name, + ". Reason:", err) + # Extension doesn't contain extra py code + return False + + if base in sys.modules and not hasattr(sys.modules[base], name): + setattr(sys.modules[base], name, py_module) + sys.modules[export_module_name] = py_module + # If it is C extension module it is already loaded by cv2 package + if native_module: + setattr(py_module, "_native", native_module) + for k, v in filter(lambda kv: not hasattr(py_module, kv[0]), + native_module.__dict__.items()): + if enable_debug_print: print(' symbol({}): {} = {}'.format(name, k, v)) + setattr(py_module, k, v) + return True + + +def __collect_extra_submodules(enable_debug_print=False): + def modules_filter(module): + return all(( + # module is not internal + not module.startswith("_"), + not module.startswith("python-"), + # it is not a file + os.path.isdir(os.path.join(_extra_submodules_init_path, module)) + )) + if sys.version_info[0] < 3: + if enable_debug_print: + print("Extra submodules is loaded only for Python 3") + return [] + + __INIT_FILE_PATH = os.path.abspath(__file__) + _extra_submodules_init_path = os.path.dirname(__INIT_FILE_PATH) + return filter(modules_filter, os.listdir(_extra_submodules_init_path)) + + +def bootstrap(): + import sys + + import copy + save_sys_path = copy.copy(sys.path) + + if hasattr(sys, 'OpenCV_LOADER'): + print(sys.path) + raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.') + sys.OpenCV_LOADER = True + + DEBUG = False + if hasattr(sys, 'OpenCV_LOADER_DEBUG'): + DEBUG = True + + import platform + if DEBUG: print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system()))) + + LOADER_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__))) + + PYTHON_EXTENSIONS_PATHS = [] + BINARIES_PATHS = [] + + g_vars = globals() + l_vars = locals().copy() + + if sys.version_info[:2] < (3, 0): + from . load_config_py2 import exec_file_wrapper + else: + from . 
load_config_py3 import exec_file_wrapper + + def load_first_config(fnames, required=True): + for fname in fnames: + fpath = os.path.join(LOADER_DIR, fname) + if not os.path.exists(fpath): + if DEBUG: print('OpenCV loader: config not found, skip: {}'.format(fpath)) + continue + if DEBUG: print('OpenCV loader: loading config: {}'.format(fpath)) + exec_file_wrapper(fpath, g_vars, l_vars) + return True + if required: + raise ImportError('OpenCV loader: missing configuration file: {}. Check OpenCV installation.'.format(fnames)) + + load_first_config(['config.py'], True) + load_first_config([ + 'config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]), + 'config-{}.py'.format(sys.version_info[0]) + ], True) + + if DEBUG: print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS']))) + if DEBUG: print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS']))) + + applySysPathWorkaround = False + if hasattr(sys, 'OpenCV_REPLACE_SYS_PATH_0'): + applySysPathWorkaround = True + else: + try: + BASE_DIR = os.path.dirname(LOADER_DIR) + if sys.path[0] == BASE_DIR or os.path.realpath(sys.path[0]) == BASE_DIR: + applySysPathWorkaround = True + except: + if DEBUG: print('OpenCV loader: exception during checking workaround for sys.path[0]') + pass # applySysPathWorkaround is False + + for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']): + sys.path.insert(1 if not applySysPathWorkaround else 0, p) + + if os.name == 'nt': + if sys.version_info[:2] >= (3, 8): # https://github.com/python/cpython/pull/12302 + for p in l_vars['BINARIES_PATHS']: + try: + os.add_dll_directory(p) + except Exception as e: + if DEBUG: print('Failed os.add_dll_directory(): '+ str(e)) + pass + os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '') + if DEBUG: print('OpenCV loader: PATH={}'.format(str(os.environ['PATH']))) + else: + # amending of LD_LIBRARY_PATH works for sub-processes only + os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '') + + if DEBUG: print("Relink everything from native cv2 module to cv2 package") + + py_module = sys.modules.pop("cv2") + + native_module = importlib.import_module("cv2") + + sys.modules["cv2"] = py_module + setattr(py_module, "_native", native_module) + + for item_name, item in filter(lambda kv: kv[0] not in ("__file__", "__loader__", "__spec__", + "__name__", "__package__"), + native_module.__dict__.items()): + if item_name not in g_vars: + g_vars[item_name] = item + + sys.path = save_sys_path # multiprocessing should start from bootstrap code (https://github.com/opencv/opencv/issues/18502) + + try: + del sys.OpenCV_LOADER + except Exception as e: + if DEBUG: + print("Exception during delete OpenCV_LOADER:", e) + + if DEBUG: print('OpenCV loader: binary extension... 
OK') + + for submodule in __collect_extra_submodules(DEBUG): + if __load_extra_py_code_for_module("cv2", submodule, DEBUG): + if DEBUG: print("Extra Python code for", submodule, "is loaded") + + if DEBUG: print('OpenCV loader: DONE') + + +bootstrap() diff --git a/parrot/lib/python3.10/site-packages/cv2/__init__.pyi b/parrot/lib/python3.10/site-packages/cv2/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..937f8e9540c4c2f2cded778af41a326e59dcc41b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/__init__.pyi @@ -0,0 +1,6305 @@ +__all__: list[str] = [] + +import cv2.aruco +import cv2.cuda +import cv2.dnn +import cv2.gapi +import cv2.gapi.ot +import cv2.gapi.streaming +import cv2.typing +import numpy +import typing as _typing + + +from cv2 import Error as Error +from cv2 import aruco as aruco +from cv2 import barcode as barcode +from cv2 import cuda as cuda +from cv2 import detail as detail +from cv2 import dnn as dnn +from cv2 import fisheye as fisheye +from cv2 import flann as flann +from cv2 import gapi as gapi +from cv2 import ipp as ipp +from cv2 import ml as ml +from cv2 import ocl as ocl +from cv2 import ogl as ogl +from cv2 import parallel as parallel +from cv2 import samples as samples +from cv2 import segmentation as segmentation +from cv2 import typing as typing +from cv2 import utils as utils +from cv2 import videoio_registry as videoio_registry +from cv2.mat_wrapper import Mat as Mat + + +# Enumerations +SORT_EVERY_ROW: int +SORT_EVERY_COLUMN: int +SORT_ASCENDING: int +SORT_DESCENDING: int +SortFlags = int +"""One of [SORT_EVERY_ROW, SORT_EVERY_COLUMN, SORT_ASCENDING, SORT_DESCENDING]""" + +COVAR_SCRAMBLED: int +COVAR_NORMAL: int +COVAR_USE_AVG: int +COVAR_SCALE: int +COVAR_ROWS: int +COVAR_COLS: int +CovarFlags = int +"""One of [COVAR_SCRAMBLED, COVAR_NORMAL, COVAR_USE_AVG, COVAR_SCALE, COVAR_ROWS, COVAR_COLS]""" + +KMEANS_RANDOM_CENTERS: int +KMEANS_PP_CENTERS: int +KMEANS_USE_INITIAL_LABELS: int +KmeansFlags = int +"""One of [KMEANS_RANDOM_CENTERS, KMEANS_PP_CENTERS, KMEANS_USE_INITIAL_LABELS]""" + +REDUCE_SUM: int +REDUCE_AVG: int +REDUCE_MAX: int +REDUCE_MIN: int +REDUCE_SUM2: int +ReduceTypes = int +"""One of [REDUCE_SUM, REDUCE_AVG, REDUCE_MAX, REDUCE_MIN, REDUCE_SUM2]""" + +ROTATE_90_CLOCKWISE: int +ROTATE_180: int +ROTATE_90_COUNTERCLOCKWISE: int +RotateFlags = int +"""One of [ROTATE_90_CLOCKWISE, ROTATE_180, ROTATE_90_COUNTERCLOCKWISE]""" + +Param_INT: int +PARAM_INT: int +Param_BOOLEAN: int +PARAM_BOOLEAN: int +Param_REAL: int +PARAM_REAL: int +Param_STRING: int +PARAM_STRING: int +Param_MAT: int +PARAM_MAT: int +Param_MAT_VECTOR: int +PARAM_MAT_VECTOR: int +Param_ALGORITHM: int +PARAM_ALGORITHM: int +Param_FLOAT: int +PARAM_FLOAT: int +Param_UNSIGNED_INT: int +PARAM_UNSIGNED_INT: int +Param_UINT64: int +PARAM_UINT64: int +Param_UCHAR: int +PARAM_UCHAR: int +Param_SCALAR: int +PARAM_SCALAR: int +Param = int +"""One of [Param_INT, PARAM_INT, Param_BOOLEAN, PARAM_BOOLEAN, Param_REAL, PARAM_REAL, Param_STRING, PARAM_STRING, Param_MAT, PARAM_MAT, Param_MAT_VECTOR, PARAM_MAT_VECTOR, Param_ALGORITHM, PARAM_ALGORITHM, Param_FLOAT, PARAM_FLOAT, Param_UNSIGNED_INT, PARAM_UNSIGNED_INT, Param_UINT64, PARAM_UINT64, Param_UCHAR, PARAM_UCHAR, Param_SCALAR, PARAM_SCALAR]""" + +DECOMP_LU: int +DECOMP_SVD: int +DECOMP_EIG: int +DECOMP_CHOLESKY: int +DECOMP_QR: int +DECOMP_NORMAL: int +DecompTypes = int +"""One of [DECOMP_LU, DECOMP_SVD, DECOMP_EIG, DECOMP_CHOLESKY, DECOMP_QR, DECOMP_NORMAL]""" + +NORM_INF: int +NORM_L1: int +NORM_L2: int 
+NORM_L2SQR: int +NORM_HAMMING: int +NORM_HAMMING2: int +NORM_TYPE_MASK: int +NORM_RELATIVE: int +NORM_MINMAX: int +NormTypes = int +"""One of [NORM_INF, NORM_L1, NORM_L2, NORM_L2SQR, NORM_HAMMING, NORM_HAMMING2, NORM_TYPE_MASK, NORM_RELATIVE, NORM_MINMAX]""" + +CMP_EQ: int +CMP_GT: int +CMP_GE: int +CMP_LT: int +CMP_LE: int +CMP_NE: int +CmpTypes = int +"""One of [CMP_EQ, CMP_GT, CMP_GE, CMP_LT, CMP_LE, CMP_NE]""" + +GEMM_1_T: int +GEMM_2_T: int +GEMM_3_T: int +GemmFlags = int +"""One of [GEMM_1_T, GEMM_2_T, GEMM_3_T]""" + +DFT_INVERSE: int +DFT_SCALE: int +DFT_ROWS: int +DFT_COMPLEX_OUTPUT: int +DFT_REAL_OUTPUT: int +DFT_COMPLEX_INPUT: int +DCT_INVERSE: int +DCT_ROWS: int +DftFlags = int +"""One of [DFT_INVERSE, DFT_SCALE, DFT_ROWS, DFT_COMPLEX_OUTPUT, DFT_REAL_OUTPUT, DFT_COMPLEX_INPUT, DCT_INVERSE, DCT_ROWS]""" + +BORDER_CONSTANT: int +BORDER_REPLICATE: int +BORDER_REFLECT: int +BORDER_WRAP: int +BORDER_REFLECT_101: int +BORDER_TRANSPARENT: int +BORDER_REFLECT101: int +BORDER_DEFAULT: int +BORDER_ISOLATED: int +BorderTypes = int +"""One of [BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, BORDER_REFLECT_101, BORDER_TRANSPARENT, BORDER_REFLECT101, BORDER_DEFAULT, BORDER_ISOLATED]""" + +ACCESS_READ: int +ACCESS_WRITE: int +ACCESS_RW: int +ACCESS_MASK: int +ACCESS_FAST: int +AccessFlag = int +"""One of [ACCESS_READ, ACCESS_WRITE, ACCESS_RW, ACCESS_MASK, ACCESS_FAST]""" + +USAGE_DEFAULT: int +USAGE_ALLOCATE_HOST_MEMORY: int +USAGE_ALLOCATE_DEVICE_MEMORY: int +USAGE_ALLOCATE_SHARED_MEMORY: int +__UMAT_USAGE_FLAGS_32BIT: int +UMatUsageFlags = int +"""One of [USAGE_DEFAULT, USAGE_ALLOCATE_HOST_MEMORY, USAGE_ALLOCATE_DEVICE_MEMORY, USAGE_ALLOCATE_SHARED_MEMORY, __UMAT_USAGE_FLAGS_32BIT]""" + +SOLVELP_LOST: int +SOLVELP_UNBOUNDED: int +SOLVELP_UNFEASIBLE: int +SOLVELP_SINGLE: int +SOLVELP_MULTI: int +SolveLPResult = int +"""One of [SOLVELP_LOST, SOLVELP_UNBOUNDED, SOLVELP_UNFEASIBLE, SOLVELP_SINGLE, SOLVELP_MULTI]""" + +QUAT_ASSUME_NOT_UNIT: int +QUAT_ASSUME_UNIT: int +QuatAssumeType = int +"""One of [QUAT_ASSUME_NOT_UNIT, QUAT_ASSUME_UNIT]""" + +FILTER_SCHARR: int +SpecialFilter = int +"""One of [FILTER_SCHARR]""" + +MORPH_ERODE: int +MORPH_DILATE: int +MORPH_OPEN: int +MORPH_CLOSE: int +MORPH_GRADIENT: int +MORPH_TOPHAT: int +MORPH_BLACKHAT: int +MORPH_HITMISS: int +MorphTypes = int +"""One of [MORPH_ERODE, MORPH_DILATE, MORPH_OPEN, MORPH_CLOSE, MORPH_GRADIENT, MORPH_TOPHAT, MORPH_BLACKHAT, MORPH_HITMISS]""" + +MORPH_RECT: int +MORPH_CROSS: int +MORPH_ELLIPSE: int +MorphShapes = int +"""One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE]""" + +INTER_NEAREST: int +INTER_LINEAR: int +INTER_CUBIC: int +INTER_AREA: int +INTER_LANCZOS4: int +INTER_LINEAR_EXACT: int +INTER_NEAREST_EXACT: int +INTER_MAX: int +WARP_FILL_OUTLIERS: int +WARP_INVERSE_MAP: int +WARP_RELATIVE_MAP: int +InterpolationFlags = int +"""One of [INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA, INTER_LANCZOS4, INTER_LINEAR_EXACT, INTER_NEAREST_EXACT, INTER_MAX, WARP_FILL_OUTLIERS, WARP_INVERSE_MAP, WARP_RELATIVE_MAP]""" + +WARP_POLAR_LINEAR: int +WARP_POLAR_LOG: int +WarpPolarMode = int +"""One of [WARP_POLAR_LINEAR, WARP_POLAR_LOG]""" + +INTER_BITS: int +INTER_BITS2: int +INTER_TAB_SIZE: int +INTER_TAB_SIZE2: int +InterpolationMasks = int +"""One of [INTER_BITS, INTER_BITS2, INTER_TAB_SIZE, INTER_TAB_SIZE2]""" + +DIST_USER: int +DIST_L1: int +DIST_L2: int +DIST_C: int +DIST_L12: int +DIST_FAIR: int +DIST_WELSCH: int +DIST_HUBER: int +DistanceTypes = int +"""One of [DIST_USER, DIST_L1, DIST_L2, DIST_C, DIST_L12, 
DIST_FAIR, DIST_WELSCH, DIST_HUBER]""" + +DIST_MASK_3: int +DIST_MASK_5: int +DIST_MASK_PRECISE: int +DistanceTransformMasks = int +"""One of [DIST_MASK_3, DIST_MASK_5, DIST_MASK_PRECISE]""" + +THRESH_BINARY: int +THRESH_BINARY_INV: int +THRESH_TRUNC: int +THRESH_TOZERO: int +THRESH_TOZERO_INV: int +THRESH_MASK: int +THRESH_OTSU: int +THRESH_TRIANGLE: int +ThresholdTypes = int +"""One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, THRESH_TRIANGLE]""" + +ADAPTIVE_THRESH_MEAN_C: int +ADAPTIVE_THRESH_GAUSSIAN_C: int +AdaptiveThresholdTypes = int +"""One of [ADAPTIVE_THRESH_MEAN_C, ADAPTIVE_THRESH_GAUSSIAN_C]""" + +GC_BGD: int +GC_FGD: int +GC_PR_BGD: int +GC_PR_FGD: int +GrabCutClasses = int +"""One of [GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD]""" + +GC_INIT_WITH_RECT: int +GC_INIT_WITH_MASK: int +GC_EVAL: int +GC_EVAL_FREEZE_MODEL: int +GrabCutModes = int +"""One of [GC_INIT_WITH_RECT, GC_INIT_WITH_MASK, GC_EVAL, GC_EVAL_FREEZE_MODEL]""" + +DIST_LABEL_CCOMP: int +DIST_LABEL_PIXEL: int +DistanceTransformLabelTypes = int +"""One of [DIST_LABEL_CCOMP, DIST_LABEL_PIXEL]""" + +FLOODFILL_FIXED_RANGE: int +FLOODFILL_MASK_ONLY: int +FloodFillFlags = int +"""One of [FLOODFILL_FIXED_RANGE, FLOODFILL_MASK_ONLY]""" + +CC_STAT_LEFT: int +CC_STAT_TOP: int +CC_STAT_WIDTH: int +CC_STAT_HEIGHT: int +CC_STAT_AREA: int +CC_STAT_MAX: int +ConnectedComponentsTypes = int +"""One of [CC_STAT_LEFT, CC_STAT_TOP, CC_STAT_WIDTH, CC_STAT_HEIGHT, CC_STAT_AREA, CC_STAT_MAX]""" + +CCL_DEFAULT: int +CCL_WU: int +CCL_GRANA: int +CCL_BOLELLI: int +CCL_SAUF: int +CCL_BBDT: int +CCL_SPAGHETTI: int +ConnectedComponentsAlgorithmsTypes = int +"""One of [CCL_DEFAULT, CCL_WU, CCL_GRANA, CCL_BOLELLI, CCL_SAUF, CCL_BBDT, CCL_SPAGHETTI]""" + +RETR_EXTERNAL: int +RETR_LIST: int +RETR_CCOMP: int +RETR_TREE: int +RETR_FLOODFILL: int +RetrievalModes = int +"""One of [RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE, RETR_FLOODFILL]""" + +CHAIN_APPROX_NONE: int +CHAIN_APPROX_SIMPLE: int +CHAIN_APPROX_TC89_L1: int +CHAIN_APPROX_TC89_KCOS: int +ContourApproximationModes = int +"""One of [CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS]""" + +CONTOURS_MATCH_I1: int +CONTOURS_MATCH_I2: int +CONTOURS_MATCH_I3: int +ShapeMatchModes = int +"""One of [CONTOURS_MATCH_I1, CONTOURS_MATCH_I2, CONTOURS_MATCH_I3]""" + +HOUGH_STANDARD: int +HOUGH_PROBABILISTIC: int +HOUGH_MULTI_SCALE: int +HOUGH_GRADIENT: int +HOUGH_GRADIENT_ALT: int +HoughModes = int +"""One of [HOUGH_STANDARD, HOUGH_PROBABILISTIC, HOUGH_MULTI_SCALE, HOUGH_GRADIENT, HOUGH_GRADIENT_ALT]""" + +LSD_REFINE_NONE: int +LSD_REFINE_STD: int +LSD_REFINE_ADV: int +LineSegmentDetectorModes = int +"""One of [LSD_REFINE_NONE, LSD_REFINE_STD, LSD_REFINE_ADV]""" + +HISTCMP_CORREL: int +HISTCMP_CHISQR: int +HISTCMP_INTERSECT: int +HISTCMP_BHATTACHARYYA: int +HISTCMP_HELLINGER: int +HISTCMP_CHISQR_ALT: int +HISTCMP_KL_DIV: int +HistCompMethods = int +"""One of [HISTCMP_CORREL, HISTCMP_CHISQR, HISTCMP_INTERSECT, HISTCMP_BHATTACHARYYA, HISTCMP_HELLINGER, HISTCMP_CHISQR_ALT, HISTCMP_KL_DIV]""" + +COLOR_BGR2BGRA: int +COLOR_RGB2RGBA: int +COLOR_BGRA2BGR: int +COLOR_RGBA2RGB: int +COLOR_BGR2RGBA: int +COLOR_RGB2BGRA: int +COLOR_RGBA2BGR: int +COLOR_BGRA2RGB: int +COLOR_BGR2RGB: int +COLOR_RGB2BGR: int +COLOR_BGRA2RGBA: int +COLOR_RGBA2BGRA: int +COLOR_BGR2GRAY: int +COLOR_RGB2GRAY: int +COLOR_GRAY2BGR: int +COLOR_GRAY2RGB: int +COLOR_GRAY2BGRA: int +COLOR_GRAY2RGBA: int +COLOR_BGRA2GRAY: int +COLOR_RGBA2GRAY: int 
+COLOR_BGR2BGR565: int +COLOR_RGB2BGR565: int +COLOR_BGR5652BGR: int +COLOR_BGR5652RGB: int +COLOR_BGRA2BGR565: int +COLOR_RGBA2BGR565: int +COLOR_BGR5652BGRA: int +COLOR_BGR5652RGBA: int +COLOR_GRAY2BGR565: int +COLOR_BGR5652GRAY: int +COLOR_BGR2BGR555: int +COLOR_RGB2BGR555: int +COLOR_BGR5552BGR: int +COLOR_BGR5552RGB: int +COLOR_BGRA2BGR555: int +COLOR_RGBA2BGR555: int +COLOR_BGR5552BGRA: int +COLOR_BGR5552RGBA: int +COLOR_GRAY2BGR555: int +COLOR_BGR5552GRAY: int +COLOR_BGR2XYZ: int +COLOR_RGB2XYZ: int +COLOR_XYZ2BGR: int +COLOR_XYZ2RGB: int +COLOR_BGR2YCrCb: int +COLOR_BGR2YCR_CB: int +COLOR_RGB2YCrCb: int +COLOR_RGB2YCR_CB: int +COLOR_YCrCb2BGR: int +COLOR_YCR_CB2BGR: int +COLOR_YCrCb2RGB: int +COLOR_YCR_CB2RGB: int +COLOR_BGR2HSV: int +COLOR_RGB2HSV: int +COLOR_BGR2Lab: int +COLOR_BGR2LAB: int +COLOR_RGB2Lab: int +COLOR_RGB2LAB: int +COLOR_BGR2Luv: int +COLOR_BGR2LUV: int +COLOR_RGB2Luv: int +COLOR_RGB2LUV: int +COLOR_BGR2HLS: int +COLOR_RGB2HLS: int +COLOR_HSV2BGR: int +COLOR_HSV2RGB: int +COLOR_Lab2BGR: int +COLOR_LAB2BGR: int +COLOR_Lab2RGB: int +COLOR_LAB2RGB: int +COLOR_Luv2BGR: int +COLOR_LUV2BGR: int +COLOR_Luv2RGB: int +COLOR_LUV2RGB: int +COLOR_HLS2BGR: int +COLOR_HLS2RGB: int +COLOR_BGR2HSV_FULL: int +COLOR_RGB2HSV_FULL: int +COLOR_BGR2HLS_FULL: int +COLOR_RGB2HLS_FULL: int +COLOR_HSV2BGR_FULL: int +COLOR_HSV2RGB_FULL: int +COLOR_HLS2BGR_FULL: int +COLOR_HLS2RGB_FULL: int +COLOR_LBGR2Lab: int +COLOR_LBGR2LAB: int +COLOR_LRGB2Lab: int +COLOR_LRGB2LAB: int +COLOR_LBGR2Luv: int +COLOR_LBGR2LUV: int +COLOR_LRGB2Luv: int +COLOR_LRGB2LUV: int +COLOR_Lab2LBGR: int +COLOR_LAB2LBGR: int +COLOR_Lab2LRGB: int +COLOR_LAB2LRGB: int +COLOR_Luv2LBGR: int +COLOR_LUV2LBGR: int +COLOR_Luv2LRGB: int +COLOR_LUV2LRGB: int +COLOR_BGR2YUV: int +COLOR_RGB2YUV: int +COLOR_YUV2BGR: int +COLOR_YUV2RGB: int +COLOR_YUV2RGB_NV12: int +COLOR_YUV2BGR_NV12: int +COLOR_YUV2RGB_NV21: int +COLOR_YUV2BGR_NV21: int +COLOR_YUV420sp2RGB: int +COLOR_YUV420SP2RGB: int +COLOR_YUV420sp2BGR: int +COLOR_YUV420SP2BGR: int +COLOR_YUV2RGBA_NV12: int +COLOR_YUV2BGRA_NV12: int +COLOR_YUV2RGBA_NV21: int +COLOR_YUV2BGRA_NV21: int +COLOR_YUV420sp2RGBA: int +COLOR_YUV420SP2RGBA: int +COLOR_YUV420sp2BGRA: int +COLOR_YUV420SP2BGRA: int +COLOR_YUV2RGB_YV12: int +COLOR_YUV2BGR_YV12: int +COLOR_YUV2RGB_IYUV: int +COLOR_YUV2BGR_IYUV: int +COLOR_YUV2RGB_I420: int +COLOR_YUV2BGR_I420: int +COLOR_YUV420p2RGB: int +COLOR_YUV420P2RGB: int +COLOR_YUV420p2BGR: int +COLOR_YUV420P2BGR: int +COLOR_YUV2RGBA_YV12: int +COLOR_YUV2BGRA_YV12: int +COLOR_YUV2RGBA_IYUV: int +COLOR_YUV2BGRA_IYUV: int +COLOR_YUV2RGBA_I420: int +COLOR_YUV2BGRA_I420: int +COLOR_YUV420p2RGBA: int +COLOR_YUV420P2RGBA: int +COLOR_YUV420p2BGRA: int +COLOR_YUV420P2BGRA: int +COLOR_YUV2GRAY_420: int +COLOR_YUV2GRAY_NV21: int +COLOR_YUV2GRAY_NV12: int +COLOR_YUV2GRAY_YV12: int +COLOR_YUV2GRAY_IYUV: int +COLOR_YUV2GRAY_I420: int +COLOR_YUV420sp2GRAY: int +COLOR_YUV420SP2GRAY: int +COLOR_YUV420p2GRAY: int +COLOR_YUV420P2GRAY: int +COLOR_YUV2RGB_UYVY: int +COLOR_YUV2BGR_UYVY: int +COLOR_YUV2RGB_Y422: int +COLOR_YUV2BGR_Y422: int +COLOR_YUV2RGB_UYNV: int +COLOR_YUV2BGR_UYNV: int +COLOR_YUV2RGBA_UYVY: int +COLOR_YUV2BGRA_UYVY: int +COLOR_YUV2RGBA_Y422: int +COLOR_YUV2BGRA_Y422: int +COLOR_YUV2RGBA_UYNV: int +COLOR_YUV2BGRA_UYNV: int +COLOR_YUV2RGB_YUY2: int +COLOR_YUV2BGR_YUY2: int +COLOR_YUV2RGB_YVYU: int +COLOR_YUV2BGR_YVYU: int +COLOR_YUV2RGB_YUYV: int +COLOR_YUV2BGR_YUYV: int +COLOR_YUV2RGB_YUNV: int +COLOR_YUV2BGR_YUNV: int +COLOR_YUV2RGBA_YUY2: int +COLOR_YUV2BGRA_YUY2: int 
+COLOR_YUV2RGBA_YVYU: int +COLOR_YUV2BGRA_YVYU: int +COLOR_YUV2RGBA_YUYV: int +COLOR_YUV2BGRA_YUYV: int +COLOR_YUV2RGBA_YUNV: int +COLOR_YUV2BGRA_YUNV: int +COLOR_YUV2GRAY_UYVY: int +COLOR_YUV2GRAY_YUY2: int +COLOR_YUV2GRAY_Y422: int +COLOR_YUV2GRAY_UYNV: int +COLOR_YUV2GRAY_YVYU: int +COLOR_YUV2GRAY_YUYV: int +COLOR_YUV2GRAY_YUNV: int +COLOR_RGBA2mRGBA: int +COLOR_RGBA2M_RGBA: int +COLOR_mRGBA2RGBA: int +COLOR_M_RGBA2RGBA: int +COLOR_RGB2YUV_I420: int +COLOR_BGR2YUV_I420: int +COLOR_RGB2YUV_IYUV: int +COLOR_BGR2YUV_IYUV: int +COLOR_RGBA2YUV_I420: int +COLOR_BGRA2YUV_I420: int +COLOR_RGBA2YUV_IYUV: int +COLOR_BGRA2YUV_IYUV: int +COLOR_RGB2YUV_YV12: int +COLOR_BGR2YUV_YV12: int +COLOR_RGBA2YUV_YV12: int +COLOR_BGRA2YUV_YV12: int +COLOR_BayerBG2BGR: int +COLOR_BAYER_BG2BGR: int +COLOR_BayerGB2BGR: int +COLOR_BAYER_GB2BGR: int +COLOR_BayerRG2BGR: int +COLOR_BAYER_RG2BGR: int +COLOR_BayerGR2BGR: int +COLOR_BAYER_GR2BGR: int +COLOR_BayerRGGB2BGR: int +COLOR_BAYER_RGGB2BGR: int +COLOR_BayerGRBG2BGR: int +COLOR_BAYER_GRBG2BGR: int +COLOR_BayerBGGR2BGR: int +COLOR_BAYER_BGGR2BGR: int +COLOR_BayerGBRG2BGR: int +COLOR_BAYER_GBRG2BGR: int +COLOR_BayerRGGB2RGB: int +COLOR_BAYER_RGGB2RGB: int +COLOR_BayerGRBG2RGB: int +COLOR_BAYER_GRBG2RGB: int +COLOR_BayerBGGR2RGB: int +COLOR_BAYER_BGGR2RGB: int +COLOR_BayerGBRG2RGB: int +COLOR_BAYER_GBRG2RGB: int +COLOR_BayerBG2RGB: int +COLOR_BAYER_BG2RGB: int +COLOR_BayerGB2RGB: int +COLOR_BAYER_GB2RGB: int +COLOR_BayerRG2RGB: int +COLOR_BAYER_RG2RGB: int +COLOR_BayerGR2RGB: int +COLOR_BAYER_GR2RGB: int +COLOR_BayerBG2GRAY: int +COLOR_BAYER_BG2GRAY: int +COLOR_BayerGB2GRAY: int +COLOR_BAYER_GB2GRAY: int +COLOR_BayerRG2GRAY: int +COLOR_BAYER_RG2GRAY: int +COLOR_BayerGR2GRAY: int +COLOR_BAYER_GR2GRAY: int +COLOR_BayerRGGB2GRAY: int +COLOR_BAYER_RGGB2GRAY: int +COLOR_BayerGRBG2GRAY: int +COLOR_BAYER_GRBG2GRAY: int +COLOR_BayerBGGR2GRAY: int +COLOR_BAYER_BGGR2GRAY: int +COLOR_BayerGBRG2GRAY: int +COLOR_BAYER_GBRG2GRAY: int +COLOR_BayerBG2BGR_VNG: int +COLOR_BAYER_BG2BGR_VNG: int +COLOR_BayerGB2BGR_VNG: int +COLOR_BAYER_GB2BGR_VNG: int +COLOR_BayerRG2BGR_VNG: int +COLOR_BAYER_RG2BGR_VNG: int +COLOR_BayerGR2BGR_VNG: int +COLOR_BAYER_GR2BGR_VNG: int +COLOR_BayerRGGB2BGR_VNG: int +COLOR_BAYER_RGGB2BGR_VNG: int +COLOR_BayerGRBG2BGR_VNG: int +COLOR_BAYER_GRBG2BGR_VNG: int +COLOR_BayerBGGR2BGR_VNG: int +COLOR_BAYER_BGGR2BGR_VNG: int +COLOR_BayerGBRG2BGR_VNG: int +COLOR_BAYER_GBRG2BGR_VNG: int +COLOR_BayerRGGB2RGB_VNG: int +COLOR_BAYER_RGGB2RGB_VNG: int +COLOR_BayerGRBG2RGB_VNG: int +COLOR_BAYER_GRBG2RGB_VNG: int +COLOR_BayerBGGR2RGB_VNG: int +COLOR_BAYER_BGGR2RGB_VNG: int +COLOR_BayerGBRG2RGB_VNG: int +COLOR_BAYER_GBRG2RGB_VNG: int +COLOR_BayerBG2RGB_VNG: int +COLOR_BAYER_BG2RGB_VNG: int +COLOR_BayerGB2RGB_VNG: int +COLOR_BAYER_GB2RGB_VNG: int +COLOR_BayerRG2RGB_VNG: int +COLOR_BAYER_RG2RGB_VNG: int +COLOR_BayerGR2RGB_VNG: int +COLOR_BAYER_GR2RGB_VNG: int +COLOR_BayerBG2BGR_EA: int +COLOR_BAYER_BG2BGR_EA: int +COLOR_BayerGB2BGR_EA: int +COLOR_BAYER_GB2BGR_EA: int +COLOR_BayerRG2BGR_EA: int +COLOR_BAYER_RG2BGR_EA: int +COLOR_BayerGR2BGR_EA: int +COLOR_BAYER_GR2BGR_EA: int +COLOR_BayerRGGB2BGR_EA: int +COLOR_BAYER_RGGB2BGR_EA: int +COLOR_BayerGRBG2BGR_EA: int +COLOR_BAYER_GRBG2BGR_EA: int +COLOR_BayerBGGR2BGR_EA: int +COLOR_BAYER_BGGR2BGR_EA: int +COLOR_BayerGBRG2BGR_EA: int +COLOR_BAYER_GBRG2BGR_EA: int +COLOR_BayerRGGB2RGB_EA: int +COLOR_BAYER_RGGB2RGB_EA: int +COLOR_BayerGRBG2RGB_EA: int +COLOR_BAYER_GRBG2RGB_EA: int +COLOR_BayerBGGR2RGB_EA: int +COLOR_BAYER_BGGR2RGB_EA: int 
+COLOR_BayerGBRG2RGB_EA: int +COLOR_BAYER_GBRG2RGB_EA: int +COLOR_BayerBG2RGB_EA: int +COLOR_BAYER_BG2RGB_EA: int +COLOR_BayerGB2RGB_EA: int +COLOR_BAYER_GB2RGB_EA: int +COLOR_BayerRG2RGB_EA: int +COLOR_BAYER_RG2RGB_EA: int +COLOR_BayerGR2RGB_EA: int +COLOR_BAYER_GR2RGB_EA: int +COLOR_BayerBG2BGRA: int +COLOR_BAYER_BG2BGRA: int +COLOR_BayerGB2BGRA: int +COLOR_BAYER_GB2BGRA: int +COLOR_BayerRG2BGRA: int +COLOR_BAYER_RG2BGRA: int +COLOR_BayerGR2BGRA: int +COLOR_BAYER_GR2BGRA: int +COLOR_BayerRGGB2BGRA: int +COLOR_BAYER_RGGB2BGRA: int +COLOR_BayerGRBG2BGRA: int +COLOR_BAYER_GRBG2BGRA: int +COLOR_BayerBGGR2BGRA: int +COLOR_BAYER_BGGR2BGRA: int +COLOR_BayerGBRG2BGRA: int +COLOR_BAYER_GBRG2BGRA: int +COLOR_BayerRGGB2RGBA: int +COLOR_BAYER_RGGB2RGBA: int +COLOR_BayerGRBG2RGBA: int +COLOR_BAYER_GRBG2RGBA: int +COLOR_BayerBGGR2RGBA: int +COLOR_BAYER_BGGR2RGBA: int +COLOR_BayerGBRG2RGBA: int +COLOR_BAYER_GBRG2RGBA: int +COLOR_BayerBG2RGBA: int +COLOR_BAYER_BG2RGBA: int +COLOR_BayerGB2RGBA: int +COLOR_BAYER_GB2RGBA: int +COLOR_BayerRG2RGBA: int +COLOR_BAYER_RG2RGBA: int +COLOR_BayerGR2RGBA: int +COLOR_BAYER_GR2RGBA: int +COLOR_RGB2YUV_UYVY: int +COLOR_BGR2YUV_UYVY: int +COLOR_RGB2YUV_Y422: int +COLOR_BGR2YUV_Y422: int +COLOR_RGB2YUV_UYNV: int +COLOR_BGR2YUV_UYNV: int +COLOR_RGBA2YUV_UYVY: int +COLOR_BGRA2YUV_UYVY: int +COLOR_RGBA2YUV_Y422: int +COLOR_BGRA2YUV_Y422: int +COLOR_RGBA2YUV_UYNV: int +COLOR_BGRA2YUV_UYNV: int +COLOR_RGB2YUV_YUY2: int +COLOR_BGR2YUV_YUY2: int +COLOR_RGB2YUV_YVYU: int +COLOR_BGR2YUV_YVYU: int +COLOR_RGB2YUV_YUYV: int +COLOR_BGR2YUV_YUYV: int +COLOR_RGB2YUV_YUNV: int +COLOR_BGR2YUV_YUNV: int +COLOR_RGBA2YUV_YUY2: int +COLOR_BGRA2YUV_YUY2: int +COLOR_RGBA2YUV_YVYU: int +COLOR_BGRA2YUV_YVYU: int +COLOR_RGBA2YUV_YUYV: int +COLOR_BGRA2YUV_YUYV: int +COLOR_RGBA2YUV_YUNV: int +COLOR_BGRA2YUV_YUNV: int +COLOR_COLORCVT_MAX: int +ColorConversionCodes = int +"""One of [COLOR_BGR2BGRA, COLOR_RGB2RGBA, COLOR_BGRA2BGR, COLOR_RGBA2RGB, COLOR_BGR2RGBA, COLOR_RGB2BGRA, COLOR_RGBA2BGR, COLOR_BGRA2RGB, COLOR_BGR2RGB, COLOR_RGB2BGR, COLOR_BGRA2RGBA, COLOR_RGBA2BGRA, COLOR_BGR2GRAY, COLOR_RGB2GRAY, COLOR_GRAY2BGR, COLOR_GRAY2RGB, COLOR_GRAY2BGRA, COLOR_GRAY2RGBA, COLOR_BGRA2GRAY, COLOR_RGBA2GRAY, COLOR_BGR2BGR565, COLOR_RGB2BGR565, COLOR_BGR5652BGR, COLOR_BGR5652RGB, COLOR_BGRA2BGR565, COLOR_RGBA2BGR565, COLOR_BGR5652BGRA, COLOR_BGR5652RGBA, COLOR_GRAY2BGR565, COLOR_BGR5652GRAY, COLOR_BGR2BGR555, COLOR_RGB2BGR555, COLOR_BGR5552BGR, COLOR_BGR5552RGB, COLOR_BGRA2BGR555, COLOR_RGBA2BGR555, COLOR_BGR5552BGRA, COLOR_BGR5552RGBA, COLOR_GRAY2BGR555, COLOR_BGR5552GRAY, COLOR_BGR2XYZ, COLOR_RGB2XYZ, COLOR_XYZ2BGR, COLOR_XYZ2RGB, COLOR_BGR2YCrCb, COLOR_BGR2YCR_CB, COLOR_RGB2YCrCb, COLOR_RGB2YCR_CB, COLOR_YCrCb2BGR, COLOR_YCR_CB2BGR, COLOR_YCrCb2RGB, COLOR_YCR_CB2RGB, COLOR_BGR2HSV, COLOR_RGB2HSV, COLOR_BGR2Lab, COLOR_BGR2LAB, COLOR_RGB2Lab, COLOR_RGB2LAB, COLOR_BGR2Luv, COLOR_BGR2LUV, COLOR_RGB2Luv, COLOR_RGB2LUV, COLOR_BGR2HLS, COLOR_RGB2HLS, COLOR_HSV2BGR, COLOR_HSV2RGB, COLOR_Lab2BGR, COLOR_LAB2BGR, COLOR_Lab2RGB, COLOR_LAB2RGB, COLOR_Luv2BGR, COLOR_LUV2BGR, COLOR_Luv2RGB, COLOR_LUV2RGB, COLOR_HLS2BGR, COLOR_HLS2RGB, COLOR_BGR2HSV_FULL, COLOR_RGB2HSV_FULL, COLOR_BGR2HLS_FULL, COLOR_RGB2HLS_FULL, COLOR_HSV2BGR_FULL, COLOR_HSV2RGB_FULL, COLOR_HLS2BGR_FULL, COLOR_HLS2RGB_FULL, COLOR_LBGR2Lab, COLOR_LBGR2LAB, COLOR_LRGB2Lab, COLOR_LRGB2LAB, COLOR_LBGR2Luv, COLOR_LBGR2LUV, COLOR_LRGB2Luv, COLOR_LRGB2LUV, COLOR_Lab2LBGR, COLOR_LAB2LBGR, COLOR_Lab2LRGB, COLOR_LAB2LRGB, COLOR_Luv2LBGR, COLOR_LUV2LBGR, 
COLOR_Luv2LRGB, COLOR_LUV2LRGB, COLOR_BGR2YUV, COLOR_RGB2YUV, COLOR_YUV2BGR, COLOR_YUV2RGB, COLOR_YUV2RGB_NV12, COLOR_YUV2BGR_NV12, COLOR_YUV2RGB_NV21, COLOR_YUV2BGR_NV21, COLOR_YUV420sp2RGB, COLOR_YUV420SP2RGB, COLOR_YUV420sp2BGR, COLOR_YUV420SP2BGR, COLOR_YUV2RGBA_NV12, COLOR_YUV2BGRA_NV12, COLOR_YUV2RGBA_NV21, COLOR_YUV2BGRA_NV21, COLOR_YUV420sp2RGBA, COLOR_YUV420SP2RGBA, COLOR_YUV420sp2BGRA, COLOR_YUV420SP2BGRA, COLOR_YUV2RGB_YV12, COLOR_YUV2BGR_YV12, COLOR_YUV2RGB_IYUV, COLOR_YUV2BGR_IYUV, COLOR_YUV2RGB_I420, COLOR_YUV2BGR_I420, COLOR_YUV420p2RGB, COLOR_YUV420P2RGB, COLOR_YUV420p2BGR, COLOR_YUV420P2BGR, COLOR_YUV2RGBA_YV12, COLOR_YUV2BGRA_YV12, COLOR_YUV2RGBA_IYUV, COLOR_YUV2BGRA_IYUV, COLOR_YUV2RGBA_I420, COLOR_YUV2BGRA_I420, COLOR_YUV420p2RGBA, COLOR_YUV420P2RGBA, COLOR_YUV420p2BGRA, COLOR_YUV420P2BGRA, COLOR_YUV2GRAY_420, COLOR_YUV2GRAY_NV21, COLOR_YUV2GRAY_NV12, COLOR_YUV2GRAY_YV12, COLOR_YUV2GRAY_IYUV, COLOR_YUV2GRAY_I420, COLOR_YUV420sp2GRAY, COLOR_YUV420SP2GRAY, COLOR_YUV420p2GRAY, COLOR_YUV420P2GRAY, COLOR_YUV2RGB_UYVY, COLOR_YUV2BGR_UYVY, COLOR_YUV2RGB_Y422, COLOR_YUV2BGR_Y422, COLOR_YUV2RGB_UYNV, COLOR_YUV2BGR_UYNV, COLOR_YUV2RGBA_UYVY, COLOR_YUV2BGRA_UYVY, COLOR_YUV2RGBA_Y422, COLOR_YUV2BGRA_Y422, COLOR_YUV2RGBA_UYNV, COLOR_YUV2BGRA_UYNV, COLOR_YUV2RGB_YUY2, COLOR_YUV2BGR_YUY2, COLOR_YUV2RGB_YVYU, COLOR_YUV2BGR_YVYU, COLOR_YUV2RGB_YUYV, COLOR_YUV2BGR_YUYV, COLOR_YUV2RGB_YUNV, COLOR_YUV2BGR_YUNV, COLOR_YUV2RGBA_YUY2, COLOR_YUV2BGRA_YUY2, COLOR_YUV2RGBA_YVYU, COLOR_YUV2BGRA_YVYU, COLOR_YUV2RGBA_YUYV, COLOR_YUV2BGRA_YUYV, COLOR_YUV2RGBA_YUNV, COLOR_YUV2BGRA_YUNV, COLOR_YUV2GRAY_UYVY, COLOR_YUV2GRAY_YUY2, COLOR_YUV2GRAY_Y422, COLOR_YUV2GRAY_UYNV, COLOR_YUV2GRAY_YVYU, COLOR_YUV2GRAY_YUYV, COLOR_YUV2GRAY_YUNV, COLOR_RGBA2mRGBA, COLOR_RGBA2M_RGBA, COLOR_mRGBA2RGBA, COLOR_M_RGBA2RGBA, COLOR_RGB2YUV_I420, COLOR_BGR2YUV_I420, COLOR_RGB2YUV_IYUV, COLOR_BGR2YUV_IYUV, COLOR_RGBA2YUV_I420, COLOR_BGRA2YUV_I420, COLOR_RGBA2YUV_IYUV, COLOR_BGRA2YUV_IYUV, COLOR_RGB2YUV_YV12, COLOR_BGR2YUV_YV12, COLOR_RGBA2YUV_YV12, COLOR_BGRA2YUV_YV12, COLOR_BayerBG2BGR, COLOR_BAYER_BG2BGR, COLOR_BayerGB2BGR, COLOR_BAYER_GB2BGR, COLOR_BayerRG2BGR, COLOR_BAYER_RG2BGR, COLOR_BayerGR2BGR, COLOR_BAYER_GR2BGR, COLOR_BayerRGGB2BGR, COLOR_BAYER_RGGB2BGR, COLOR_BayerGRBG2BGR, COLOR_BAYER_GRBG2BGR, COLOR_BayerBGGR2BGR, COLOR_BAYER_BGGR2BGR, COLOR_BayerGBRG2BGR, COLOR_BAYER_GBRG2BGR, COLOR_BayerRGGB2RGB, COLOR_BAYER_RGGB2RGB, COLOR_BayerGRBG2RGB, COLOR_BAYER_GRBG2RGB, COLOR_BayerBGGR2RGB, COLOR_BAYER_BGGR2RGB, COLOR_BayerGBRG2RGB, COLOR_BAYER_GBRG2RGB, COLOR_BayerBG2RGB, COLOR_BAYER_BG2RGB, COLOR_BayerGB2RGB, COLOR_BAYER_GB2RGB, COLOR_BayerRG2RGB, COLOR_BAYER_RG2RGB, COLOR_BayerGR2RGB, COLOR_BAYER_GR2RGB, COLOR_BayerBG2GRAY, COLOR_BAYER_BG2GRAY, COLOR_BayerGB2GRAY, COLOR_BAYER_GB2GRAY, COLOR_BayerRG2GRAY, COLOR_BAYER_RG2GRAY, COLOR_BayerGR2GRAY, COLOR_BAYER_GR2GRAY, COLOR_BayerRGGB2GRAY, COLOR_BAYER_RGGB2GRAY, COLOR_BayerGRBG2GRAY, COLOR_BAYER_GRBG2GRAY, COLOR_BayerBGGR2GRAY, COLOR_BAYER_BGGR2GRAY, COLOR_BayerGBRG2GRAY, COLOR_BAYER_GBRG2GRAY, COLOR_BayerBG2BGR_VNG, COLOR_BAYER_BG2BGR_VNG, COLOR_BayerGB2BGR_VNG, COLOR_BAYER_GB2BGR_VNG, COLOR_BayerRG2BGR_VNG, COLOR_BAYER_RG2BGR_VNG, COLOR_BayerGR2BGR_VNG, COLOR_BAYER_GR2BGR_VNG, COLOR_BayerRGGB2BGR_VNG, COLOR_BAYER_RGGB2BGR_VNG, COLOR_BayerGRBG2BGR_VNG, COLOR_BAYER_GRBG2BGR_VNG, COLOR_BayerBGGR2BGR_VNG, COLOR_BAYER_BGGR2BGR_VNG, COLOR_BayerGBRG2BGR_VNG, COLOR_BAYER_GBRG2BGR_VNG, COLOR_BayerRGGB2RGB_VNG, COLOR_BAYER_RGGB2RGB_VNG, COLOR_BayerGRBG2RGB_VNG, 
COLOR_BAYER_GRBG2RGB_VNG, COLOR_BayerBGGR2RGB_VNG, COLOR_BAYER_BGGR2RGB_VNG, COLOR_BayerGBRG2RGB_VNG, COLOR_BAYER_GBRG2RGB_VNG, COLOR_BayerBG2RGB_VNG, COLOR_BAYER_BG2RGB_VNG, COLOR_BayerGB2RGB_VNG, COLOR_BAYER_GB2RGB_VNG, COLOR_BayerRG2RGB_VNG, COLOR_BAYER_RG2RGB_VNG, COLOR_BayerGR2RGB_VNG, COLOR_BAYER_GR2RGB_VNG, COLOR_BayerBG2BGR_EA, COLOR_BAYER_BG2BGR_EA, COLOR_BayerGB2BGR_EA, COLOR_BAYER_GB2BGR_EA, COLOR_BayerRG2BGR_EA, COLOR_BAYER_RG2BGR_EA, COLOR_BayerGR2BGR_EA, COLOR_BAYER_GR2BGR_EA, COLOR_BayerRGGB2BGR_EA, COLOR_BAYER_RGGB2BGR_EA, COLOR_BayerGRBG2BGR_EA, COLOR_BAYER_GRBG2BGR_EA, COLOR_BayerBGGR2BGR_EA, COLOR_BAYER_BGGR2BGR_EA, COLOR_BayerGBRG2BGR_EA, COLOR_BAYER_GBRG2BGR_EA, COLOR_BayerRGGB2RGB_EA, COLOR_BAYER_RGGB2RGB_EA, COLOR_BayerGRBG2RGB_EA, COLOR_BAYER_GRBG2RGB_EA, COLOR_BayerBGGR2RGB_EA, COLOR_BAYER_BGGR2RGB_EA, COLOR_BayerGBRG2RGB_EA, COLOR_BAYER_GBRG2RGB_EA, COLOR_BayerBG2RGB_EA, COLOR_BAYER_BG2RGB_EA, COLOR_BayerGB2RGB_EA, COLOR_BAYER_GB2RGB_EA, COLOR_BayerRG2RGB_EA, COLOR_BAYER_RG2RGB_EA, COLOR_BayerGR2RGB_EA, COLOR_BAYER_GR2RGB_EA, COLOR_BayerBG2BGRA, COLOR_BAYER_BG2BGRA, COLOR_BayerGB2BGRA, COLOR_BAYER_GB2BGRA, COLOR_BayerRG2BGRA, COLOR_BAYER_RG2BGRA, COLOR_BayerGR2BGRA, COLOR_BAYER_GR2BGRA, COLOR_BayerRGGB2BGRA, COLOR_BAYER_RGGB2BGRA, COLOR_BayerGRBG2BGRA, COLOR_BAYER_GRBG2BGRA, COLOR_BayerBGGR2BGRA, COLOR_BAYER_BGGR2BGRA, COLOR_BayerGBRG2BGRA, COLOR_BAYER_GBRG2BGRA, COLOR_BayerRGGB2RGBA, COLOR_BAYER_RGGB2RGBA, COLOR_BayerGRBG2RGBA, COLOR_BAYER_GRBG2RGBA, COLOR_BayerBGGR2RGBA, COLOR_BAYER_BGGR2RGBA, COLOR_BayerGBRG2RGBA, COLOR_BAYER_GBRG2RGBA, COLOR_BayerBG2RGBA, COLOR_BAYER_BG2RGBA, COLOR_BayerGB2RGBA, COLOR_BAYER_GB2RGBA, COLOR_BayerRG2RGBA, COLOR_BAYER_RG2RGBA, COLOR_BayerGR2RGBA, COLOR_BAYER_GR2RGBA, COLOR_RGB2YUV_UYVY, COLOR_BGR2YUV_UYVY, COLOR_RGB2YUV_Y422, COLOR_BGR2YUV_Y422, COLOR_RGB2YUV_UYNV, COLOR_BGR2YUV_UYNV, COLOR_RGBA2YUV_UYVY, COLOR_BGRA2YUV_UYVY, COLOR_RGBA2YUV_Y422, COLOR_BGRA2YUV_Y422, COLOR_RGBA2YUV_UYNV, COLOR_BGRA2YUV_UYNV, COLOR_RGB2YUV_YUY2, COLOR_BGR2YUV_YUY2, COLOR_RGB2YUV_YVYU, COLOR_BGR2YUV_YVYU, COLOR_RGB2YUV_YUYV, COLOR_BGR2YUV_YUYV, COLOR_RGB2YUV_YUNV, COLOR_BGR2YUV_YUNV, COLOR_RGBA2YUV_YUY2, COLOR_BGRA2YUV_YUY2, COLOR_RGBA2YUV_YVYU, COLOR_BGRA2YUV_YVYU, COLOR_RGBA2YUV_YUYV, COLOR_BGRA2YUV_YUYV, COLOR_RGBA2YUV_YUNV, COLOR_BGRA2YUV_YUNV, COLOR_COLORCVT_MAX]""" + +INTERSECT_NONE: int +INTERSECT_PARTIAL: int +INTERSECT_FULL: int +RectanglesIntersectTypes = int +"""One of [INTERSECT_NONE, INTERSECT_PARTIAL, INTERSECT_FULL]""" + +FILLED: int +LINE_4: int +LINE_8: int +LINE_AA: int +LineTypes = int +"""One of [FILLED, LINE_4, LINE_8, LINE_AA]""" + +FONT_HERSHEY_SIMPLEX: int +FONT_HERSHEY_PLAIN: int +FONT_HERSHEY_DUPLEX: int +FONT_HERSHEY_COMPLEX: int +FONT_HERSHEY_TRIPLEX: int +FONT_HERSHEY_COMPLEX_SMALL: int +FONT_HERSHEY_SCRIPT_SIMPLEX: int +FONT_HERSHEY_SCRIPT_COMPLEX: int +FONT_ITALIC: int +HersheyFonts = int +"""One of [FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, FONT_HERSHEY_SCRIPT_COMPLEX, FONT_ITALIC]""" + +MARKER_CROSS: int +MARKER_TILTED_CROSS: int +MARKER_STAR: int +MARKER_DIAMOND: int +MARKER_SQUARE: int +MARKER_TRIANGLE_UP: int +MARKER_TRIANGLE_DOWN: int +MarkerTypes = int +"""One of [MARKER_CROSS, MARKER_TILTED_CROSS, MARKER_STAR, MARKER_DIAMOND, MARKER_SQUARE, MARKER_TRIANGLE_UP, MARKER_TRIANGLE_DOWN]""" + +TM_SQDIFF: int +TM_SQDIFF_NORMED: int +TM_CCORR: int +TM_CCORR_NORMED: int +TM_CCOEFF: int 
+TM_CCOEFF_NORMED: int +TemplateMatchModes = int +"""One of [TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED]""" + +COLORMAP_AUTUMN: int +COLORMAP_BONE: int +COLORMAP_JET: int +COLORMAP_WINTER: int +COLORMAP_RAINBOW: int +COLORMAP_OCEAN: int +COLORMAP_SUMMER: int +COLORMAP_SPRING: int +COLORMAP_COOL: int +COLORMAP_HSV: int +COLORMAP_PINK: int +COLORMAP_HOT: int +COLORMAP_PARULA: int +COLORMAP_MAGMA: int +COLORMAP_INFERNO: int +COLORMAP_PLASMA: int +COLORMAP_VIRIDIS: int +COLORMAP_CIVIDIS: int +COLORMAP_TWILIGHT: int +COLORMAP_TWILIGHT_SHIFTED: int +COLORMAP_TURBO: int +COLORMAP_DEEPGREEN: int +ColormapTypes = int +"""One of [COLORMAP_AUTUMN, COLORMAP_BONE, COLORMAP_JET, COLORMAP_WINTER, COLORMAP_RAINBOW, COLORMAP_OCEAN, COLORMAP_SUMMER, COLORMAP_SPRING, COLORMAP_COOL, COLORMAP_HSV, COLORMAP_PINK, COLORMAP_HOT, COLORMAP_PARULA, COLORMAP_MAGMA, COLORMAP_INFERNO, COLORMAP_PLASMA, COLORMAP_VIRIDIS, COLORMAP_CIVIDIS, COLORMAP_TWILIGHT, COLORMAP_TWILIGHT_SHIFTED, COLORMAP_TURBO, COLORMAP_DEEPGREEN]""" + +INPAINT_NS: int +INPAINT_TELEA: int +LDR_SIZE: int +NORMAL_CLONE: int +MIXED_CLONE: int +MONOCHROME_TRANSFER: int +RECURS_FILTER: int +NORMCONV_FILTER: int +CAP_PROP_DC1394_OFF: int +CAP_PROP_DC1394_MODE_MANUAL: int +CAP_PROP_DC1394_MODE_AUTO: int +CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO: int +CAP_PROP_DC1394_MAX: int +CAP_OPENNI_DEPTH_GENERATOR: int +CAP_OPENNI_IMAGE_GENERATOR: int +CAP_OPENNI_IR_GENERATOR: int +CAP_OPENNI_GENERATORS_MASK: int +CAP_PROP_OPENNI_OUTPUT_MODE: int +CAP_PROP_OPENNI_FRAME_MAX_DEPTH: int +CAP_PROP_OPENNI_BASELINE: int +CAP_PROP_OPENNI_FOCAL_LENGTH: int +CAP_PROP_OPENNI_REGISTRATION: int +CAP_PROP_OPENNI_REGISTRATION_ON: int +CAP_PROP_OPENNI_APPROX_FRAME_SYNC: int +CAP_PROP_OPENNI_MAX_BUFFER_SIZE: int +CAP_PROP_OPENNI_CIRCLE_BUFFER: int +CAP_PROP_OPENNI_MAX_TIME_DURATION: int +CAP_PROP_OPENNI_GENERATOR_PRESENT: int +CAP_PROP_OPENNI2_SYNC: int +CAP_PROP_OPENNI2_MIRROR: int +CAP_OPENNI_IMAGE_GENERATOR_PRESENT: int +CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE: int +CAP_OPENNI_DEPTH_GENERATOR_PRESENT: int +CAP_OPENNI_DEPTH_GENERATOR_BASELINE: int +CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH: int +CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION: int +CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON: int +CAP_OPENNI_IR_GENERATOR_PRESENT: int +CAP_OPENNI_DEPTH_MAP: int +CAP_OPENNI_POINT_CLOUD_MAP: int +CAP_OPENNI_DISPARITY_MAP: int +CAP_OPENNI_DISPARITY_MAP_32F: int +CAP_OPENNI_VALID_DEPTH_MASK: int +CAP_OPENNI_BGR_IMAGE: int +CAP_OPENNI_GRAY_IMAGE: int +CAP_OPENNI_IR_IMAGE: int +CAP_OPENNI_VGA_30HZ: int +CAP_OPENNI_SXGA_15HZ: int +CAP_OPENNI_SXGA_30HZ: int +CAP_OPENNI_QVGA_30HZ: int +CAP_OPENNI_QVGA_60HZ: int +CAP_PROP_GSTREAMER_QUEUE_LENGTH: int +CAP_PROP_PVAPI_MULTICASTIP: int +CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE: int +CAP_PROP_PVAPI_DECIMATIONHORIZONTAL: int +CAP_PROP_PVAPI_DECIMATIONVERTICAL: int +CAP_PROP_PVAPI_BINNINGX: int +CAP_PROP_PVAPI_BINNINGY: int +CAP_PROP_PVAPI_PIXELFORMAT: int +CAP_PVAPI_FSTRIGMODE_FREERUN: int +CAP_PVAPI_FSTRIGMODE_SYNCIN1: int +CAP_PVAPI_FSTRIGMODE_SYNCIN2: int +CAP_PVAPI_FSTRIGMODE_FIXEDRATE: int +CAP_PVAPI_FSTRIGMODE_SOFTWARE: int +CAP_PVAPI_DECIMATION_OFF: int +CAP_PVAPI_DECIMATION_2OUTOF4: int +CAP_PVAPI_DECIMATION_2OUTOF8: int +CAP_PVAPI_DECIMATION_2OUTOF16: int +CAP_PVAPI_PIXELFORMAT_MONO8: int +CAP_PVAPI_PIXELFORMAT_MONO16: int +CAP_PVAPI_PIXELFORMAT_BAYER8: int +CAP_PVAPI_PIXELFORMAT_BAYER16: int +CAP_PVAPI_PIXELFORMAT_RGB24: int +CAP_PVAPI_PIXELFORMAT_BGR24: int +CAP_PVAPI_PIXELFORMAT_RGBA32: int 
+CAP_PVAPI_PIXELFORMAT_BGRA32: int +CAP_PROP_XI_DOWNSAMPLING: int +CAP_PROP_XI_DATA_FORMAT: int +CAP_PROP_XI_OFFSET_X: int +CAP_PROP_XI_OFFSET_Y: int +CAP_PROP_XI_TRG_SOURCE: int +CAP_PROP_XI_TRG_SOFTWARE: int +CAP_PROP_XI_GPI_SELECTOR: int +CAP_PROP_XI_GPI_MODE: int +CAP_PROP_XI_GPI_LEVEL: int +CAP_PROP_XI_GPO_SELECTOR: int +CAP_PROP_XI_GPO_MODE: int +CAP_PROP_XI_LED_SELECTOR: int +CAP_PROP_XI_LED_MODE: int +CAP_PROP_XI_MANUAL_WB: int +CAP_PROP_XI_AUTO_WB: int +CAP_PROP_XI_AEAG: int +CAP_PROP_XI_EXP_PRIORITY: int +CAP_PROP_XI_AE_MAX_LIMIT: int +CAP_PROP_XI_AG_MAX_LIMIT: int +CAP_PROP_XI_AEAG_LEVEL: int +CAP_PROP_XI_TIMEOUT: int +CAP_PROP_XI_EXPOSURE: int +CAP_PROP_XI_EXPOSURE_BURST_COUNT: int +CAP_PROP_XI_GAIN_SELECTOR: int +CAP_PROP_XI_GAIN: int +CAP_PROP_XI_DOWNSAMPLING_TYPE: int +CAP_PROP_XI_BINNING_SELECTOR: int +CAP_PROP_XI_BINNING_VERTICAL: int +CAP_PROP_XI_BINNING_HORIZONTAL: int +CAP_PROP_XI_BINNING_PATTERN: int +CAP_PROP_XI_DECIMATION_SELECTOR: int +CAP_PROP_XI_DECIMATION_VERTICAL: int +CAP_PROP_XI_DECIMATION_HORIZONTAL: int +CAP_PROP_XI_DECIMATION_PATTERN: int +CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR: int +CAP_PROP_XI_TEST_PATTERN: int +CAP_PROP_XI_IMAGE_DATA_FORMAT: int +CAP_PROP_XI_SHUTTER_TYPE: int +CAP_PROP_XI_SENSOR_TAPS: int +CAP_PROP_XI_AEAG_ROI_OFFSET_X: int +CAP_PROP_XI_AEAG_ROI_OFFSET_Y: int +CAP_PROP_XI_AEAG_ROI_WIDTH: int +CAP_PROP_XI_AEAG_ROI_HEIGHT: int +CAP_PROP_XI_BPC: int +CAP_PROP_XI_WB_KR: int +CAP_PROP_XI_WB_KG: int +CAP_PROP_XI_WB_KB: int +CAP_PROP_XI_WIDTH: int +CAP_PROP_XI_HEIGHT: int +CAP_PROP_XI_REGION_SELECTOR: int +CAP_PROP_XI_REGION_MODE: int +CAP_PROP_XI_LIMIT_BANDWIDTH: int +CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH: int +CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH: int +CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH: int +CAP_PROP_XI_OUTPUT_DATA_PACKING: int +CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE: int +CAP_PROP_XI_IS_COOLED: int +CAP_PROP_XI_COOLING: int +CAP_PROP_XI_TARGET_TEMP: int +CAP_PROP_XI_CHIP_TEMP: int +CAP_PROP_XI_HOUS_TEMP: int +CAP_PROP_XI_HOUS_BACK_SIDE_TEMP: int +CAP_PROP_XI_SENSOR_BOARD_TEMP: int +CAP_PROP_XI_CMS: int +CAP_PROP_XI_APPLY_CMS: int +CAP_PROP_XI_IMAGE_IS_COLOR: int +CAP_PROP_XI_COLOR_FILTER_ARRAY: int +CAP_PROP_XI_GAMMAY: int +CAP_PROP_XI_GAMMAC: int +CAP_PROP_XI_SHARPNESS: int +CAP_PROP_XI_CC_MATRIX_00: int +CAP_PROP_XI_CC_MATRIX_01: int +CAP_PROP_XI_CC_MATRIX_02: int +CAP_PROP_XI_CC_MATRIX_03: int +CAP_PROP_XI_CC_MATRIX_10: int +CAP_PROP_XI_CC_MATRIX_11: int +CAP_PROP_XI_CC_MATRIX_12: int +CAP_PROP_XI_CC_MATRIX_13: int +CAP_PROP_XI_CC_MATRIX_20: int +CAP_PROP_XI_CC_MATRIX_21: int +CAP_PROP_XI_CC_MATRIX_22: int +CAP_PROP_XI_CC_MATRIX_23: int +CAP_PROP_XI_CC_MATRIX_30: int +CAP_PROP_XI_CC_MATRIX_31: int +CAP_PROP_XI_CC_MATRIX_32: int +CAP_PROP_XI_CC_MATRIX_33: int +CAP_PROP_XI_DEFAULT_CC_MATRIX: int +CAP_PROP_XI_TRG_SELECTOR: int +CAP_PROP_XI_ACQ_FRAME_BURST_COUNT: int +CAP_PROP_XI_DEBOUNCE_EN: int +CAP_PROP_XI_DEBOUNCE_T0: int +CAP_PROP_XI_DEBOUNCE_T1: int +CAP_PROP_XI_DEBOUNCE_POL: int +CAP_PROP_XI_LENS_MODE: int +CAP_PROP_XI_LENS_APERTURE_VALUE: int +CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE: int +CAP_PROP_XI_LENS_FOCUS_MOVE: int +CAP_PROP_XI_LENS_FOCUS_DISTANCE: int +CAP_PROP_XI_LENS_FOCAL_LENGTH: int +CAP_PROP_XI_LENS_FEATURE_SELECTOR: int +CAP_PROP_XI_LENS_FEATURE: int +CAP_PROP_XI_DEVICE_MODEL_ID: int +CAP_PROP_XI_DEVICE_SN: int +CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA: int +CAP_PROP_XI_IMAGE_PAYLOAD_SIZE: int +CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT: int +CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ: int +CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX: int 
+CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT: int +CAP_PROP_XI_FRAMERATE: int +CAP_PROP_XI_COUNTER_SELECTOR: int +CAP_PROP_XI_COUNTER_VALUE: int +CAP_PROP_XI_ACQ_TIMING_MODE: int +CAP_PROP_XI_AVAILABLE_BANDWIDTH: int +CAP_PROP_XI_BUFFER_POLICY: int +CAP_PROP_XI_LUT_EN: int +CAP_PROP_XI_LUT_INDEX: int +CAP_PROP_XI_LUT_VALUE: int +CAP_PROP_XI_TRG_DELAY: int +CAP_PROP_XI_TS_RST_MODE: int +CAP_PROP_XI_TS_RST_SOURCE: int +CAP_PROP_XI_IS_DEVICE_EXIST: int +CAP_PROP_XI_ACQ_BUFFER_SIZE: int +CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT: int +CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE: int +CAP_PROP_XI_BUFFERS_QUEUE_SIZE: int +CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT: int +CAP_PROP_XI_RECENT_FRAME: int +CAP_PROP_XI_DEVICE_RESET: int +CAP_PROP_XI_COLUMN_FPN_CORRECTION: int +CAP_PROP_XI_ROW_FPN_CORRECTION: int +CAP_PROP_XI_SENSOR_MODE: int +CAP_PROP_XI_HDR: int +CAP_PROP_XI_HDR_KNEEPOINT_COUNT: int +CAP_PROP_XI_HDR_T1: int +CAP_PROP_XI_HDR_T2: int +CAP_PROP_XI_KNEEPOINT1: int +CAP_PROP_XI_KNEEPOINT2: int +CAP_PROP_XI_IMAGE_BLACK_LEVEL: int +CAP_PROP_XI_HW_REVISION: int +CAP_PROP_XI_DEBUG_LEVEL: int +CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION: int +CAP_PROP_XI_FFS_FILE_ID: int +CAP_PROP_XI_FFS_FILE_SIZE: int +CAP_PROP_XI_FREE_FFS_SIZE: int +CAP_PROP_XI_USED_FFS_SIZE: int +CAP_PROP_XI_FFS_ACCESS_KEY: int +CAP_PROP_XI_SENSOR_FEATURE_SELECTOR: int +CAP_PROP_XI_SENSOR_FEATURE_VALUE: int +CAP_PROP_ARAVIS_AUTOTRIGGER: int +CAP_PROP_IOS_DEVICE_FOCUS: int +CAP_PROP_IOS_DEVICE_EXPOSURE: int +CAP_PROP_IOS_DEVICE_FLASH: int +CAP_PROP_IOS_DEVICE_WHITEBALANCE: int +CAP_PROP_IOS_DEVICE_TORCH: int +CAP_PROP_GIGA_FRAME_OFFSET_X: int +CAP_PROP_GIGA_FRAME_OFFSET_Y: int +CAP_PROP_GIGA_FRAME_WIDTH_MAX: int +CAP_PROP_GIGA_FRAME_HEIGH_MAX: int +CAP_PROP_GIGA_FRAME_SENS_WIDTH: int +CAP_PROP_GIGA_FRAME_SENS_HEIGH: int +CAP_PROP_INTELPERC_PROFILE_COUNT: int +CAP_PROP_INTELPERC_PROFILE_IDX: int +CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE: int +CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE: int +CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD: int +CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ: int +CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT: int +CAP_INTELPERC_DEPTH_GENERATOR: int +CAP_INTELPERC_IMAGE_GENERATOR: int +CAP_INTELPERC_IR_GENERATOR: int +CAP_INTELPERC_GENERATORS_MASK: int +CAP_INTELPERC_DEPTH_MAP: int +CAP_INTELPERC_UVDEPTH_MAP: int +CAP_INTELPERC_IR_MAP: int +CAP_INTELPERC_IMAGE: int +CAP_PROP_GPHOTO2_PREVIEW: int +CAP_PROP_GPHOTO2_WIDGET_ENUMERATE: int +CAP_PROP_GPHOTO2_RELOAD_CONFIG: int +CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE: int +CAP_PROP_GPHOTO2_COLLECT_MSGS: int +CAP_PROP_GPHOTO2_FLUSH_MSGS: int +CAP_PROP_SPEED: int +CAP_PROP_APERTURE: int +CAP_PROP_EXPOSUREPROGRAM: int +CAP_PROP_VIEWFINDER: int +CAP_PROP_IMAGES_BASE: int +CAP_PROP_IMAGES_LAST: int +LMEDS: int +RANSAC: int +RHO: int +USAC_DEFAULT: int +USAC_PARALLEL: int +USAC_FM_8PTS: int +USAC_FAST: int +USAC_ACCURATE: int +USAC_PROSAC: int +USAC_MAGSAC: int +CALIB_CB_ADAPTIVE_THRESH: int +CALIB_CB_NORMALIZE_IMAGE: int +CALIB_CB_FILTER_QUADS: int +CALIB_CB_FAST_CHECK: int +CALIB_CB_EXHAUSTIVE: int +CALIB_CB_ACCURACY: int +CALIB_CB_LARGER: int +CALIB_CB_MARKER: int +CALIB_CB_PLAIN: int +CALIB_CB_SYMMETRIC_GRID: int +CALIB_CB_ASYMMETRIC_GRID: int +CALIB_CB_CLUSTERING: int +CALIB_NINTRINSIC: int +CALIB_USE_INTRINSIC_GUESS: int +CALIB_FIX_ASPECT_RATIO: int +CALIB_FIX_PRINCIPAL_POINT: int +CALIB_ZERO_TANGENT_DIST: int +CALIB_FIX_FOCAL_LENGTH: int +CALIB_FIX_K1: int +CALIB_FIX_K2: int +CALIB_FIX_K3: int +CALIB_FIX_K4: int +CALIB_FIX_K5: int +CALIB_FIX_K6: int +CALIB_RATIONAL_MODEL: int 
+CALIB_THIN_PRISM_MODEL: int +CALIB_FIX_S1_S2_S3_S4: int +CALIB_TILTED_MODEL: int +CALIB_FIX_TAUX_TAUY: int +CALIB_USE_QR: int +CALIB_FIX_TANGENT_DIST: int +CALIB_FIX_INTRINSIC: int +CALIB_SAME_FOCAL_LENGTH: int +CALIB_ZERO_DISPARITY: int +CALIB_USE_LU: int +CALIB_USE_EXTRINSIC_GUESS: int +FM_7POINT: int +FM_8POINT: int +FM_LMEDS: int +FM_RANSAC: int +CASCADE_DO_CANNY_PRUNING: int +CASCADE_SCALE_IMAGE: int +CASCADE_FIND_BIGGEST_OBJECT: int +CASCADE_DO_ROUGH_SEARCH: int +OPTFLOW_USE_INITIAL_FLOW: int +OPTFLOW_LK_GET_MIN_EIGENVALS: int +OPTFLOW_FARNEBACK_GAUSSIAN: int +MOTION_TRANSLATION: int +MOTION_EUCLIDEAN: int +MOTION_AFFINE: int +MOTION_HOMOGRAPHY: int + +DrawMatchesFlags_DEFAULT: int +DRAW_MATCHES_FLAGS_DEFAULT: int +DrawMatchesFlags_DRAW_OVER_OUTIMG: int +DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG: int +DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS: int +DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS: int +DrawMatchesFlags_DRAW_RICH_KEYPOINTS: int +DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS: int +DrawMatchesFlags = int +"""One of [DrawMatchesFlags_DEFAULT, DRAW_MATCHES_FLAGS_DEFAULT, DrawMatchesFlags_DRAW_OVER_OUTIMG, DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG, DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS, DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS, DrawMatchesFlags_DRAW_RICH_KEYPOINTS, DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS]""" + +IMREAD_UNCHANGED: int +IMREAD_GRAYSCALE: int +IMREAD_COLOR: int +IMREAD_ANYDEPTH: int +IMREAD_ANYCOLOR: int +IMREAD_LOAD_GDAL: int +IMREAD_REDUCED_GRAYSCALE_2: int +IMREAD_REDUCED_COLOR_2: int +IMREAD_REDUCED_GRAYSCALE_4: int +IMREAD_REDUCED_COLOR_4: int +IMREAD_REDUCED_GRAYSCALE_8: int +IMREAD_REDUCED_COLOR_8: int +IMREAD_IGNORE_ORIENTATION: int +ImreadModes = int +"""One of [IMREAD_UNCHANGED, IMREAD_GRAYSCALE, IMREAD_COLOR, IMREAD_ANYDEPTH, IMREAD_ANYCOLOR, IMREAD_LOAD_GDAL, IMREAD_REDUCED_GRAYSCALE_2, IMREAD_REDUCED_COLOR_2, IMREAD_REDUCED_GRAYSCALE_4, IMREAD_REDUCED_COLOR_4, IMREAD_REDUCED_GRAYSCALE_8, IMREAD_REDUCED_COLOR_8, IMREAD_IGNORE_ORIENTATION]""" + +IMWRITE_JPEG_QUALITY: int +IMWRITE_JPEG_PROGRESSIVE: int +IMWRITE_JPEG_OPTIMIZE: int +IMWRITE_JPEG_RST_INTERVAL: int +IMWRITE_JPEG_LUMA_QUALITY: int +IMWRITE_JPEG_CHROMA_QUALITY: int +IMWRITE_JPEG_SAMPLING_FACTOR: int +IMWRITE_PNG_COMPRESSION: int +IMWRITE_PNG_STRATEGY: int +IMWRITE_PNG_BILEVEL: int +IMWRITE_PXM_BINARY: int +IMWRITE_EXR_TYPE: int +IMWRITE_EXR_COMPRESSION: int +IMWRITE_EXR_DWA_COMPRESSION_LEVEL: int +IMWRITE_WEBP_QUALITY: int +IMWRITE_HDR_COMPRESSION: int +IMWRITE_PAM_TUPLETYPE: int +IMWRITE_TIFF_RESUNIT: int +IMWRITE_TIFF_XDPI: int +IMWRITE_TIFF_YDPI: int +IMWRITE_TIFF_COMPRESSION: int +IMWRITE_TIFF_ROWSPERSTRIP: int +IMWRITE_TIFF_PREDICTOR: int +IMWRITE_JPEG2000_COMPRESSION_X1000: int +IMWRITE_AVIF_QUALITY: int +IMWRITE_AVIF_DEPTH: int +IMWRITE_AVIF_SPEED: int +ImwriteFlags = int +"""One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, IMWRITE_JPEG_RST_INTERVAL, IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, IMWRITE_JPEG_SAMPLING_FACTOR, IMWRITE_PNG_COMPRESSION, IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, IMWRITE_EXR_COMPRESSION, IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, IMWRITE_PAM_TUPLETYPE, IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, IMWRITE_TIFF_ROWSPERSTRIP, IMWRITE_TIFF_PREDICTOR, IMWRITE_JPEG2000_COMPRESSION_X1000, IMWRITE_AVIF_QUALITY, IMWRITE_AVIF_DEPTH, IMWRITE_AVIF_SPEED]""" + +IMWRITE_JPEG_SAMPLING_FACTOR_411: int +IMWRITE_JPEG_SAMPLING_FACTOR_420: int 
+IMWRITE_JPEG_SAMPLING_FACTOR_422: int +IMWRITE_JPEG_SAMPLING_FACTOR_440: int +IMWRITE_JPEG_SAMPLING_FACTOR_444: int +ImwriteJPEGSamplingFactorParams = int +"""One of [IMWRITE_JPEG_SAMPLING_FACTOR_411, IMWRITE_JPEG_SAMPLING_FACTOR_420, IMWRITE_JPEG_SAMPLING_FACTOR_422, IMWRITE_JPEG_SAMPLING_FACTOR_440, IMWRITE_JPEG_SAMPLING_FACTOR_444]""" + +IMWRITE_TIFF_COMPRESSION_NONE: int +IMWRITE_TIFF_COMPRESSION_CCITTRLE: int +IMWRITE_TIFF_COMPRESSION_CCITTFAX3: int +IMWRITE_TIFF_COMPRESSION_CCITT_T4: int +IMWRITE_TIFF_COMPRESSION_CCITTFAX4: int +IMWRITE_TIFF_COMPRESSION_CCITT_T6: int +IMWRITE_TIFF_COMPRESSION_LZW: int +IMWRITE_TIFF_COMPRESSION_OJPEG: int +IMWRITE_TIFF_COMPRESSION_JPEG: int +IMWRITE_TIFF_COMPRESSION_T85: int +IMWRITE_TIFF_COMPRESSION_T43: int +IMWRITE_TIFF_COMPRESSION_NEXT: int +IMWRITE_TIFF_COMPRESSION_CCITTRLEW: int +IMWRITE_TIFF_COMPRESSION_PACKBITS: int +IMWRITE_TIFF_COMPRESSION_THUNDERSCAN: int +IMWRITE_TIFF_COMPRESSION_IT8CTPAD: int +IMWRITE_TIFF_COMPRESSION_IT8LW: int +IMWRITE_TIFF_COMPRESSION_IT8MP: int +IMWRITE_TIFF_COMPRESSION_IT8BL: int +IMWRITE_TIFF_COMPRESSION_PIXARFILM: int +IMWRITE_TIFF_COMPRESSION_PIXARLOG: int +IMWRITE_TIFF_COMPRESSION_DEFLATE: int +IMWRITE_TIFF_COMPRESSION_ADOBE_DEFLATE: int +IMWRITE_TIFF_COMPRESSION_DCS: int +IMWRITE_TIFF_COMPRESSION_JBIG: int +IMWRITE_TIFF_COMPRESSION_SGILOG: int +IMWRITE_TIFF_COMPRESSION_SGILOG24: int +IMWRITE_TIFF_COMPRESSION_JP2000: int +IMWRITE_TIFF_COMPRESSION_LERC: int +IMWRITE_TIFF_COMPRESSION_LZMA: int +IMWRITE_TIFF_COMPRESSION_ZSTD: int +IMWRITE_TIFF_COMPRESSION_WEBP: int +IMWRITE_TIFF_COMPRESSION_JXL: int +ImwriteTiffCompressionFlags = int +"""One of [IMWRITE_TIFF_COMPRESSION_NONE, IMWRITE_TIFF_COMPRESSION_CCITTRLE, IMWRITE_TIFF_COMPRESSION_CCITTFAX3, IMWRITE_TIFF_COMPRESSION_CCITT_T4, IMWRITE_TIFF_COMPRESSION_CCITTFAX4, IMWRITE_TIFF_COMPRESSION_CCITT_T6, IMWRITE_TIFF_COMPRESSION_LZW, IMWRITE_TIFF_COMPRESSION_OJPEG, IMWRITE_TIFF_COMPRESSION_JPEG, IMWRITE_TIFF_COMPRESSION_T85, IMWRITE_TIFF_COMPRESSION_T43, IMWRITE_TIFF_COMPRESSION_NEXT, IMWRITE_TIFF_COMPRESSION_CCITTRLEW, IMWRITE_TIFF_COMPRESSION_PACKBITS, IMWRITE_TIFF_COMPRESSION_THUNDERSCAN, IMWRITE_TIFF_COMPRESSION_IT8CTPAD, IMWRITE_TIFF_COMPRESSION_IT8LW, IMWRITE_TIFF_COMPRESSION_IT8MP, IMWRITE_TIFF_COMPRESSION_IT8BL, IMWRITE_TIFF_COMPRESSION_PIXARFILM, IMWRITE_TIFF_COMPRESSION_PIXARLOG, IMWRITE_TIFF_COMPRESSION_DEFLATE, IMWRITE_TIFF_COMPRESSION_ADOBE_DEFLATE, IMWRITE_TIFF_COMPRESSION_DCS, IMWRITE_TIFF_COMPRESSION_JBIG, IMWRITE_TIFF_COMPRESSION_SGILOG, IMWRITE_TIFF_COMPRESSION_SGILOG24, IMWRITE_TIFF_COMPRESSION_JP2000, IMWRITE_TIFF_COMPRESSION_LERC, IMWRITE_TIFF_COMPRESSION_LZMA, IMWRITE_TIFF_COMPRESSION_ZSTD, IMWRITE_TIFF_COMPRESSION_WEBP, IMWRITE_TIFF_COMPRESSION_JXL]""" + +IMWRITE_TIFF_PREDICTOR_NONE: int +IMWRITE_TIFF_PREDICTOR_HORIZONTAL: int +IMWRITE_TIFF_PREDICTOR_FLOATINGPOINT: int +ImwriteTiffPredictorFlags = int +"""One of [IMWRITE_TIFF_PREDICTOR_NONE, IMWRITE_TIFF_PREDICTOR_HORIZONTAL, IMWRITE_TIFF_PREDICTOR_FLOATINGPOINT]""" + +IMWRITE_EXR_TYPE_HALF: int +IMWRITE_EXR_TYPE_FLOAT: int +ImwriteEXRTypeFlags = int +"""One of [IMWRITE_EXR_TYPE_HALF, IMWRITE_EXR_TYPE_FLOAT]""" + +IMWRITE_EXR_COMPRESSION_NO: int +IMWRITE_EXR_COMPRESSION_RLE: int +IMWRITE_EXR_COMPRESSION_ZIPS: int +IMWRITE_EXR_COMPRESSION_ZIP: int +IMWRITE_EXR_COMPRESSION_PIZ: int +IMWRITE_EXR_COMPRESSION_PXR24: int +IMWRITE_EXR_COMPRESSION_B44: int +IMWRITE_EXR_COMPRESSION_B44A: int +IMWRITE_EXR_COMPRESSION_DWAA: int +IMWRITE_EXR_COMPRESSION_DWAB: int +ImwriteEXRCompressionFlags = int +"""One of 
[IMWRITE_EXR_COMPRESSION_NO, IMWRITE_EXR_COMPRESSION_RLE, IMWRITE_EXR_COMPRESSION_ZIPS, IMWRITE_EXR_COMPRESSION_ZIP, IMWRITE_EXR_COMPRESSION_PIZ, IMWRITE_EXR_COMPRESSION_PXR24, IMWRITE_EXR_COMPRESSION_B44, IMWRITE_EXR_COMPRESSION_B44A, IMWRITE_EXR_COMPRESSION_DWAA, IMWRITE_EXR_COMPRESSION_DWAB]""" + +IMWRITE_PNG_STRATEGY_DEFAULT: int +IMWRITE_PNG_STRATEGY_FILTERED: int +IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY: int +IMWRITE_PNG_STRATEGY_RLE: int +IMWRITE_PNG_STRATEGY_FIXED: int +ImwritePNGFlags = int +"""One of [IMWRITE_PNG_STRATEGY_DEFAULT, IMWRITE_PNG_STRATEGY_FILTERED, IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, IMWRITE_PNG_STRATEGY_RLE, IMWRITE_PNG_STRATEGY_FIXED]""" + +IMWRITE_PAM_FORMAT_NULL: int +IMWRITE_PAM_FORMAT_BLACKANDWHITE: int +IMWRITE_PAM_FORMAT_GRAYSCALE: int +IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA: int +IMWRITE_PAM_FORMAT_RGB: int +IMWRITE_PAM_FORMAT_RGB_ALPHA: int +ImwritePAMFlags = int +"""One of [IMWRITE_PAM_FORMAT_NULL, IMWRITE_PAM_FORMAT_BLACKANDWHITE, IMWRITE_PAM_FORMAT_GRAYSCALE, IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA, IMWRITE_PAM_FORMAT_RGB, IMWRITE_PAM_FORMAT_RGB_ALPHA]""" + +IMWRITE_HDR_COMPRESSION_NONE: int +IMWRITE_HDR_COMPRESSION_RLE: int +ImwriteHDRCompressionFlags = int +"""One of [IMWRITE_HDR_COMPRESSION_NONE, IMWRITE_HDR_COMPRESSION_RLE]""" + +CAP_ANY: int +CAP_VFW: int +CAP_V4L: int +CAP_V4L2: int +CAP_FIREWIRE: int +CAP_FIREWARE: int +CAP_IEEE1394: int +CAP_DC1394: int +CAP_CMU1394: int +CAP_QT: int +CAP_UNICAP: int +CAP_DSHOW: int +CAP_PVAPI: int +CAP_OPENNI: int +CAP_OPENNI_ASUS: int +CAP_ANDROID: int +CAP_XIAPI: int +CAP_AVFOUNDATION: int +CAP_GIGANETIX: int +CAP_MSMF: int +CAP_WINRT: int +CAP_INTELPERC: int +CAP_REALSENSE: int +CAP_OPENNI2: int +CAP_OPENNI2_ASUS: int +CAP_OPENNI2_ASTRA: int +CAP_GPHOTO2: int +CAP_GSTREAMER: int +CAP_FFMPEG: int +CAP_IMAGES: int +CAP_ARAVIS: int +CAP_OPENCV_MJPEG: int +CAP_INTEL_MFX: int +CAP_XINE: int +CAP_UEYE: int +CAP_OBSENSOR: int +VideoCaptureAPIs = int +"""One of [CAP_ANY, CAP_VFW, CAP_V4L, CAP_V4L2, CAP_FIREWIRE, CAP_FIREWARE, CAP_IEEE1394, CAP_DC1394, CAP_CMU1394, CAP_QT, CAP_UNICAP, CAP_DSHOW, CAP_PVAPI, CAP_OPENNI, CAP_OPENNI_ASUS, CAP_ANDROID, CAP_XIAPI, CAP_AVFOUNDATION, CAP_GIGANETIX, CAP_MSMF, CAP_WINRT, CAP_INTELPERC, CAP_REALSENSE, CAP_OPENNI2, CAP_OPENNI2_ASUS, CAP_OPENNI2_ASTRA, CAP_GPHOTO2, CAP_GSTREAMER, CAP_FFMPEG, CAP_IMAGES, CAP_ARAVIS, CAP_OPENCV_MJPEG, CAP_INTEL_MFX, CAP_XINE, CAP_UEYE, CAP_OBSENSOR]""" + +CAP_PROP_POS_MSEC: int +CAP_PROP_POS_FRAMES: int +CAP_PROP_POS_AVI_RATIO: int +CAP_PROP_FRAME_WIDTH: int +CAP_PROP_FRAME_HEIGHT: int +CAP_PROP_FPS: int +CAP_PROP_FOURCC: int +CAP_PROP_FRAME_COUNT: int +CAP_PROP_FORMAT: int +CAP_PROP_MODE: int +CAP_PROP_BRIGHTNESS: int +CAP_PROP_CONTRAST: int +CAP_PROP_SATURATION: int +CAP_PROP_HUE: int +CAP_PROP_GAIN: int +CAP_PROP_EXPOSURE: int +CAP_PROP_CONVERT_RGB: int +CAP_PROP_WHITE_BALANCE_BLUE_U: int +CAP_PROP_RECTIFICATION: int +CAP_PROP_MONOCHROME: int +CAP_PROP_SHARPNESS: int +CAP_PROP_AUTO_EXPOSURE: int +CAP_PROP_GAMMA: int +CAP_PROP_TEMPERATURE: int +CAP_PROP_TRIGGER: int +CAP_PROP_TRIGGER_DELAY: int +CAP_PROP_WHITE_BALANCE_RED_V: int +CAP_PROP_ZOOM: int +CAP_PROP_FOCUS: int +CAP_PROP_GUID: int +CAP_PROP_ISO_SPEED: int +CAP_PROP_BACKLIGHT: int +CAP_PROP_PAN: int +CAP_PROP_TILT: int +CAP_PROP_ROLL: int +CAP_PROP_IRIS: int +CAP_PROP_SETTINGS: int +CAP_PROP_BUFFERSIZE: int +CAP_PROP_AUTOFOCUS: int +CAP_PROP_SAR_NUM: int +CAP_PROP_SAR_DEN: int +CAP_PROP_BACKEND: int +CAP_PROP_CHANNEL: int +CAP_PROP_AUTO_WB: int +CAP_PROP_WB_TEMPERATURE: int +CAP_PROP_CODEC_PIXEL_FORMAT: 
int +CAP_PROP_BITRATE: int +CAP_PROP_ORIENTATION_META: int +CAP_PROP_ORIENTATION_AUTO: int +CAP_PROP_HW_ACCELERATION: int +CAP_PROP_HW_DEVICE: int +CAP_PROP_HW_ACCELERATION_USE_OPENCL: int +CAP_PROP_OPEN_TIMEOUT_MSEC: int +CAP_PROP_READ_TIMEOUT_MSEC: int +CAP_PROP_STREAM_OPEN_TIME_USEC: int +CAP_PROP_VIDEO_TOTAL_CHANNELS: int +CAP_PROP_VIDEO_STREAM: int +CAP_PROP_AUDIO_STREAM: int +CAP_PROP_AUDIO_POS: int +CAP_PROP_AUDIO_SHIFT_NSEC: int +CAP_PROP_AUDIO_DATA_DEPTH: int +CAP_PROP_AUDIO_SAMPLES_PER_SECOND: int +CAP_PROP_AUDIO_BASE_INDEX: int +CAP_PROP_AUDIO_TOTAL_CHANNELS: int +CAP_PROP_AUDIO_TOTAL_STREAMS: int +CAP_PROP_AUDIO_SYNCHRONIZE: int +CAP_PROP_LRF_HAS_KEY_FRAME: int +CAP_PROP_CODEC_EXTRADATA_INDEX: int +CAP_PROP_FRAME_TYPE: int +CAP_PROP_N_THREADS: int +VideoCaptureProperties = int +"""One of [CAP_PROP_POS_MSEC, CAP_PROP_POS_FRAMES, CAP_PROP_POS_AVI_RATIO, CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS, CAP_PROP_FOURCC, CAP_PROP_FRAME_COUNT, CAP_PROP_FORMAT, CAP_PROP_MODE, CAP_PROP_BRIGHTNESS, CAP_PROP_CONTRAST, CAP_PROP_SATURATION, CAP_PROP_HUE, CAP_PROP_GAIN, CAP_PROP_EXPOSURE, CAP_PROP_CONVERT_RGB, CAP_PROP_WHITE_BALANCE_BLUE_U, CAP_PROP_RECTIFICATION, CAP_PROP_MONOCHROME, CAP_PROP_SHARPNESS, CAP_PROP_AUTO_EXPOSURE, CAP_PROP_GAMMA, CAP_PROP_TEMPERATURE, CAP_PROP_TRIGGER, CAP_PROP_TRIGGER_DELAY, CAP_PROP_WHITE_BALANCE_RED_V, CAP_PROP_ZOOM, CAP_PROP_FOCUS, CAP_PROP_GUID, CAP_PROP_ISO_SPEED, CAP_PROP_BACKLIGHT, CAP_PROP_PAN, CAP_PROP_TILT, CAP_PROP_ROLL, CAP_PROP_IRIS, CAP_PROP_SETTINGS, CAP_PROP_BUFFERSIZE, CAP_PROP_AUTOFOCUS, CAP_PROP_SAR_NUM, CAP_PROP_SAR_DEN, CAP_PROP_BACKEND, CAP_PROP_CHANNEL, CAP_PROP_AUTO_WB, CAP_PROP_WB_TEMPERATURE, CAP_PROP_CODEC_PIXEL_FORMAT, CAP_PROP_BITRATE, CAP_PROP_ORIENTATION_META, CAP_PROP_ORIENTATION_AUTO, CAP_PROP_HW_ACCELERATION, CAP_PROP_HW_DEVICE, CAP_PROP_HW_ACCELERATION_USE_OPENCL, CAP_PROP_OPEN_TIMEOUT_MSEC, CAP_PROP_READ_TIMEOUT_MSEC, CAP_PROP_STREAM_OPEN_TIME_USEC, CAP_PROP_VIDEO_TOTAL_CHANNELS, CAP_PROP_VIDEO_STREAM, CAP_PROP_AUDIO_STREAM, CAP_PROP_AUDIO_POS, CAP_PROP_AUDIO_SHIFT_NSEC, CAP_PROP_AUDIO_DATA_DEPTH, CAP_PROP_AUDIO_SAMPLES_PER_SECOND, CAP_PROP_AUDIO_BASE_INDEX, CAP_PROP_AUDIO_TOTAL_CHANNELS, CAP_PROP_AUDIO_TOTAL_STREAMS, CAP_PROP_AUDIO_SYNCHRONIZE, CAP_PROP_LRF_HAS_KEY_FRAME, CAP_PROP_CODEC_EXTRADATA_INDEX, CAP_PROP_FRAME_TYPE, CAP_PROP_N_THREADS]""" + +VIDEOWRITER_PROP_QUALITY: int +VIDEOWRITER_PROP_FRAMEBYTES: int +VIDEOWRITER_PROP_NSTRIPES: int +VIDEOWRITER_PROP_IS_COLOR: int +VIDEOWRITER_PROP_DEPTH: int +VIDEOWRITER_PROP_HW_ACCELERATION: int +VIDEOWRITER_PROP_HW_DEVICE: int +VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL: int +VIDEOWRITER_PROP_RAW_VIDEO: int +VIDEOWRITER_PROP_KEY_INTERVAL: int +VIDEOWRITER_PROP_KEY_FLAG: int +VideoWriterProperties = int +"""One of [VIDEOWRITER_PROP_QUALITY, VIDEOWRITER_PROP_FRAMEBYTES, VIDEOWRITER_PROP_NSTRIPES, VIDEOWRITER_PROP_IS_COLOR, VIDEOWRITER_PROP_DEPTH, VIDEOWRITER_PROP_HW_ACCELERATION, VIDEOWRITER_PROP_HW_DEVICE, VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, VIDEOWRITER_PROP_RAW_VIDEO, VIDEOWRITER_PROP_KEY_INTERVAL, VIDEOWRITER_PROP_KEY_FLAG]""" + +VIDEO_ACCELERATION_NONE: int +VIDEO_ACCELERATION_ANY: int +VIDEO_ACCELERATION_D3D11: int +VIDEO_ACCELERATION_VAAPI: int +VIDEO_ACCELERATION_MFX: int +VideoAccelerationType = int +"""One of [VIDEO_ACCELERATION_NONE, VIDEO_ACCELERATION_ANY, VIDEO_ACCELERATION_D3D11, VIDEO_ACCELERATION_VAAPI, VIDEO_ACCELERATION_MFX]""" + +CAP_OBSENSOR_DEPTH_MAP: int +CAP_OBSENSOR_BGR_IMAGE: int +CAP_OBSENSOR_IR_IMAGE: int 
+VideoCaptureOBSensorDataType = int +"""One of [CAP_OBSENSOR_DEPTH_MAP, CAP_OBSENSOR_BGR_IMAGE, CAP_OBSENSOR_IR_IMAGE]""" + +CAP_OBSENSOR_DEPTH_GENERATOR: int +CAP_OBSENSOR_IMAGE_GENERATOR: int +CAP_OBSENSOR_IR_GENERATOR: int +CAP_OBSENSOR_GENERATORS_MASK: int +VideoCaptureOBSensorGenerators = int +"""One of [CAP_OBSENSOR_DEPTH_GENERATOR, CAP_OBSENSOR_IMAGE_GENERATOR, CAP_OBSENSOR_IR_GENERATOR, CAP_OBSENSOR_GENERATORS_MASK]""" + +CAP_PROP_OBSENSOR_INTRINSIC_FX: int +CAP_PROP_OBSENSOR_INTRINSIC_FY: int +CAP_PROP_OBSENSOR_INTRINSIC_CX: int +CAP_PROP_OBSENSOR_INTRINSIC_CY: int +VideoCaptureOBSensorProperties = int +"""One of [CAP_PROP_OBSENSOR_INTRINSIC_FX, CAP_PROP_OBSENSOR_INTRINSIC_FY, CAP_PROP_OBSENSOR_INTRINSIC_CX, CAP_PROP_OBSENSOR_INTRINSIC_CY]""" + +SOLVEPNP_ITERATIVE: int +SOLVEPNP_EPNP: int +SOLVEPNP_P3P: int +SOLVEPNP_DLS: int +SOLVEPNP_UPNP: int +SOLVEPNP_AP3P: int +SOLVEPNP_IPPE: int +SOLVEPNP_IPPE_SQUARE: int +SOLVEPNP_SQPNP: int +SOLVEPNP_MAX_COUNT: int +SolvePnPMethod = int +"""One of [SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_DLS, SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE, SOLVEPNP_SQPNP, SOLVEPNP_MAX_COUNT]""" + +CALIB_HAND_EYE_TSAI: int +CALIB_HAND_EYE_PARK: int +CALIB_HAND_EYE_HORAUD: int +CALIB_HAND_EYE_ANDREFF: int +CALIB_HAND_EYE_DANIILIDIS: int +HandEyeCalibrationMethod = int +"""One of [CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK, CALIB_HAND_EYE_HORAUD, CALIB_HAND_EYE_ANDREFF, CALIB_HAND_EYE_DANIILIDIS]""" + +CALIB_ROBOT_WORLD_HAND_EYE_SHAH: int +CALIB_ROBOT_WORLD_HAND_EYE_LI: int +RobotWorldHandEyeCalibrationMethod = int +"""One of [CALIB_ROBOT_WORLD_HAND_EYE_SHAH, CALIB_ROBOT_WORLD_HAND_EYE_LI]""" + +SAMPLING_UNIFORM: int +SAMPLING_PROGRESSIVE_NAPSAC: int +SAMPLING_NAPSAC: int +SAMPLING_PROSAC: int +SamplingMethod = int +"""One of [SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC, SAMPLING_PROSAC]""" + +LOCAL_OPTIM_NULL: int +LOCAL_OPTIM_INNER_LO: int +LOCAL_OPTIM_INNER_AND_ITER_LO: int +LOCAL_OPTIM_GC: int +LOCAL_OPTIM_SIGMA: int +LocalOptimMethod = int +"""One of [LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO, LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA]""" + +SCORE_METHOD_RANSAC: int +SCORE_METHOD_MSAC: int +SCORE_METHOD_MAGSAC: int +SCORE_METHOD_LMEDS: int +ScoreMethod = int +"""One of [SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS]""" + +NEIGH_FLANN_KNN: int +NEIGH_GRID: int +NEIGH_FLANN_RADIUS: int +NeighborSearchMethod = int +"""One of [NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS]""" + +NONE_POLISHER: int +LSQ_POLISHER: int +MAGSAC: int +COV_POLISHER: int +PolishingMethod = int +"""One of [NONE_POLISHER, LSQ_POLISHER, MAGSAC, COV_POLISHER]""" + +PROJ_SPHERICAL_ORTHO: int +PROJ_SPHERICAL_EQRECT: int +UndistortTypes = int +"""One of [PROJ_SPHERICAL_ORTHO, PROJ_SPHERICAL_EQRECT]""" + +WINDOW_NORMAL: int +WINDOW_AUTOSIZE: int +WINDOW_OPENGL: int +WINDOW_FULLSCREEN: int +WINDOW_FREERATIO: int +WINDOW_KEEPRATIO: int +WINDOW_GUI_EXPANDED: int +WINDOW_GUI_NORMAL: int +WindowFlags = int +"""One of [WINDOW_NORMAL, WINDOW_AUTOSIZE, WINDOW_OPENGL, WINDOW_FULLSCREEN, WINDOW_FREERATIO, WINDOW_KEEPRATIO, WINDOW_GUI_EXPANDED, WINDOW_GUI_NORMAL]""" + +WND_PROP_FULLSCREEN: int +WND_PROP_AUTOSIZE: int +WND_PROP_ASPECT_RATIO: int +WND_PROP_OPENGL: int +WND_PROP_VISIBLE: int +WND_PROP_TOPMOST: int +WND_PROP_VSYNC: int +WindowPropertyFlags = int +"""One of [WND_PROP_FULLSCREEN, WND_PROP_AUTOSIZE, WND_PROP_ASPECT_RATIO, WND_PROP_OPENGL, WND_PROP_VISIBLE, WND_PROP_TOPMOST, 
WND_PROP_VSYNC]""" + +EVENT_MOUSEMOVE: int +EVENT_LBUTTONDOWN: int +EVENT_RBUTTONDOWN: int +EVENT_MBUTTONDOWN: int +EVENT_LBUTTONUP: int +EVENT_RBUTTONUP: int +EVENT_MBUTTONUP: int +EVENT_LBUTTONDBLCLK: int +EVENT_RBUTTONDBLCLK: int +EVENT_MBUTTONDBLCLK: int +EVENT_MOUSEWHEEL: int +EVENT_MOUSEHWHEEL: int +MouseEventTypes = int +"""One of [EVENT_MOUSEMOVE, EVENT_LBUTTONDOWN, EVENT_RBUTTONDOWN, EVENT_MBUTTONDOWN, EVENT_LBUTTONUP, EVENT_RBUTTONUP, EVENT_MBUTTONUP, EVENT_LBUTTONDBLCLK, EVENT_RBUTTONDBLCLK, EVENT_MBUTTONDBLCLK, EVENT_MOUSEWHEEL, EVENT_MOUSEHWHEEL]""" + +EVENT_FLAG_LBUTTON: int +EVENT_FLAG_RBUTTON: int +EVENT_FLAG_MBUTTON: int +EVENT_FLAG_CTRLKEY: int +EVENT_FLAG_SHIFTKEY: int +EVENT_FLAG_ALTKEY: int +MouseEventFlags = int +"""One of [EVENT_FLAG_LBUTTON, EVENT_FLAG_RBUTTON, EVENT_FLAG_MBUTTON, EVENT_FLAG_CTRLKEY, EVENT_FLAG_SHIFTKEY, EVENT_FLAG_ALTKEY]""" + +QT_FONT_LIGHT: int +QT_FONT_NORMAL: int +QT_FONT_DEMIBOLD: int +QT_FONT_BOLD: int +QT_FONT_BLACK: int +QtFontWeights = int +"""One of [QT_FONT_LIGHT, QT_FONT_NORMAL, QT_FONT_DEMIBOLD, QT_FONT_BOLD, QT_FONT_BLACK]""" + +QT_STYLE_NORMAL: int +QT_STYLE_ITALIC: int +QT_STYLE_OBLIQUE: int +QtFontStyles = int +"""One of [QT_STYLE_NORMAL, QT_STYLE_ITALIC, QT_STYLE_OBLIQUE]""" + +QT_PUSH_BUTTON: int +QT_CHECKBOX: int +QT_RADIOBOX: int +QT_NEW_BUTTONBAR: int +QtButtonTypes = int +"""One of [QT_PUSH_BUTTON, QT_CHECKBOX, QT_RADIOBOX, QT_NEW_BUTTONBAR]""" + +GShape_GMAT: int +GSHAPE_GMAT: int +GShape_GSCALAR: int +GSHAPE_GSCALAR: int +GShape_GARRAY: int +GSHAPE_GARRAY: int +GShape_GOPAQUE: int +GSHAPE_GOPAQUE: int +GShape_GFRAME: int +GSHAPE_GFRAME: int +GShape = int +"""One of [GShape_GMAT, GSHAPE_GMAT, GShape_GSCALAR, GSHAPE_GSCALAR, GShape_GARRAY, GSHAPE_GARRAY, GShape_GOPAQUE, GSHAPE_GOPAQUE, GShape_GFRAME, GSHAPE_GFRAME]""" + +MediaFormat_BGR: int +MEDIA_FORMAT_BGR: int +MediaFormat_NV12: int +MEDIA_FORMAT_NV12: int +MediaFormat_GRAY: int +MEDIA_FORMAT_GRAY: int +MediaFormat = int +"""One of [MediaFormat_BGR, MEDIA_FORMAT_BGR, MediaFormat_NV12, MEDIA_FORMAT_NV12, MediaFormat_GRAY, MEDIA_FORMAT_GRAY]""" + + +FileStorage_READ: int +FILE_STORAGE_READ: int +FileStorage_WRITE: int +FILE_STORAGE_WRITE: int +FileStorage_APPEND: int +FILE_STORAGE_APPEND: int +FileStorage_MEMORY: int +FILE_STORAGE_MEMORY: int +FileStorage_FORMAT_MASK: int +FILE_STORAGE_FORMAT_MASK: int +FileStorage_FORMAT_AUTO: int +FILE_STORAGE_FORMAT_AUTO: int +FileStorage_FORMAT_XML: int +FILE_STORAGE_FORMAT_XML: int +FileStorage_FORMAT_YAML: int +FILE_STORAGE_FORMAT_YAML: int +FileStorage_FORMAT_JSON: int +FILE_STORAGE_FORMAT_JSON: int +FileStorage_BASE64: int +FILE_STORAGE_BASE64: int +FileStorage_WRITE_BASE64: int +FILE_STORAGE_WRITE_BASE64: int +FileStorage_Mode = int +"""One of [FileStorage_READ, FILE_STORAGE_READ, FileStorage_WRITE, FILE_STORAGE_WRITE, FileStorage_APPEND, FILE_STORAGE_APPEND, FileStorage_MEMORY, FILE_STORAGE_MEMORY, FileStorage_FORMAT_MASK, FILE_STORAGE_FORMAT_MASK, FileStorage_FORMAT_AUTO, FILE_STORAGE_FORMAT_AUTO, FileStorage_FORMAT_XML, FILE_STORAGE_FORMAT_XML, FileStorage_FORMAT_YAML, FILE_STORAGE_FORMAT_YAML, FileStorage_FORMAT_JSON, FILE_STORAGE_FORMAT_JSON, FileStorage_BASE64, FILE_STORAGE_BASE64, FileStorage_WRITE_BASE64, FILE_STORAGE_WRITE_BASE64]""" + +FileStorage_UNDEFINED: int +FILE_STORAGE_UNDEFINED: int +FileStorage_VALUE_EXPECTED: int +FILE_STORAGE_VALUE_EXPECTED: int +FileStorage_NAME_EXPECTED: int +FILE_STORAGE_NAME_EXPECTED: int +FileStorage_INSIDE_MAP: int +FILE_STORAGE_INSIDE_MAP: int +FileStorage_State = int +"""One of 
[FileStorage_UNDEFINED, FILE_STORAGE_UNDEFINED, FileStorage_VALUE_EXPECTED, FILE_STORAGE_VALUE_EXPECTED, FileStorage_NAME_EXPECTED, FILE_STORAGE_NAME_EXPECTED, FileStorage_INSIDE_MAP, FILE_STORAGE_INSIDE_MAP]""" + +FileNode_NONE: int +FILE_NODE_NONE: int +FileNode_INT: int +FILE_NODE_INT: int +FileNode_REAL: int +FILE_NODE_REAL: int +FileNode_FLOAT: int +FILE_NODE_FLOAT: int +FileNode_STR: int +FILE_NODE_STR: int +FileNode_STRING: int +FILE_NODE_STRING: int +FileNode_SEQ: int +FILE_NODE_SEQ: int +FileNode_MAP: int +FILE_NODE_MAP: int +FileNode_TYPE_MASK: int +FILE_NODE_TYPE_MASK: int +FileNode_FLOW: int +FILE_NODE_FLOW: int +FileNode_UNIFORM: int +FILE_NODE_UNIFORM: int +FileNode_EMPTY: int +FILE_NODE_EMPTY: int +FileNode_NAMED: int +FILE_NODE_NAMED: int + +UMat_MAGIC_VAL: int +UMAT_MAGIC_VAL: int +UMat_AUTO_STEP: int +UMAT_AUTO_STEP: int +UMat_CONTINUOUS_FLAG: int +UMAT_CONTINUOUS_FLAG: int +UMat_SUBMATRIX_FLAG: int +UMAT_SUBMATRIX_FLAG: int +UMat_MAGIC_MASK: int +UMAT_MAGIC_MASK: int +UMat_TYPE_MASK: int +UMAT_TYPE_MASK: int +UMat_DEPTH_MASK: int +UMAT_DEPTH_MASK: int + +Subdiv2D_PTLOC_ERROR: int +SUBDIV2D_PTLOC_ERROR: int +Subdiv2D_PTLOC_OUTSIDE_RECT: int +SUBDIV2D_PTLOC_OUTSIDE_RECT: int +Subdiv2D_PTLOC_INSIDE: int +SUBDIV2D_PTLOC_INSIDE: int +Subdiv2D_PTLOC_VERTEX: int +SUBDIV2D_PTLOC_VERTEX: int +Subdiv2D_PTLOC_ON_EDGE: int +SUBDIV2D_PTLOC_ON_EDGE: int +Subdiv2D_NEXT_AROUND_ORG: int +SUBDIV2D_NEXT_AROUND_ORG: int +Subdiv2D_NEXT_AROUND_DST: int +SUBDIV2D_NEXT_AROUND_DST: int +Subdiv2D_PREV_AROUND_ORG: int +SUBDIV2D_PREV_AROUND_ORG: int +Subdiv2D_PREV_AROUND_DST: int +SUBDIV2D_PREV_AROUND_DST: int +Subdiv2D_NEXT_AROUND_LEFT: int +SUBDIV2D_NEXT_AROUND_LEFT: int +Subdiv2D_NEXT_AROUND_RIGHT: int +SUBDIV2D_NEXT_AROUND_RIGHT: int +Subdiv2D_PREV_AROUND_LEFT: int +SUBDIV2D_PREV_AROUND_LEFT: int +Subdiv2D_PREV_AROUND_RIGHT: int +SUBDIV2D_PREV_AROUND_RIGHT: int + +ORB_HARRIS_SCORE: int +ORB_FAST_SCORE: int +ORB_ScoreType = int +"""One of [ORB_HARRIS_SCORE, ORB_FAST_SCORE]""" + +FastFeatureDetector_TYPE_5_8: int +FAST_FEATURE_DETECTOR_TYPE_5_8: int +FastFeatureDetector_TYPE_7_12: int +FAST_FEATURE_DETECTOR_TYPE_7_12: int +FastFeatureDetector_TYPE_9_16: int +FAST_FEATURE_DETECTOR_TYPE_9_16: int +FastFeatureDetector_DetectorType = int +"""One of [FastFeatureDetector_TYPE_5_8, FAST_FEATURE_DETECTOR_TYPE_5_8, FastFeatureDetector_TYPE_7_12, FAST_FEATURE_DETECTOR_TYPE_7_12, FastFeatureDetector_TYPE_9_16, FAST_FEATURE_DETECTOR_TYPE_9_16]""" + +FastFeatureDetector_THRESHOLD: int +FAST_FEATURE_DETECTOR_THRESHOLD: int +FastFeatureDetector_NONMAX_SUPPRESSION: int +FAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int +FastFeatureDetector_FAST_N: int +FAST_FEATURE_DETECTOR_FAST_N: int + +AgastFeatureDetector_AGAST_5_8: int +AGAST_FEATURE_DETECTOR_AGAST_5_8: int +AgastFeatureDetector_AGAST_7_12d: int +AGAST_FEATURE_DETECTOR_AGAST_7_12D: int +AgastFeatureDetector_AGAST_7_12s: int +AGAST_FEATURE_DETECTOR_AGAST_7_12S: int +AgastFeatureDetector_OAST_9_16: int +AGAST_FEATURE_DETECTOR_OAST_9_16: int +AgastFeatureDetector_DetectorType = int +"""One of [AgastFeatureDetector_AGAST_5_8, AGAST_FEATURE_DETECTOR_AGAST_5_8, AgastFeatureDetector_AGAST_7_12d, AGAST_FEATURE_DETECTOR_AGAST_7_12D, AgastFeatureDetector_AGAST_7_12s, AGAST_FEATURE_DETECTOR_AGAST_7_12S, AgastFeatureDetector_OAST_9_16, AGAST_FEATURE_DETECTOR_OAST_9_16]""" + +AgastFeatureDetector_THRESHOLD: int +AGAST_FEATURE_DETECTOR_THRESHOLD: int +AgastFeatureDetector_NONMAX_SUPPRESSION: int +AGAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int + +KAZE_DIFF_PM_G1: int 
+KAZE_DIFF_PM_G2: int +KAZE_DIFF_WEICKERT: int +KAZE_DIFF_CHARBONNIER: int +KAZE_DiffusivityType = int +"""One of [KAZE_DIFF_PM_G1, KAZE_DIFF_PM_G2, KAZE_DIFF_WEICKERT, KAZE_DIFF_CHARBONNIER]""" + +AKAZE_DESCRIPTOR_KAZE_UPRIGHT: int +AKAZE_DESCRIPTOR_KAZE: int +AKAZE_DESCRIPTOR_MLDB_UPRIGHT: int +AKAZE_DESCRIPTOR_MLDB: int +AKAZE_DescriptorType = int +"""One of [AKAZE_DESCRIPTOR_KAZE_UPRIGHT, AKAZE_DESCRIPTOR_KAZE, AKAZE_DESCRIPTOR_MLDB_UPRIGHT, AKAZE_DESCRIPTOR_MLDB]""" + +DescriptorMatcher_FLANNBASED: int +DESCRIPTOR_MATCHER_FLANNBASED: int +DescriptorMatcher_BRUTEFORCE: int +DESCRIPTOR_MATCHER_BRUTEFORCE: int +DescriptorMatcher_BRUTEFORCE_L1: int +DESCRIPTOR_MATCHER_BRUTEFORCE_L1: int +DescriptorMatcher_BRUTEFORCE_HAMMING: int +DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING: int +DescriptorMatcher_BRUTEFORCE_HAMMINGLUT: int +DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT: int +DescriptorMatcher_BRUTEFORCE_SL2: int +DESCRIPTOR_MATCHER_BRUTEFORCE_SL2: int +DescriptorMatcher_MatcherType = int +"""One of [DescriptorMatcher_FLANNBASED, DESCRIPTOR_MATCHER_FLANNBASED, DescriptorMatcher_BRUTEFORCE, DESCRIPTOR_MATCHER_BRUTEFORCE, DescriptorMatcher_BRUTEFORCE_L1, DESCRIPTOR_MATCHER_BRUTEFORCE_L1, DescriptorMatcher_BRUTEFORCE_HAMMING, DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING, DescriptorMatcher_BRUTEFORCE_HAMMINGLUT, DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT, DescriptorMatcher_BRUTEFORCE_SL2, DESCRIPTOR_MATCHER_BRUTEFORCE_SL2]""" + +CirclesGridFinderParameters_SYMMETRIC_GRID: int +CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID: int +CirclesGridFinderParameters_ASYMMETRIC_GRID: int +CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID: int +CirclesGridFinderParameters_GridType = int +"""One of [CirclesGridFinderParameters_SYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID, CirclesGridFinderParameters_ASYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID]""" + +StereoMatcher_DISP_SHIFT: int +STEREO_MATCHER_DISP_SHIFT: int +StereoMatcher_DISP_SCALE: int +STEREO_MATCHER_DISP_SCALE: int + +StereoBM_PREFILTER_NORMALIZED_RESPONSE: int +STEREO_BM_PREFILTER_NORMALIZED_RESPONSE: int +StereoBM_PREFILTER_XSOBEL: int +STEREO_BM_PREFILTER_XSOBEL: int + +StereoSGBM_MODE_SGBM: int +STEREO_SGBM_MODE_SGBM: int +StereoSGBM_MODE_HH: int +STEREO_SGBM_MODE_HH: int +StereoSGBM_MODE_SGBM_3WAY: int +STEREO_SGBM_MODE_SGBM_3WAY: int +StereoSGBM_MODE_HH4: int +STEREO_SGBM_MODE_HH4: int + +HOGDescriptor_L2Hys: int +HOGDESCRIPTOR_L2HYS: int +HOGDescriptor_HistogramNormType = int +"""One of [HOGDescriptor_L2Hys, HOGDESCRIPTOR_L2HYS]""" + +HOGDescriptor_DEFAULT_NLEVELS: int +HOGDESCRIPTOR_DEFAULT_NLEVELS: int + +HOGDescriptor_DESCR_FORMAT_COL_BY_COL: int +HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL: int +HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW: int +HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW: int +HOGDescriptor_DescriptorStorageFormat = int +"""One of [HOGDescriptor_DESCR_FORMAT_COL_BY_COL, HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL, HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW, HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW]""" + +QRCodeEncoder_MODE_AUTO: int +QRCODE_ENCODER_MODE_AUTO: int +QRCodeEncoder_MODE_NUMERIC: int +QRCODE_ENCODER_MODE_NUMERIC: int +QRCodeEncoder_MODE_ALPHANUMERIC: int +QRCODE_ENCODER_MODE_ALPHANUMERIC: int +QRCodeEncoder_MODE_BYTE: int +QRCODE_ENCODER_MODE_BYTE: int +QRCodeEncoder_MODE_ECI: int +QRCODE_ENCODER_MODE_ECI: int +QRCodeEncoder_MODE_KANJI: int +QRCODE_ENCODER_MODE_KANJI: int +QRCodeEncoder_MODE_STRUCTURED_APPEND: int +QRCODE_ENCODER_MODE_STRUCTURED_APPEND: int +QRCodeEncoder_EncodeMode = int +"""One of [QRCodeEncoder_MODE_AUTO, 
QRCODE_ENCODER_MODE_AUTO, QRCodeEncoder_MODE_NUMERIC, QRCODE_ENCODER_MODE_NUMERIC, QRCodeEncoder_MODE_ALPHANUMERIC, QRCODE_ENCODER_MODE_ALPHANUMERIC, QRCodeEncoder_MODE_BYTE, QRCODE_ENCODER_MODE_BYTE, QRCodeEncoder_MODE_ECI, QRCODE_ENCODER_MODE_ECI, QRCodeEncoder_MODE_KANJI, QRCODE_ENCODER_MODE_KANJI, QRCodeEncoder_MODE_STRUCTURED_APPEND, QRCODE_ENCODER_MODE_STRUCTURED_APPEND]""" + +QRCodeEncoder_CORRECT_LEVEL_L: int +QRCODE_ENCODER_CORRECT_LEVEL_L: int +QRCodeEncoder_CORRECT_LEVEL_M: int +QRCODE_ENCODER_CORRECT_LEVEL_M: int +QRCodeEncoder_CORRECT_LEVEL_Q: int +QRCODE_ENCODER_CORRECT_LEVEL_Q: int +QRCodeEncoder_CORRECT_LEVEL_H: int +QRCODE_ENCODER_CORRECT_LEVEL_H: int +QRCodeEncoder_CorrectionLevel = int +"""One of [QRCodeEncoder_CORRECT_LEVEL_L, QRCODE_ENCODER_CORRECT_LEVEL_L, QRCodeEncoder_CORRECT_LEVEL_M, QRCODE_ENCODER_CORRECT_LEVEL_M, QRCodeEncoder_CORRECT_LEVEL_Q, QRCODE_ENCODER_CORRECT_LEVEL_Q, QRCodeEncoder_CORRECT_LEVEL_H, QRCODE_ENCODER_CORRECT_LEVEL_H]""" + +QRCodeEncoder_ECI_UTF8: int +QRCODE_ENCODER_ECI_UTF8: int +QRCodeEncoder_ECIEncodings = int +"""One of [QRCodeEncoder_ECI_UTF8, QRCODE_ENCODER_ECI_UTF8]""" + +FaceRecognizerSF_FR_COSINE: int +FACE_RECOGNIZER_SF_FR_COSINE: int +FaceRecognizerSF_FR_NORM_L2: int +FACE_RECOGNIZER_SF_FR_NORM_L2: int +FaceRecognizerSF_DisType = int +"""One of [FaceRecognizerSF_FR_COSINE, FACE_RECOGNIZER_SF_FR_COSINE, FaceRecognizerSF_FR_NORM_L2, FACE_RECOGNIZER_SF_FR_NORM_L2]""" + +Stitcher_OK: int +STITCHER_OK: int +Stitcher_ERR_NEED_MORE_IMGS: int +STITCHER_ERR_NEED_MORE_IMGS: int +Stitcher_ERR_HOMOGRAPHY_EST_FAIL: int +STITCHER_ERR_HOMOGRAPHY_EST_FAIL: int +Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL: int +STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL: int +Stitcher_Status = int +"""One of [Stitcher_OK, STITCHER_OK, Stitcher_ERR_NEED_MORE_IMGS, STITCHER_ERR_NEED_MORE_IMGS, Stitcher_ERR_HOMOGRAPHY_EST_FAIL, STITCHER_ERR_HOMOGRAPHY_EST_FAIL, Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL, STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL]""" + +Stitcher_PANORAMA: int +STITCHER_PANORAMA: int +Stitcher_SCANS: int +STITCHER_SCANS: int +Stitcher_Mode = int +"""One of [Stitcher_PANORAMA, STITCHER_PANORAMA, Stitcher_SCANS, STITCHER_SCANS]""" + +DISOpticalFlow_PRESET_ULTRAFAST: int +DISOPTICAL_FLOW_PRESET_ULTRAFAST: int +DISOpticalFlow_PRESET_FAST: int +DISOPTICAL_FLOW_PRESET_FAST: int +DISOpticalFlow_PRESET_MEDIUM: int +DISOPTICAL_FLOW_PRESET_MEDIUM: int + +PCA_DATA_AS_ROW: int +PCA_DATA_AS_COL: int +PCA_USE_AVG: int +PCA_Flags = int +"""One of [PCA_DATA_AS_ROW, PCA_DATA_AS_COL, PCA_USE_AVG]""" + +SVD_MODIFY_A: int +SVD_NO_UV: int +SVD_FULL_UV: int +SVD_Flags = int +"""One of [SVD_MODIFY_A, SVD_NO_UV, SVD_FULL_UV]""" + +RNG_UNIFORM: int +RNG_NORMAL: int + +Formatter_FMT_DEFAULT: int +FORMATTER_FMT_DEFAULT: int +Formatter_FMT_MATLAB: int +FORMATTER_FMT_MATLAB: int +Formatter_FMT_CSV: int +FORMATTER_FMT_CSV: int +Formatter_FMT_PYTHON: int +FORMATTER_FMT_PYTHON: int +Formatter_FMT_NUMPY: int +FORMATTER_FMT_NUMPY: int +Formatter_FMT_C: int +FORMATTER_FMT_C: int +Formatter_FormatType = int +"""One of [Formatter_FMT_DEFAULT, FORMATTER_FMT_DEFAULT, Formatter_FMT_MATLAB, FORMATTER_FMT_MATLAB, Formatter_FMT_CSV, FORMATTER_FMT_CSV, Formatter_FMT_PYTHON, FORMATTER_FMT_PYTHON, Formatter_FMT_NUMPY, FORMATTER_FMT_NUMPY, Formatter_FMT_C, FORMATTER_FMT_C]""" + +_InputArray_KIND_SHIFT: int +_INPUT_ARRAY_KIND_SHIFT: int +_InputArray_FIXED_TYPE: int +_INPUT_ARRAY_FIXED_TYPE: int +_InputArray_FIXED_SIZE: int +_INPUT_ARRAY_FIXED_SIZE: int +_InputArray_KIND_MASK: int +_INPUT_ARRAY_KIND_MASK: int 
+_InputArray_NONE: int +_INPUT_ARRAY_NONE: int +_InputArray_MAT: int +_INPUT_ARRAY_MAT: int +_InputArray_MATX: int +_INPUT_ARRAY_MATX: int +_InputArray_STD_VECTOR: int +_INPUT_ARRAY_STD_VECTOR: int +_InputArray_STD_VECTOR_VECTOR: int +_INPUT_ARRAY_STD_VECTOR_VECTOR: int +_InputArray_STD_VECTOR_MAT: int +_INPUT_ARRAY_STD_VECTOR_MAT: int +_InputArray_EXPR: int +_INPUT_ARRAY_EXPR: int +_InputArray_OPENGL_BUFFER: int +_INPUT_ARRAY_OPENGL_BUFFER: int +_InputArray_CUDA_HOST_MEM: int +_INPUT_ARRAY_CUDA_HOST_MEM: int +_InputArray_CUDA_GPU_MAT: int +_INPUT_ARRAY_CUDA_GPU_MAT: int +_InputArray_UMAT: int +_INPUT_ARRAY_UMAT: int +_InputArray_STD_VECTOR_UMAT: int +_INPUT_ARRAY_STD_VECTOR_UMAT: int +_InputArray_STD_BOOL_VECTOR: int +_INPUT_ARRAY_STD_BOOL_VECTOR: int +_InputArray_STD_VECTOR_CUDA_GPU_MAT: int +_INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT: int +_InputArray_STD_ARRAY: int +_INPUT_ARRAY_STD_ARRAY: int +_InputArray_STD_ARRAY_MAT: int +_INPUT_ARRAY_STD_ARRAY_MAT: int +_InputArray_KindFlag = int +"""One of [_InputArray_KIND_SHIFT, _INPUT_ARRAY_KIND_SHIFT, _InputArray_FIXED_TYPE, _INPUT_ARRAY_FIXED_TYPE, _InputArray_FIXED_SIZE, _INPUT_ARRAY_FIXED_SIZE, _InputArray_KIND_MASK, _INPUT_ARRAY_KIND_MASK, _InputArray_NONE, _INPUT_ARRAY_NONE, _InputArray_MAT, _INPUT_ARRAY_MAT, _InputArray_MATX, _INPUT_ARRAY_MATX, _InputArray_STD_VECTOR, _INPUT_ARRAY_STD_VECTOR, _InputArray_STD_VECTOR_VECTOR, _INPUT_ARRAY_STD_VECTOR_VECTOR, _InputArray_STD_VECTOR_MAT, _INPUT_ARRAY_STD_VECTOR_MAT, _InputArray_EXPR, _INPUT_ARRAY_EXPR, _InputArray_OPENGL_BUFFER, _INPUT_ARRAY_OPENGL_BUFFER, _InputArray_CUDA_HOST_MEM, _INPUT_ARRAY_CUDA_HOST_MEM, _InputArray_CUDA_GPU_MAT, _INPUT_ARRAY_CUDA_GPU_MAT, _InputArray_UMAT, _INPUT_ARRAY_UMAT, _InputArray_STD_VECTOR_UMAT, _INPUT_ARRAY_STD_VECTOR_UMAT, _InputArray_STD_BOOL_VECTOR, _INPUT_ARRAY_STD_BOOL_VECTOR, _InputArray_STD_VECTOR_CUDA_GPU_MAT, _INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT, _InputArray_STD_ARRAY, _INPUT_ARRAY_STD_ARRAY, _InputArray_STD_ARRAY_MAT, _INPUT_ARRAY_STD_ARRAY_MAT]""" + +_OutputArray_DEPTH_MASK_8U: int +_OUTPUT_ARRAY_DEPTH_MASK_8U: int +_OutputArray_DEPTH_MASK_8S: int +_OUTPUT_ARRAY_DEPTH_MASK_8S: int +_OutputArray_DEPTH_MASK_16U: int +_OUTPUT_ARRAY_DEPTH_MASK_16U: int +_OutputArray_DEPTH_MASK_16S: int +_OUTPUT_ARRAY_DEPTH_MASK_16S: int +_OutputArray_DEPTH_MASK_32S: int +_OUTPUT_ARRAY_DEPTH_MASK_32S: int +_OutputArray_DEPTH_MASK_32F: int +_OUTPUT_ARRAY_DEPTH_MASK_32F: int +_OutputArray_DEPTH_MASK_64F: int +_OUTPUT_ARRAY_DEPTH_MASK_64F: int +_OutputArray_DEPTH_MASK_16F: int +_OUTPUT_ARRAY_DEPTH_MASK_16F: int +_OutputArray_DEPTH_MASK_ALL: int +_OUTPUT_ARRAY_DEPTH_MASK_ALL: int +_OutputArray_DEPTH_MASK_ALL_BUT_8S: int +_OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S: int +_OutputArray_DEPTH_MASK_ALL_16F: int +_OUTPUT_ARRAY_DEPTH_MASK_ALL_16F: int +_OutputArray_DEPTH_MASK_FLT: int +_OUTPUT_ARRAY_DEPTH_MASK_FLT: int +_OutputArray_DepthMask = int +"""One of [_OutputArray_DEPTH_MASK_8U, _OUTPUT_ARRAY_DEPTH_MASK_8U, _OutputArray_DEPTH_MASK_8S, _OUTPUT_ARRAY_DEPTH_MASK_8S, _OutputArray_DEPTH_MASK_16U, _OUTPUT_ARRAY_DEPTH_MASK_16U, _OutputArray_DEPTH_MASK_16S, _OUTPUT_ARRAY_DEPTH_MASK_16S, _OutputArray_DEPTH_MASK_32S, _OUTPUT_ARRAY_DEPTH_MASK_32S, _OutputArray_DEPTH_MASK_32F, _OUTPUT_ARRAY_DEPTH_MASK_32F, _OutputArray_DEPTH_MASK_64F, _OUTPUT_ARRAY_DEPTH_MASK_64F, _OutputArray_DEPTH_MASK_16F, _OUTPUT_ARRAY_DEPTH_MASK_16F, _OutputArray_DEPTH_MASK_ALL, _OUTPUT_ARRAY_DEPTH_MASK_ALL, _OutputArray_DEPTH_MASK_ALL_BUT_8S, _OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S, _OutputArray_DEPTH_MASK_ALL_16F, 
_OUTPUT_ARRAY_DEPTH_MASK_ALL_16F, _OutputArray_DEPTH_MASK_FLT, _OUTPUT_ARRAY_DEPTH_MASK_FLT]""" + +UMatData_COPY_ON_MAP: int +UMAT_DATA_COPY_ON_MAP: int +UMatData_HOST_COPY_OBSOLETE: int +UMAT_DATA_HOST_COPY_OBSOLETE: int +UMatData_DEVICE_COPY_OBSOLETE: int +UMAT_DATA_DEVICE_COPY_OBSOLETE: int +UMatData_TEMP_UMAT: int +UMAT_DATA_TEMP_UMAT: int +UMatData_TEMP_COPIED_UMAT: int +UMAT_DATA_TEMP_COPIED_UMAT: int +UMatData_USER_ALLOCATED: int +UMAT_DATA_USER_ALLOCATED: int +UMatData_DEVICE_MEM_MAPPED: int +UMAT_DATA_DEVICE_MEM_MAPPED: int +UMatData_ASYNC_CLEANUP: int +UMAT_DATA_ASYNC_CLEANUP: int +UMatData_MemoryFlag = int +"""One of [UMatData_COPY_ON_MAP, UMAT_DATA_COPY_ON_MAP, UMatData_HOST_COPY_OBSOLETE, UMAT_DATA_HOST_COPY_OBSOLETE, UMatData_DEVICE_COPY_OBSOLETE, UMAT_DATA_DEVICE_COPY_OBSOLETE, UMatData_TEMP_UMAT, UMAT_DATA_TEMP_UMAT, UMatData_TEMP_COPIED_UMAT, UMAT_DATA_TEMP_COPIED_UMAT, UMatData_USER_ALLOCATED, UMAT_DATA_USER_ALLOCATED, UMatData_DEVICE_MEM_MAPPED, UMAT_DATA_DEVICE_MEM_MAPPED, UMatData_ASYNC_CLEANUP, UMAT_DATA_ASYNC_CLEANUP]""" + +Mat_MAGIC_VAL: int +MAT_MAGIC_VAL: int +Mat_AUTO_STEP: int +MAT_AUTO_STEP: int +Mat_CONTINUOUS_FLAG: int +MAT_CONTINUOUS_FLAG: int +Mat_SUBMATRIX_FLAG: int +MAT_SUBMATRIX_FLAG: int +Mat_MAGIC_MASK: int +MAT_MAGIC_MASK: int +Mat_TYPE_MASK: int +MAT_TYPE_MASK: int +Mat_DEPTH_MASK: int +MAT_DEPTH_MASK: int + +SparseMat_MAGIC_VAL: int +SPARSE_MAT_MAGIC_VAL: int +SparseMat_MAX_DIM: int +SPARSE_MAT_MAX_DIM: int +SparseMat_HASH_SCALE: int +SPARSE_MAT_HASH_SCALE: int +SparseMat_HASH_BIT: int +SPARSE_MAT_HASH_BIT: int + +QuatEnum_INT_XYZ: int +QUAT_ENUM_INT_XYZ: int +QuatEnum_INT_XZY: int +QUAT_ENUM_INT_XZY: int +QuatEnum_INT_YXZ: int +QUAT_ENUM_INT_YXZ: int +QuatEnum_INT_YZX: int +QUAT_ENUM_INT_YZX: int +QuatEnum_INT_ZXY: int +QUAT_ENUM_INT_ZXY: int +QuatEnum_INT_ZYX: int +QUAT_ENUM_INT_ZYX: int +QuatEnum_INT_XYX: int +QUAT_ENUM_INT_XYX: int +QuatEnum_INT_XZX: int +QUAT_ENUM_INT_XZX: int +QuatEnum_INT_YXY: int +QUAT_ENUM_INT_YXY: int +QuatEnum_INT_YZY: int +QUAT_ENUM_INT_YZY: int +QuatEnum_INT_ZXZ: int +QUAT_ENUM_INT_ZXZ: int +QuatEnum_INT_ZYZ: int +QUAT_ENUM_INT_ZYZ: int +QuatEnum_EXT_XYZ: int +QUAT_ENUM_EXT_XYZ: int +QuatEnum_EXT_XZY: int +QUAT_ENUM_EXT_XZY: int +QuatEnum_EXT_YXZ: int +QUAT_ENUM_EXT_YXZ: int +QuatEnum_EXT_YZX: int +QUAT_ENUM_EXT_YZX: int +QuatEnum_EXT_ZXY: int +QUAT_ENUM_EXT_ZXY: int +QuatEnum_EXT_ZYX: int +QUAT_ENUM_EXT_ZYX: int +QuatEnum_EXT_XYX: int +QUAT_ENUM_EXT_XYX: int +QuatEnum_EXT_XZX: int +QUAT_ENUM_EXT_XZX: int +QuatEnum_EXT_YXY: int +QUAT_ENUM_EXT_YXY: int +QuatEnum_EXT_YZY: int +QUAT_ENUM_EXT_YZY: int +QuatEnum_EXT_ZXZ: int +QUAT_ENUM_EXT_ZXZ: int +QuatEnum_EXT_ZYZ: int +QUAT_ENUM_EXT_ZYZ: int +QuatEnum_EULER_ANGLES_MAX_VALUE: int +QUAT_ENUM_EULER_ANGLES_MAX_VALUE: int +QuatEnum_EulerAnglesType = int +"""One of [QuatEnum_INT_XYZ, QUAT_ENUM_INT_XYZ, QuatEnum_INT_XZY, QUAT_ENUM_INT_XZY, QuatEnum_INT_YXZ, QUAT_ENUM_INT_YXZ, QuatEnum_INT_YZX, QUAT_ENUM_INT_YZX, QuatEnum_INT_ZXY, QUAT_ENUM_INT_ZXY, QuatEnum_INT_ZYX, QUAT_ENUM_INT_ZYX, QuatEnum_INT_XYX, QUAT_ENUM_INT_XYX, QuatEnum_INT_XZX, QUAT_ENUM_INT_XZX, QuatEnum_INT_YXY, QUAT_ENUM_INT_YXY, QuatEnum_INT_YZY, QUAT_ENUM_INT_YZY, QuatEnum_INT_ZXZ, QUAT_ENUM_INT_ZXZ, QuatEnum_INT_ZYZ, QUAT_ENUM_INT_ZYZ, QuatEnum_EXT_XYZ, QUAT_ENUM_EXT_XYZ, QuatEnum_EXT_XZY, QUAT_ENUM_EXT_XZY, QuatEnum_EXT_YXZ, QUAT_ENUM_EXT_YXZ, QuatEnum_EXT_YZX, QUAT_ENUM_EXT_YZX, QuatEnum_EXT_ZXY, QUAT_ENUM_EXT_ZXY, QuatEnum_EXT_ZYX, QUAT_ENUM_EXT_ZYX, QuatEnum_EXT_XYX, QUAT_ENUM_EXT_XYX, QuatEnum_EXT_XZX, 
QUAT_ENUM_EXT_XZX, QuatEnum_EXT_YXY, QUAT_ENUM_EXT_YXY, QuatEnum_EXT_YZY, QUAT_ENUM_EXT_YZY, QuatEnum_EXT_ZXZ, QUAT_ENUM_EXT_ZXZ, QuatEnum_EXT_ZYZ, QUAT_ENUM_EXT_ZYZ, QuatEnum_EULER_ANGLES_MAX_VALUE, QUAT_ENUM_EULER_ANGLES_MAX_VALUE]""" + +TermCriteria_COUNT: int +TERM_CRITERIA_COUNT: int +TermCriteria_MAX_ITER: int +TERM_CRITERIA_MAX_ITER: int +TermCriteria_EPS: int +TERM_CRITERIA_EPS: int +TermCriteria_Type = int +"""One of [TermCriteria_COUNT, TERM_CRITERIA_COUNT, TermCriteria_MAX_ITER, TERM_CRITERIA_MAX_ITER, TermCriteria_EPS, TERM_CRITERIA_EPS]""" + +GFluidKernel_Kind_Filter: int +GFLUID_KERNEL_KIND_FILTER: int +GFluidKernel_Kind_Resize: int +GFLUID_KERNEL_KIND_RESIZE: int +GFluidKernel_Kind_YUV420toRGB: int +GFLUID_KERNEL_KIND_YUV420TO_RGB: int +GFluidKernel_Kind = int +"""One of [GFluidKernel_Kind_Filter, GFLUID_KERNEL_KIND_FILTER, GFluidKernel_Kind_Resize, GFLUID_KERNEL_KIND_RESIZE, GFluidKernel_Kind_YUV420toRGB, GFLUID_KERNEL_KIND_YUV420TO_RGB]""" + +MediaFrame_Access_R: int +MEDIA_FRAME_ACCESS_R: int +MediaFrame_Access_W: int +MEDIA_FRAME_ACCESS_W: int +MediaFrame_Access = int +"""One of [MediaFrame_Access_R, MEDIA_FRAME_ACCESS_R, MediaFrame_Access_W, MEDIA_FRAME_ACCESS_W]""" + +RMat_Access_R: int +RMAT_ACCESS_R: int +RMat_Access_W: int +RMAT_ACCESS_W: int +RMat_Access = int +"""One of [RMat_Access_R, RMAT_ACCESS_R, RMat_Access_W, RMAT_ACCESS_W]""" + + +# Constants +CV_8U: int +CV_8UC1: int +CV_8UC2: int +CV_8UC3: int +CV_8UC4: int +CV_8S: int +CV_8SC1: int +CV_8SC2: int +CV_8SC3: int +CV_8SC4: int +CV_16U: int +CV_16UC1: int +CV_16UC2: int +CV_16UC3: int +CV_16UC4: int +CV_16S: int +CV_16SC1: int +CV_16SC2: int +CV_16SC3: int +CV_16SC4: int +CV_32S: int +CV_32SC1: int +CV_32SC2: int +CV_32SC3: int +CV_32SC4: int +CV_32F: int +CV_32FC1: int +CV_32FC2: int +CV_32FC3: int +CV_32FC4: int +CV_64F: int +CV_64FC1: int +CV_64FC2: int +CV_64FC3: int +CV_64FC4: int +CV_16F: int +CV_16FC1: int +CV_16FC2: int +CV_16FC3: int +CV_16FC4: int +__version__: str + +# Classes +class Algorithm: + # Functions + def clear(self) -> None: ... + + @_typing.overload + def write(self, fs: FileStorage) -> None: ... + @_typing.overload + def write(self, fs: FileStorage, name: str) -> None: ... + + def read(self, fn: FileNode) -> None: ... + + def empty(self) -> bool: ... + + def save(self, filename: str) -> None: ... + + def getDefaultName(self) -> str: ... + + +class AsyncArray: + # Functions + def __init__(self) -> None: ... + + def release(self) -> None: ... + + @_typing.overload + def get(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def get(self, dst: UMat | None = ...) -> UMat: ... + @_typing.overload + def get(self, timeoutNs: float, dst: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @_typing.overload + def get(self, timeoutNs: float, dst: UMat | None = ...) -> tuple[bool, UMat]: ... + + def wait_for(self, timeoutNs: float) -> bool: ... + + def valid(self) -> bool: ... + + +class FileStorage: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, filename: str, flags: int, encoding: str = ...) -> None: ... + + def open(self, filename: str, flags: int, encoding: str = ...) -> bool: ... + + def isOpened(self) -> bool: ... + + def release(self) -> None: ... + + def releaseAndGetString(self) -> str: ... + + def getFirstTopLevelNode(self) -> FileNode: ... + + def root(self, streamidx: int = ...) -> FileNode: ... + + def getNode(self, nodename: str) -> FileNode: ... 
+ + @_typing.overload + def write(self, name: str, val: int) -> None: ... + @_typing.overload + def write(self, name: str, val: float) -> None: ... + @_typing.overload + def write(self, name: str, val: str) -> None: ... + @_typing.overload + def write(self, name: str, val: cv2.typing.MatLike) -> None: ... + @_typing.overload + def write(self, name: str, val: _typing.Sequence[str]) -> None: ... + + def writeComment(self, comment: str, append: bool = ...) -> None: ... + + def startWriteStruct(self, name: str, flags: int, typeName: str = ...) -> None: ... + + def endWriteStruct(self) -> None: ... + + def getFormat(self) -> int: ... + + +class FileNode: + # Functions + def __init__(self) -> None: ... + + def getNode(self, nodename: str) -> FileNode: ... + + def at(self, i: int) -> FileNode: ... + + def keys(self) -> _typing.Sequence[str]: ... + + def type(self) -> int: ... + + def empty(self) -> bool: ... + + def isNone(self) -> bool: ... + + def isSeq(self) -> bool: ... + + def isMap(self) -> bool: ... + + def isInt(self) -> bool: ... + + def isReal(self) -> bool: ... + + def isString(self) -> bool: ... + + def isNamed(self) -> bool: ... + + def name(self) -> str: ... + + def size(self) -> int: ... + + def rawSize(self) -> int: ... + + def real(self) -> float: ... + + def string(self) -> str: ... + + def mat(self) -> cv2.typing.MatLike: ... + + +class RotatedRect: + center: cv2.typing.Point2f + size: cv2.typing.Size2f + angle: float + + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, center: cv2.typing.Point2f, size: cv2.typing.Size2f, angle: float) -> None: ... + @_typing.overload + def __init__(self, point1: cv2.typing.Point2f, point2: cv2.typing.Point2f, point3: cv2.typing.Point2f) -> None: ... + + def points(self) -> _typing.Sequence[cv2.typing.Point2f]: ... + + def boundingRect(self) -> cv2.typing.Rect: ... + + def boundingRect2f(self) -> cv2.typing.Rect2f: ... + + +class KeyPoint: + pt: cv2.typing.Point2f + size: float + angle: float + response: float + octave: int + class_id: int + + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, x: float, y: float, size: float, angle: float = ..., response: float = ..., octave: int = ..., class_id: int = ...) -> None: ... + + @staticmethod + @_typing.overload + def convert(keypoints: _typing.Sequence[KeyPoint], keypointIndexes: _typing.Sequence[int] = ...) -> _typing.Sequence[cv2.typing.Point2f]: ... + @staticmethod + @_typing.overload + def convert(points2f: _typing.Sequence[cv2.typing.Point2f], size: float = ..., response: float = ..., octave: int = ..., class_id: int = ...) -> _typing.Sequence[KeyPoint]: ... + + @staticmethod + def overlap(kp1: KeyPoint, kp2: KeyPoint) -> float: ... + + +class DMatch: + queryIdx: int + trainIdx: int + imgIdx: int + distance: float + + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, _queryIdx: int, _trainIdx: int, _distance: float) -> None: ... + @_typing.overload + def __init__(self, _queryIdx: int, _trainIdx: int, _imgIdx: int, _distance: float) -> None: ... + + +class TickMeter: + # Functions + def __init__(self) -> None: ... + + def start(self) -> None: ... + + def stop(self) -> None: ... + + def getTimeTicks(self) -> int: ... + + def getTimeMicro(self) -> float: ... + + def getTimeMilli(self) -> float: ... + + def getTimeSec(self) -> float: ... + + def getCounter(self) -> int: ... + + def getFPS(self) -> float: ... 
+ + def getAvgTimeSec(self) -> float: ... + + def getAvgTimeMilli(self) -> float: ... + + def reset(self) -> None: ... + + +class UMat: + offset: int + + # Functions + @_typing.overload + def __init__(self, usageFlags: UMatUsageFlags = ...) -> None: ... + @_typing.overload + def __init__(self, rows: int, cols: int, type: int, usageFlags: UMatUsageFlags = ...) -> None: ... + @_typing.overload + def __init__(self, size: cv2.typing.Size, type: int, usageFlags: UMatUsageFlags = ...) -> None: ... + @_typing.overload + def __init__(self, rows: int, cols: int, type: int, s: cv2.typing.Scalar, usageFlags: UMatUsageFlags = ...) -> None: ... + @_typing.overload + def __init__(self, size: cv2.typing.Size, type: int, s: cv2.typing.Scalar, usageFlags: UMatUsageFlags = ...) -> None: ... + @_typing.overload + def __init__(self, m: UMat) -> None: ... + @_typing.overload + def __init__(self, m: UMat, rowRange: cv2.typing.Range, colRange: cv2.typing.Range = ...) -> None: ... + @_typing.overload + def __init__(self, m: UMat, roi: cv2.typing.Rect) -> None: ... + @_typing.overload + def __init__(self, m: UMat, ranges: _typing.Sequence[cv2.typing.Range]) -> None: ... + + @staticmethod + def queue() -> cv2.typing.IntPointer: ... + + @staticmethod + def context() -> cv2.typing.IntPointer: ... + + def get(self) -> cv2.typing.MatLike: ... + + def isContinuous(self) -> bool: ... + + def isSubmatrix(self) -> bool: ... + + def handle(self, accessFlags: AccessFlag) -> cv2.typing.IntPointer: ... + + +class GeneralizedHough(Algorithm): + # Functions + @_typing.overload + def setTemplate(self, templ: cv2.typing.MatLike, templCenter: cv2.typing.Point = ...) -> None: ... + @_typing.overload + def setTemplate(self, templ: UMat, templCenter: cv2.typing.Point = ...) -> None: ... + @_typing.overload + def setTemplate(self, edges: cv2.typing.MatLike, dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, templCenter: cv2.typing.Point = ...) -> None: ... + @_typing.overload + def setTemplate(self, edges: UMat, dx: UMat, dy: UMat, templCenter: cv2.typing.Point = ...) -> None: ... + + @_typing.overload + def detect(self, image: cv2.typing.MatLike, positions: cv2.typing.MatLike | None = ..., votes: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def detect(self, image: UMat, positions: UMat | None = ..., votes: UMat | None = ...) -> tuple[UMat, UMat]: ... + @_typing.overload + def detect(self, edges: cv2.typing.MatLike, dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, positions: cv2.typing.MatLike | None = ..., votes: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def detect(self, edges: UMat, dx: UMat, dy: UMat, positions: UMat | None = ..., votes: UMat | None = ...) -> tuple[UMat, UMat]: ... + + def setCannyLowThresh(self, cannyLowThresh: int) -> None: ... + + def getCannyLowThresh(self) -> int: ... + + def setCannyHighThresh(self, cannyHighThresh: int) -> None: ... + + def getCannyHighThresh(self) -> int: ... + + def setMinDist(self, minDist: float) -> None: ... + + def getMinDist(self) -> float: ... + + def setDp(self, dp: float) -> None: ... + + def getDp(self) -> float: ... + + def setMaxBufferSize(self, maxBufferSize: int) -> None: ... + + def getMaxBufferSize(self) -> int: ... + + +class GeneralizedHoughBallard(GeneralizedHough): + # Functions + def setLevels(self, levels: int) -> None: ... + + def getLevels(self) -> int: ... + + def setVotesThreshold(self, votesThreshold: int) -> None: ... 
+ + def getVotesThreshold(self) -> int: ... + + +class GeneralizedHoughGuil(GeneralizedHough): + # Functions + def setXi(self, xi: float) -> None: ... + + def getXi(self) -> float: ... + + def setLevels(self, levels: int) -> None: ... + + def getLevels(self) -> int: ... + + def setAngleEpsilon(self, angleEpsilon: float) -> None: ... + + def getAngleEpsilon(self) -> float: ... + + def setMinAngle(self, minAngle: float) -> None: ... + + def getMinAngle(self) -> float: ... + + def setMaxAngle(self, maxAngle: float) -> None: ... + + def getMaxAngle(self) -> float: ... + + def setAngleStep(self, angleStep: float) -> None: ... + + def getAngleStep(self) -> float: ... + + def setAngleThresh(self, angleThresh: int) -> None: ... + + def getAngleThresh(self) -> int: ... + + def setMinScale(self, minScale: float) -> None: ... + + def getMinScale(self) -> float: ... + + def setMaxScale(self, maxScale: float) -> None: ... + + def getMaxScale(self) -> float: ... + + def setScaleStep(self, scaleStep: float) -> None: ... + + def getScaleStep(self) -> float: ... + + def setScaleThresh(self, scaleThresh: int) -> None: ... + + def getScaleThresh(self) -> int: ... + + def setPosThresh(self, posThresh: int) -> None: ... + + def getPosThresh(self) -> int: ... + + +class CLAHE(Algorithm): + # Functions + @_typing.overload + def apply(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def apply(self, src: UMat, dst: UMat | None = ...) -> UMat: ... + + def setClipLimit(self, clipLimit: float) -> None: ... + + def getClipLimit(self) -> float: ... + + def setTilesGridSize(self, tileGridSize: cv2.typing.Size) -> None: ... + + def getTilesGridSize(self) -> cv2.typing.Size: ... + + def collectGarbage(self) -> None: ... + + +class Subdiv2D: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, rect: cv2.typing.Rect) -> None: ... + + def initDelaunay(self, rect: cv2.typing.Rect) -> None: ... + + @_typing.overload + def insert(self, pt: cv2.typing.Point2f) -> int: ... + @_typing.overload + def insert(self, ptvec: _typing.Sequence[cv2.typing.Point2f]) -> None: ... + + def locate(self, pt: cv2.typing.Point2f) -> tuple[int, int, int]: ... + + def findNearest(self, pt: cv2.typing.Point2f) -> tuple[int, cv2.typing.Point2f]: ... + + def getEdgeList(self) -> _typing.Sequence[cv2.typing.Vec4f]: ... + + def getLeadingEdgeList(self) -> _typing.Sequence[int]: ... + + def getTriangleList(self) -> _typing.Sequence[cv2.typing.Vec6f]: ... + + def getVoronoiFacetList(self, idx: _typing.Sequence[int]) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point2f]], _typing.Sequence[cv2.typing.Point2f]]: ... + + def getVertex(self, vertex: int) -> tuple[cv2.typing.Point2f, int]: ... + + def getEdge(self, edge: int, nextEdgeType: int) -> int: ... + + def nextEdge(self, edge: int) -> int: ... + + def rotateEdge(self, edge: int, rotate: int) -> int: ... + + def symEdge(self, edge: int) -> int: ... + + def edgeOrg(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ... + + def edgeDst(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ... + + +class LineSegmentDetector(Algorithm): + # Functions + @_typing.overload + def detect(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike | None = ..., width: cv2.typing.MatLike | None = ..., prec: cv2.typing.MatLike | None = ..., nfa: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... 
+ @_typing.overload + def detect(self, image: UMat, lines: UMat | None = ..., width: UMat | None = ..., prec: UMat | None = ..., nfa: UMat | None = ...) -> tuple[UMat, UMat, UMat, UMat]: ... + + @_typing.overload + def drawSegments(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + @_typing.overload + def drawSegments(self, image: UMat, lines: UMat) -> UMat: ... + + @_typing.overload + def compareSegments(self, size: cv2.typing.Size, lines1: cv2.typing.MatLike, lines2: cv2.typing.MatLike, image: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ... + @_typing.overload + def compareSegments(self, size: cv2.typing.Size, lines1: UMat, lines2: UMat, image: UMat | None = ...) -> tuple[int, UMat]: ... + + +class Tonemap(Algorithm): + # Functions + @_typing.overload + def process(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: UMat, dst: UMat | None = ...) -> UMat: ... + + def getGamma(self) -> float: ... + + def setGamma(self, gamma: float) -> None: ... + + +class TonemapDrago(Tonemap): + # Functions + def getSaturation(self) -> float: ... + + def setSaturation(self, saturation: float) -> None: ... + + def getBias(self) -> float: ... + + def setBias(self, bias: float) -> None: ... + + +class TonemapReinhard(Tonemap): + # Functions + def getIntensity(self) -> float: ... + + def setIntensity(self, intensity: float) -> None: ... + + def getLightAdaptation(self) -> float: ... + + def setLightAdaptation(self, light_adapt: float) -> None: ... + + def getColorAdaptation(self) -> float: ... + + def setColorAdaptation(self, color_adapt: float) -> None: ... + + +class TonemapMantiuk(Tonemap): + # Functions + def getScale(self) -> float: ... + + def setScale(self, scale: float) -> None: ... + + def getSaturation(self) -> float: ... + + def setSaturation(self, saturation: float) -> None: ... + + +class AlignExposures(Algorithm): + # Functions + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike) -> None: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], dst: _typing.Sequence[cv2.typing.MatLike], times: UMat, response: UMat) -> None: ... + + +class AlignMTB(AlignExposures): + # Functions + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike) -> None: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], dst: _typing.Sequence[cv2.typing.MatLike], times: UMat, response: UMat) -> None: ... + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike]) -> None: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], dst: _typing.Sequence[cv2.typing.MatLike]) -> None: ... + + @_typing.overload + def calculateShift(self, img0: cv2.typing.MatLike, img1: cv2.typing.MatLike) -> cv2.typing.Point: ... + @_typing.overload + def calculateShift(self, img0: UMat, img1: UMat) -> cv2.typing.Point: ... + + @_typing.overload + def shiftMat(self, src: cv2.typing.MatLike, shift: cv2.typing.Point, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def shiftMat(self, src: UMat, shift: cv2.typing.Point, dst: UMat | None = ...) -> UMat: ... 
+ + @_typing.overload + def computeBitmaps(self, img: cv2.typing.MatLike, tb: cv2.typing.MatLike | None = ..., eb: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def computeBitmaps(self, img: UMat, tb: UMat | None = ..., eb: UMat | None = ...) -> tuple[UMat, UMat]: ... + + def getMaxBits(self) -> int: ... + + def setMaxBits(self, max_bits: int) -> None: ... + + def getExcludeRange(self) -> int: ... + + def setExcludeRange(self, exclude_range: int) -> None: ... + + def getCut(self) -> bool: ... + + def setCut(self, value: bool) -> None: ... + + +class CalibrateCRF(Algorithm): + # Functions + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... + + +class CalibrateDebevec(CalibrateCRF): + # Functions + def getLambda(self) -> float: ... + + def setLambda(self, lambda_: float) -> None: ... + + def getSamples(self) -> int: ... + + def setSamples(self, samples: int) -> None: ... + + def getRandom(self) -> bool: ... + + def setRandom(self, random: bool) -> None: ... + + +class CalibrateRobertson(CalibrateCRF): + # Functions + def getMaxIter(self) -> int: ... + + def setMaxIter(self, max_iter: int) -> None: ... + + def getThreshold(self) -> float: ... + + def setThreshold(self, threshold: float) -> None: ... + + def getRadiance(self) -> cv2.typing.MatLike: ... + + +class MergeExposures(Algorithm): + # Functions + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + + +class MergeDebevec(MergeExposures): + # Functions + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... + + +class MergeMertens(MergeExposures): + # Functions + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + + def getContrastWeight(self) -> float: ... + + def setContrastWeight(self, contrast_weiht: float) -> None: ... + + def getSaturationWeight(self) -> float: ... + + def setSaturationWeight(self, saturation_weight: float) -> None: ... 
+ + def getExposureWeight(self) -> float: ... + + def setExposureWeight(self, exposure_weight: float) -> None: ... + + +class MergeRobertson(MergeExposures): + # Functions + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + @_typing.overload + def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def process(self, src: _typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... + + +class Feature2D: + # Functions + @_typing.overload + def detect(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> _typing.Sequence[KeyPoint]: ... + @_typing.overload + def detect(self, image: UMat, mask: UMat | None = ...) -> _typing.Sequence[KeyPoint]: ... + @_typing.overload + def detect(self, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[_typing.Sequence[KeyPoint]]: ... + @_typing.overload + def detect(self, images: _typing.Sequence[UMat], masks: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[_typing.Sequence[KeyPoint]]: ... + + @_typing.overload + def compute(self, image: cv2.typing.MatLike, keypoints: _typing.Sequence[KeyPoint], descriptors: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[KeyPoint], cv2.typing.MatLike]: ... + @_typing.overload + def compute(self, image: UMat, keypoints: _typing.Sequence[KeyPoint], descriptors: UMat | None = ...) -> tuple[_typing.Sequence[KeyPoint], UMat]: ... + @_typing.overload + def compute(self, images: _typing.Sequence[cv2.typing.MatLike], keypoints: _typing.Sequence[_typing.Sequence[KeyPoint]], descriptors: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[_typing.Sequence[KeyPoint]], _typing.Sequence[cv2.typing.MatLike]]: ... + @_typing.overload + def compute(self, images: _typing.Sequence[UMat], keypoints: _typing.Sequence[_typing.Sequence[KeyPoint]], descriptors: _typing.Sequence[UMat] | None = ...) -> tuple[_typing.Sequence[_typing.Sequence[KeyPoint]], _typing.Sequence[UMat]]: ... + + @_typing.overload + def detectAndCompute(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike, descriptors: cv2.typing.MatLike | None = ..., useProvidedKeypoints: bool = ...) -> tuple[_typing.Sequence[KeyPoint], cv2.typing.MatLike]: ... + @_typing.overload + def detectAndCompute(self, image: UMat, mask: UMat, descriptors: UMat | None = ..., useProvidedKeypoints: bool = ...) -> tuple[_typing.Sequence[KeyPoint], UMat]: ... + + def descriptorSize(self) -> int: ... + + def descriptorType(self) -> int: ... + + def defaultNorm(self) -> int: ... + + @_typing.overload + def write(self, fileName: str) -> None: ... + @_typing.overload + def write(self, fs: FileStorage, name: str) -> None: ... + + @_typing.overload + def read(self, fileName: str) -> None: ... + @_typing.overload + def read(self, arg1: FileNode) -> None: ... + + def empty(self) -> bool: ... + + def getDefaultName(self) -> str: ... + + +class AffineFeature(Feature2D): + # Functions + @classmethod + def create(cls, backend: Feature2D, maxTilt: int = ..., minTilt: int = ..., tiltStep: float = ..., rotateStepBase: float = ...) -> AffineFeature: ... 
+ + def setViewParams(self, tilts: _typing.Sequence[float], rolls: _typing.Sequence[float]) -> None: ... + + def getViewParams(self, tilts: _typing.Sequence[float], rolls: _typing.Sequence[float]) -> None: ... + + def getDefaultName(self) -> str: ... + + +class SIFT(Feature2D): + # Functions + @classmethod + @_typing.overload + def create(cls, nfeatures: int = ..., nOctaveLayers: int = ..., contrastThreshold: float = ..., edgeThreshold: float = ..., sigma: float = ..., enable_precise_upscale: bool = ...) -> SIFT: ... + @classmethod + @_typing.overload + def create(cls, nfeatures: int, nOctaveLayers: int, contrastThreshold: float, edgeThreshold: float, sigma: float, descriptorType: int, enable_precise_upscale: bool = ...) -> SIFT: ... + + def getDefaultName(self) -> str: ... + + def setNFeatures(self, maxFeatures: int) -> None: ... + + def getNFeatures(self) -> int: ... + + def setNOctaveLayers(self, nOctaveLayers: int) -> None: ... + + def getNOctaveLayers(self) -> int: ... + + def setContrastThreshold(self, contrastThreshold: float) -> None: ... + + def getContrastThreshold(self) -> float: ... + + def setEdgeThreshold(self, edgeThreshold: float) -> None: ... + + def getEdgeThreshold(self) -> float: ... + + def setSigma(self, sigma: float) -> None: ... + + def getSigma(self) -> float: ... + + +class BRISK(Feature2D): + # Functions + @classmethod + @_typing.overload + def create(cls, thresh: int = ..., octaves: int = ..., patternScale: float = ...) -> BRISK: ... + @classmethod + @_typing.overload + def create(cls, radiusList: _typing.Sequence[float], numberList: _typing.Sequence[int], dMax: float = ..., dMin: float = ..., indexChange: _typing.Sequence[int] = ...) -> BRISK: ... + @classmethod + @_typing.overload + def create(cls, thresh: int, octaves: int, radiusList: _typing.Sequence[float], numberList: _typing.Sequence[int], dMax: float = ..., dMin: float = ..., indexChange: _typing.Sequence[int] = ...) -> BRISK: ... + + def getDefaultName(self) -> str: ... + + def setThreshold(self, threshold: int) -> None: ... + + def getThreshold(self) -> int: ... + + def setOctaves(self, octaves: int) -> None: ... + + def getOctaves(self) -> int: ... + + def setPatternScale(self, patternScale: float) -> None: ... + + def getPatternScale(self) -> float: ... + + +class ORB(Feature2D): + # Functions + @classmethod + def create(cls, nfeatures: int = ..., scaleFactor: float = ..., nlevels: int = ..., edgeThreshold: int = ..., firstLevel: int = ..., WTA_K: int = ..., scoreType: ORB_ScoreType = ..., patchSize: int = ..., fastThreshold: int = ...) -> ORB: ... + + def setMaxFeatures(self, maxFeatures: int) -> None: ... + + def getMaxFeatures(self) -> int: ... + + def setScaleFactor(self, scaleFactor: float) -> None: ... + + def getScaleFactor(self) -> float: ... + + def setNLevels(self, nlevels: int) -> None: ... + + def getNLevels(self) -> int: ... + + def setEdgeThreshold(self, edgeThreshold: int) -> None: ... + + def getEdgeThreshold(self) -> int: ... + + def setFirstLevel(self, firstLevel: int) -> None: ... + + def getFirstLevel(self) -> int: ... + + def setWTA_K(self, wta_k: int) -> None: ... + + def getWTA_K(self) -> int: ... + + def setScoreType(self, scoreType: ORB_ScoreType) -> None: ... + + def getScoreType(self) -> ORB_ScoreType: ... + + def setPatchSize(self, patchSize: int) -> None: ... + + def getPatchSize(self) -> int: ... + + def setFastThreshold(self, fastThreshold: int) -> None: ... + + def getFastThreshold(self) -> int: ... + + def getDefaultName(self) -> str: ... 
+ + +class MSER(Feature2D): + # Functions + @classmethod + def create(cls, delta: int = ..., min_area: int = ..., max_area: int = ..., max_variation: float = ..., min_diversity: float = ..., max_evolution: int = ..., area_threshold: float = ..., min_margin: float = ..., edge_blur_size: int = ...) -> MSER: ... + + @_typing.overload + def detectRegions(self, image: cv2.typing.MatLike) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[cv2.typing.Rect]]: ... + @_typing.overload + def detectRegions(self, image: UMat) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[cv2.typing.Rect]]: ... + + def setDelta(self, delta: int) -> None: ... + + def getDelta(self) -> int: ... + + def setMinArea(self, minArea: int) -> None: ... + + def getMinArea(self) -> int: ... + + def setMaxArea(self, maxArea: int) -> None: ... + + def getMaxArea(self) -> int: ... + + def setMaxVariation(self, maxVariation: float) -> None: ... + + def getMaxVariation(self) -> float: ... + + def setMinDiversity(self, minDiversity: float) -> None: ... + + def getMinDiversity(self) -> float: ... + + def setMaxEvolution(self, maxEvolution: int) -> None: ... + + def getMaxEvolution(self) -> int: ... + + def setAreaThreshold(self, areaThreshold: float) -> None: ... + + def getAreaThreshold(self) -> float: ... + + def setMinMargin(self, min_margin: float) -> None: ... + + def getMinMargin(self) -> float: ... + + def setEdgeBlurSize(self, edge_blur_size: int) -> None: ... + + def getEdgeBlurSize(self) -> int: ... + + def setPass2Only(self, f: bool) -> None: ... + + def getPass2Only(self) -> bool: ... + + def getDefaultName(self) -> str: ... + + +class FastFeatureDetector(Feature2D): + # Functions + @classmethod + def create(cls, threshold: int = ..., nonmaxSuppression: bool = ..., type: FastFeatureDetector_DetectorType = ...) -> FastFeatureDetector: ... + + def setThreshold(self, threshold: int) -> None: ... + + def getThreshold(self) -> int: ... + + def setNonmaxSuppression(self, f: bool) -> None: ... + + def getNonmaxSuppression(self) -> bool: ... + + def setType(self, type: FastFeatureDetector_DetectorType) -> None: ... + + def getType(self) -> FastFeatureDetector_DetectorType: ... + + def getDefaultName(self) -> str: ... + + +class AgastFeatureDetector(Feature2D): + # Functions + @classmethod + def create(cls, threshold: int = ..., nonmaxSuppression: bool = ..., type: AgastFeatureDetector_DetectorType = ...) -> AgastFeatureDetector: ... + + def setThreshold(self, threshold: int) -> None: ... + + def getThreshold(self) -> int: ... + + def setNonmaxSuppression(self, f: bool) -> None: ... + + def getNonmaxSuppression(self) -> bool: ... + + def setType(self, type: AgastFeatureDetector_DetectorType) -> None: ... + + def getType(self) -> AgastFeatureDetector_DetectorType: ... + + def getDefaultName(self) -> str: ... + + +class GFTTDetector(Feature2D): + # Functions + @classmethod + @_typing.overload + def create(cls, maxCorners: int = ..., qualityLevel: float = ..., minDistance: float = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> GFTTDetector: ... + @classmethod + @_typing.overload + def create(cls, maxCorners: int, qualityLevel: float, minDistance: float, blockSize: int, gradiantSize: int, useHarrisDetector: bool = ..., k: float = ...) -> GFTTDetector: ... + + def setMaxFeatures(self, maxFeatures: int) -> None: ... + + def getMaxFeatures(self) -> int: ... + + def setQualityLevel(self, qlevel: float) -> None: ... 
+ + def getQualityLevel(self) -> float: ... + + def setMinDistance(self, minDistance: float) -> None: ... + + def getMinDistance(self) -> float: ... + + def setBlockSize(self, blockSize: int) -> None: ... + + def getBlockSize(self) -> int: ... + + def setGradientSize(self, gradientSize_: int) -> None: ... + + def getGradientSize(self) -> int: ... + + def setHarrisDetector(self, val: bool) -> None: ... + + def getHarrisDetector(self) -> bool: ... + + def setK(self, k: float) -> None: ... + + def getK(self) -> float: ... + + def getDefaultName(self) -> str: ... + + +class SimpleBlobDetector(Feature2D): + # Classes + class Params: + thresholdStep: float + minThreshold: float + maxThreshold: float + minRepeatability: int + minDistBetweenBlobs: float + filterByColor: bool + blobColor: int + filterByArea: bool + minArea: float + maxArea: float + filterByCircularity: bool + minCircularity: float + maxCircularity: float + filterByInertia: bool + minInertiaRatio: float + maxInertiaRatio: float + filterByConvexity: bool + minConvexity: float + maxConvexity: float + collectContours: bool + + # Functions + def __init__(self) -> None: ... + + + + # Functions + @classmethod + def create(cls, parameters: SimpleBlobDetector.Params = ...) -> SimpleBlobDetector: ... + + def setParams(self, params: SimpleBlobDetector.Params) -> None: ... + + def getParams(self) -> SimpleBlobDetector.Params: ... + + def getDefaultName(self) -> str: ... + + def getBlobContours(self) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ... + + +class KAZE(Feature2D): + # Functions + @classmethod + def create(cls, extended: bool = ..., upright: bool = ..., threshold: float = ..., nOctaves: int = ..., nOctaveLayers: int = ..., diffusivity: KAZE_DiffusivityType = ...) -> KAZE: ... + + def setExtended(self, extended: bool) -> None: ... + + def getExtended(self) -> bool: ... + + def setUpright(self, upright: bool) -> None: ... + + def getUpright(self) -> bool: ... + + def setThreshold(self, threshold: float) -> None: ... + + def getThreshold(self) -> float: ... + + def setNOctaves(self, octaves: int) -> None: ... + + def getNOctaves(self) -> int: ... + + def setNOctaveLayers(self, octaveLayers: int) -> None: ... + + def getNOctaveLayers(self) -> int: ... + + def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ... + + def getDiffusivity(self) -> KAZE_DiffusivityType: ... + + def getDefaultName(self) -> str: ... + + +class AKAZE(Feature2D): + # Functions + @classmethod + def create(cls, descriptor_type: AKAZE_DescriptorType = ..., descriptor_size: int = ..., descriptor_channels: int = ..., threshold: float = ..., nOctaves: int = ..., nOctaveLayers: int = ..., diffusivity: KAZE_DiffusivityType = ..., max_points: int = ...) -> AKAZE: ... + + def setDescriptorType(self, dtype: AKAZE_DescriptorType) -> None: ... + + def getDescriptorType(self) -> AKAZE_DescriptorType: ... + + def setDescriptorSize(self, dsize: int) -> None: ... + + def getDescriptorSize(self) -> int: ... + + def setDescriptorChannels(self, dch: int) -> None: ... + + def getDescriptorChannels(self) -> int: ... + + def setThreshold(self, threshold: float) -> None: ... + + def getThreshold(self) -> float: ... + + def setNOctaves(self, octaves: int) -> None: ... + + def getNOctaves(self) -> int: ... + + def setNOctaveLayers(self, octaveLayers: int) -> None: ... + + def getNOctaveLayers(self) -> int: ... + + def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ... + + def getDiffusivity(self) -> KAZE_DiffusivityType: ... 
+ + def getDefaultName(self) -> str: ... + + def setMaxPoints(self, max_points: int) -> None: ... + + def getMaxPoints(self) -> int: ... + + +class DescriptorMatcher(Algorithm): + # Functions + @_typing.overload + def add(self, descriptors: _typing.Sequence[cv2.typing.MatLike]) -> None: ... + @_typing.overload + def add(self, descriptors: _typing.Sequence[UMat]) -> None: ... + + def getTrainDescriptors(self) -> _typing.Sequence[cv2.typing.MatLike]: ... + + def clear(self) -> None: ... + + def empty(self) -> bool: ... + + def isMaskSupported(self) -> bool: ... + + def train(self) -> None: ... + + @_typing.overload + def match(self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> _typing.Sequence[DMatch]: ... + @_typing.overload + def match(self, queryDescriptors: UMat, trainDescriptors: UMat, mask: UMat | None = ...) -> _typing.Sequence[DMatch]: ... + @_typing.overload + def match(self, queryDescriptors: cv2.typing.MatLike, masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[DMatch]: ... + @_typing.overload + def match(self, queryDescriptors: UMat, masks: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[DMatch]: ... + + @_typing.overload + def knnMatch(self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike, k: int, mask: cv2.typing.MatLike | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + @_typing.overload + def knnMatch(self, queryDescriptors: UMat, trainDescriptors: UMat, k: int, mask: UMat | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + @_typing.overload + def knnMatch(self, queryDescriptors: cv2.typing.MatLike, k: int, masks: _typing.Sequence[cv2.typing.MatLike] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + @_typing.overload + def knnMatch(self, queryDescriptors: UMat, k: int, masks: _typing.Sequence[UMat] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + + @_typing.overload + def radiusMatch(self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike, maxDistance: float, mask: cv2.typing.MatLike | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + @_typing.overload + def radiusMatch(self, queryDescriptors: UMat, trainDescriptors: UMat, maxDistance: float, mask: UMat | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + @_typing.overload + def radiusMatch(self, queryDescriptors: cv2.typing.MatLike, maxDistance: float, masks: _typing.Sequence[cv2.typing.MatLike] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + @_typing.overload + def radiusMatch(self, queryDescriptors: UMat, maxDistance: float, masks: _typing.Sequence[UMat] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ... + + @_typing.overload + def write(self, fileName: str) -> None: ... + @_typing.overload + def write(self, fs: FileStorage, name: str) -> None: ... + + @_typing.overload + def read(self, fileName: str) -> None: ... + @_typing.overload + def read(self, arg1: FileNode) -> None: ... + + def clone(self, emptyTrainData: bool = ...) -> DescriptorMatcher: ... + + @classmethod + @_typing.overload + def create(cls, descriptorMatcherType: str) -> DescriptorMatcher: ... 
+ @classmethod + @_typing.overload + def create(cls, matcherType: DescriptorMatcher_MatcherType) -> DescriptorMatcher: ... + + +class BFMatcher(DescriptorMatcher): + # Functions + def __init__(self, normType: int = ..., crossCheck: bool = ...) -> None: ... + + @classmethod + def create(cls, normType: int = ..., crossCheck: bool = ...) -> BFMatcher: ... + + +class FlannBasedMatcher(DescriptorMatcher): + # Functions + def __init__(self, indexParams: cv2.typing.IndexParams = ..., searchParams: cv2.typing.SearchParams = ...) -> None: ... + + @classmethod + def create(cls) -> FlannBasedMatcher: ... + + +class BOWTrainer: + # Functions + def add(self, descriptors: cv2.typing.MatLike) -> None: ... + + def getDescriptors(self) -> _typing.Sequence[cv2.typing.MatLike]: ... + + def descriptorsCount(self) -> int: ... + + def clear(self) -> None: ... + + @_typing.overload + def cluster(self) -> cv2.typing.MatLike: ... + @_typing.overload + def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + + +class BOWKMeansTrainer(BOWTrainer): + # Functions + def __init__(self, clusterCount: int, termcrit: cv2.typing.TermCriteria = ..., attempts: int = ..., flags: int = ...) -> None: ... + + @_typing.overload + def cluster(self) -> cv2.typing.MatLike: ... + @_typing.overload + def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + + +class BOWImgDescriptorExtractor: + # Functions + def __init__(self, dextractor: Feature2D, dmatcher: DescriptorMatcher) -> None: ... + + def setVocabulary(self, vocabulary: cv2.typing.MatLike) -> None: ... + + def getVocabulary(self) -> cv2.typing.MatLike: ... + + def compute(self, image: cv2.typing.MatLike, keypoints: _typing.Sequence[KeyPoint], imgDescriptor: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + + def descriptorSize(self) -> int: ... + + def descriptorType(self) -> int: ... + + +class VideoCapture: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, filename: str, apiPreference: int = ...) -> None: ... + @_typing.overload + def __init__(self, filename: str, apiPreference: int, params: _typing.Sequence[int]) -> None: ... + @_typing.overload + def __init__(self, index: int, apiPreference: int = ...) -> None: ... + @_typing.overload + def __init__(self, index: int, apiPreference: int, params: _typing.Sequence[int]) -> None: ... + + @_typing.overload + def open(self, filename: str, apiPreference: int = ...) -> bool: ... + @_typing.overload + def open(self, filename: str, apiPreference: int, params: _typing.Sequence[int]) -> bool: ... + @_typing.overload + def open(self, index: int, apiPreference: int = ...) -> bool: ... + @_typing.overload + def open(self, index: int, apiPreference: int, params: _typing.Sequence[int]) -> bool: ... + + def isOpened(self) -> bool: ... + + def release(self) -> None: ... + + def grab(self) -> bool: ... + + @_typing.overload + def retrieve(self, image: cv2.typing.MatLike | None = ..., flag: int = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @_typing.overload + def retrieve(self, image: UMat | None = ..., flag: int = ...) -> tuple[bool, UMat]: ... + + @_typing.overload + def read(self, image: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @_typing.overload + def read(self, image: UMat | None = ...) -> tuple[bool, UMat]: ... + + def set(self, propId: int, value: float) -> bool: ... + + def get(self, propId: int) -> float: ... + + def getBackendName(self) -> str: ... 
+ + def setExceptionMode(self, enable: bool) -> None: ... + + def getExceptionMode(self) -> bool: ... + + @staticmethod + def waitAny(streams: _typing.Sequence[VideoCapture], timeoutNs: int = ...) -> tuple[bool, _typing.Sequence[int]]: ... + + +class VideoWriter: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> None: ... + @_typing.overload + def __init__(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> None: ... + @_typing.overload + def __init__(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> None: ... + @_typing.overload + def __init__(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> None: ... + + @_typing.overload + def open(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> bool: ... + @_typing.overload + def open(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> bool: ... + @_typing.overload + def open(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> bool: ... + @_typing.overload + def open(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> bool: ... + + def isOpened(self) -> bool: ... + + def release(self) -> None: ... + + @_typing.overload + def write(self, image: cv2.typing.MatLike) -> None: ... + @_typing.overload + def write(self, image: UMat) -> None: ... + + def set(self, propId: int, value: float) -> bool: ... + + def get(self, propId: int) -> float: ... + + @staticmethod + def fourcc(c1: str, c2: str, c3: str, c4: str) -> int: ... + + def getBackendName(self) -> str: ... + + +class UsacParams: + confidence: float + isParallel: bool + loIterations: int + loMethod: LocalOptimMethod + loSampleSize: int + maxIterations: int + neighborsSearch: NeighborSearchMethod + randomGeneratorState: int + sampler: SamplingMethod + score: ScoreMethod + threshold: float + final_polisher: PolishingMethod + final_polisher_iterations: int + + # Functions + def __init__(self) -> None: ... + + +class CirclesGridFinderParameters: + densityNeighborhoodSize: cv2.typing.Size2f + minDensity: float + kmeansAttempts: int + minDistanceToAddKeypoint: int + keypointScale: int + minGraphConfidence: float + vertexGain: float + vertexPenalty: float + existingVertexGain: float + edgeGain: float + edgePenalty: float + convexHullFactor: float + minRNGEdgeSwitchDist: float + squareSize: float + maxRectifiedDistance: float + + # Functions + def __init__(self) -> None: ... + + +class StereoMatcher(Algorithm): + # Functions + @_typing.overload + def compute(self, left: cv2.typing.MatLike, right: cv2.typing.MatLike, disparity: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def compute(self, left: UMat, right: UMat, disparity: UMat | None = ...) -> UMat: ... + + def getMinDisparity(self) -> int: ... + + def setMinDisparity(self, minDisparity: int) -> None: ... + + def getNumDisparities(self) -> int: ... + + def setNumDisparities(self, numDisparities: int) -> None: ... + + def getBlockSize(self) -> int: ... + + def setBlockSize(self, blockSize: int) -> None: ... 
+ + def getSpeckleWindowSize(self) -> int: ... + + def setSpeckleWindowSize(self, speckleWindowSize: int) -> None: ... + + def getSpeckleRange(self) -> int: ... + + def setSpeckleRange(self, speckleRange: int) -> None: ... + + def getDisp12MaxDiff(self) -> int: ... + + def setDisp12MaxDiff(self, disp12MaxDiff: int) -> None: ... + + +class StereoBM(StereoMatcher): + # Functions + def getPreFilterType(self) -> int: ... + + def setPreFilterType(self, preFilterType: int) -> None: ... + + def getPreFilterSize(self) -> int: ... + + def setPreFilterSize(self, preFilterSize: int) -> None: ... + + def getPreFilterCap(self) -> int: ... + + def setPreFilterCap(self, preFilterCap: int) -> None: ... + + def getTextureThreshold(self) -> int: ... + + def setTextureThreshold(self, textureThreshold: int) -> None: ... + + def getUniquenessRatio(self) -> int: ... + + def setUniquenessRatio(self, uniquenessRatio: int) -> None: ... + + def getSmallerBlockSize(self) -> int: ... + + def setSmallerBlockSize(self, blockSize: int) -> None: ... + + def getROI1(self) -> cv2.typing.Rect: ... + + def setROI1(self, roi1: cv2.typing.Rect) -> None: ... + + def getROI2(self) -> cv2.typing.Rect: ... + + def setROI2(self, roi2: cv2.typing.Rect) -> None: ... + + @classmethod + def create(cls, numDisparities: int = ..., blockSize: int = ...) -> StereoBM: ... + + +class StereoSGBM(StereoMatcher): + # Functions + def getPreFilterCap(self) -> int: ... + + def setPreFilterCap(self, preFilterCap: int) -> None: ... + + def getUniquenessRatio(self) -> int: ... + + def setUniquenessRatio(self, uniquenessRatio: int) -> None: ... + + def getP1(self) -> int: ... + + def setP1(self, P1: int) -> None: ... + + def getP2(self) -> int: ... + + def setP2(self, P2: int) -> None: ... + + def getMode(self) -> int: ... + + def setMode(self, mode: int) -> None: ... + + @classmethod + def create(cls, minDisparity: int = ..., numDisparities: int = ..., blockSize: int = ..., P1: int = ..., P2: int = ..., disp12MaxDiff: int = ..., preFilterCap: int = ..., uniquenessRatio: int = ..., speckleWindowSize: int = ..., speckleRange: int = ..., mode: int = ...) -> StereoSGBM: ... + + +class BaseCascadeClassifier(Algorithm): + ... + +class CascadeClassifier: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, filename: str) -> None: ... + + def empty(self) -> bool: ... + + def load(self, filename: str) -> bool: ... + + def read(self, node: FileNode) -> bool: ... + + @_typing.overload + def detectMultiScale(self, image: cv2.typing.MatLike, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> _typing.Sequence[cv2.typing.Rect]: ... + @_typing.overload + def detectMultiScale(self, image: UMat, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> _typing.Sequence[cv2.typing.Rect]: ... + + @_typing.overload + def detectMultiScale2(self, image: cv2.typing.MatLike, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int]]: ... + @_typing.overload + def detectMultiScale2(self, image: UMat, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int]]: ... 
+ + @_typing.overload + def detectMultiScale3(self, image: cv2.typing.MatLike, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ..., outputRejectLevels: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int], _typing.Sequence[float]]: ... + @_typing.overload + def detectMultiScale3(self, image: UMat, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ..., outputRejectLevels: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int], _typing.Sequence[float]]: ... + + def isOldFormatCascade(self) -> bool: ... + + def getOriginalWindowSize(self) -> cv2.typing.Size: ... + + def getFeatureType(self) -> int: ... + + @staticmethod + def convert(oldcascade: str, newcascade: str) -> bool: ... + + +class HOGDescriptor: + @property + def winSize(self) -> cv2.typing.Size: ... + @property + def blockSize(self) -> cv2.typing.Size: ... + @property + def blockStride(self) -> cv2.typing.Size: ... + @property + def cellSize(self) -> cv2.typing.Size: ... + @property + def nbins(self) -> int: ... + @property + def derivAperture(self) -> int: ... + @property + def winSigma(self) -> float: ... + @property + def histogramNormType(self) -> HOGDescriptor_HistogramNormType: ... + @property + def L2HysThreshold(self) -> float: ... + @property + def gammaCorrection(self) -> bool: ... + @property + def svmDetector(self) -> _typing.Sequence[float]: ... + @property + def nlevels(self) -> int: ... + @property + def signedGradient(self) -> bool: ... + + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, _winSize: cv2.typing.Size, _blockSize: cv2.typing.Size, _blockStride: cv2.typing.Size, _cellSize: cv2.typing.Size, _nbins: int, _derivAperture: int = ..., _winSigma: float = ..., _histogramNormType: HOGDescriptor_HistogramNormType = ..., _L2HysThreshold: float = ..., _gammaCorrection: bool = ..., _nlevels: int = ..., _signedGradient: bool = ...) -> None: ... + @_typing.overload + def __init__(self, filename: str) -> None: ... + + def getDescriptorSize(self) -> int: ... + + def checkDetectorSize(self) -> bool: ... + + def getWinSigma(self) -> float: ... + + @_typing.overload + def setSVMDetector(self, svmdetector: cv2.typing.MatLike) -> None: ... + @_typing.overload + def setSVMDetector(self, svmdetector: UMat) -> None: ... + + def load(self, filename: str, objname: str = ...) -> bool: ... + + def save(self, filename: str, objname: str = ...) -> None: ... + + @_typing.overload + def compute(self, img: cv2.typing.MatLike, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., locations: _typing.Sequence[cv2.typing.Point] = ...) -> _typing.Sequence[float]: ... + @_typing.overload + def compute(self, img: UMat, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., locations: _typing.Sequence[cv2.typing.Point] = ...) -> _typing.Sequence[float]: ... + + @_typing.overload + def detect(self, img: cv2.typing.MatLike, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., searchLocations: _typing.Sequence[cv2.typing.Point] = ...) -> tuple[_typing.Sequence[cv2.typing.Point], _typing.Sequence[float]]: ... + @_typing.overload + def detect(self, img: UMat, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., searchLocations: _typing.Sequence[cv2.typing.Point] = ...) 
-> tuple[_typing.Sequence[cv2.typing.Point], _typing.Sequence[float]]: ... + + @_typing.overload + def detectMultiScale(self, img: cv2.typing.MatLike, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., scale: float = ..., groupThreshold: float = ..., useMeanshiftGrouping: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ... + @_typing.overload + def detectMultiScale(self, img: UMat, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., scale: float = ..., groupThreshold: float = ..., useMeanshiftGrouping: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ... + + @_typing.overload + def computeGradient(self, img: cv2.typing.MatLike, grad: cv2.typing.MatLike, angleOfs: cv2.typing.MatLike, paddingTL: cv2.typing.Size = ..., paddingBR: cv2.typing.Size = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def computeGradient(self, img: UMat, grad: UMat, angleOfs: UMat, paddingTL: cv2.typing.Size = ..., paddingBR: cv2.typing.Size = ...) -> tuple[UMat, UMat]: ... + + @staticmethod + def getDefaultPeopleDetector() -> _typing.Sequence[float]: ... + + @staticmethod + def getDaimlerPeopleDetector() -> _typing.Sequence[float]: ... + + +class QRCodeEncoder: + # Classes + class Params: + version: int + correction_level: QRCodeEncoder_CorrectionLevel + mode: QRCodeEncoder_EncodeMode + structure_number: int + + # Functions + def __init__(self) -> None: ... + + + + # Functions + @classmethod + def create(cls, parameters: QRCodeEncoder.Params = ...) -> QRCodeEncoder: ... + + @_typing.overload + def encode(self, encoded_info: str, qrcode: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def encode(self, encoded_info: str, qrcode: UMat | None = ...) -> UMat: ... + + @_typing.overload + def encodeStructuredAppend(self, encoded_info: str, qrcodes: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ... + @_typing.overload + def encodeStructuredAppend(self, encoded_info: str, qrcodes: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[UMat]: ... + + +class QRCodeDetector(GraphicalCodeDetector): + # Functions + def __init__(self) -> None: ... + + def setEpsX(self, epsX: float) -> QRCodeDetector: ... + + def setEpsY(self, epsY: float) -> QRCodeDetector: ... + + def setUseAlignmentMarkers(self, useAlignmentMarkers: bool) -> QRCodeDetector: ... + + @_typing.overload + def decodeCurved(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_qrcode: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike]: ... + @_typing.overload + def decodeCurved(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ... + + @_typing.overload + def detectAndDecodeCurved(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_qrcode: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def detectAndDecodeCurved(self, img: UMat, points: UMat | None = ..., straight_qrcode: UMat | None = ...) -> tuple[str, UMat, UMat]: ... + + +class GraphicalCodeDetector: + # Functions + @_typing.overload + def detect(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @_typing.overload + def detect(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ... 
+ + @_typing.overload + def decode(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_code: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike]: ... + @_typing.overload + def decode(self, img: UMat, points: UMat, straight_code: UMat | None = ...) -> tuple[str, UMat]: ... + + @_typing.overload + def detectAndDecode(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_code: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def detectAndDecode(self, img: UMat, points: UMat | None = ..., straight_code: UMat | None = ...) -> tuple[str, UMat, UMat]: ... + + @_typing.overload + def detectMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @_typing.overload + def detectMulti(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ... + + @_typing.overload + def decodeMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_code: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[cv2.typing.MatLike]]: ... + @_typing.overload + def decodeMulti(self, img: UMat, points: UMat, straight_code: _typing.Sequence[UMat] | None = ...) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[UMat]]: ... + + @_typing.overload + def detectAndDecodeMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_code: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[str], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ... + @_typing.overload + def detectAndDecodeMulti(self, img: UMat, points: UMat | None = ..., straight_code: _typing.Sequence[UMat] | None = ...) -> tuple[bool, _typing.Sequence[str], UMat, _typing.Sequence[UMat]]: ... + + +class QRCodeDetectorAruco(GraphicalCodeDetector): + # Classes + class Params: + minModuleSizeInPyramid: float + maxRotation: float + maxModuleSizeMismatch: float + maxTimingPatternMismatch: float + maxPenalties: float + maxColorsMismatch: float + scaleTimingPatternScore: float + + # Functions + def __init__(self) -> None: ... + + + + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, params: QRCodeDetectorAruco.Params) -> None: ... + + def getDetectorParameters(self) -> QRCodeDetectorAruco.Params: ... + + def setDetectorParameters(self, params: QRCodeDetectorAruco.Params) -> QRCodeDetectorAruco: ... + + def getArucoParameters(self) -> cv2.aruco.DetectorParameters: ... + + def setArucoParameters(self, params: cv2.aruco.DetectorParameters) -> None: ... + + +class FaceDetectorYN: + # Functions + def setInputSize(self, input_size: cv2.typing.Size) -> None: ... + + def getInputSize(self) -> cv2.typing.Size: ... + + def setScoreThreshold(self, score_threshold: float) -> None: ... + + def getScoreThreshold(self) -> float: ... + + def setNMSThreshold(self, nms_threshold: float) -> None: ... + + def getNMSThreshold(self) -> float: ... + + def setTopK(self, top_k: int) -> None: ... + + def getTopK(self) -> int: ... + + @_typing.overload + def detect(self, image: cv2.typing.MatLike, faces: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ... + @_typing.overload + def detect(self, image: UMat, faces: UMat | None = ...) -> tuple[int, UMat]: ... 
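The GraphicalCodeDetector interface above is what QRCodeDetector and QRCodeDetectorAruco expose to Python. A minimal sketch, assuming a hypothetical image qr.png that contains a single QR code:

import cv2

img = cv2.imread("qr.png")          # hypothetical input image
detector = cv2.QRCodeDetector()

# detectAndDecode returns the decoded text, the corner points, and the
# rectified ("straight") code image; the text is empty if nothing was found.
text, points, straight = detector.detectAndDecode(img)
if text:
    print("decoded:", text)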
+ + @classmethod + @_typing.overload + def create(cls, model: str, config: str, input_size: cv2.typing.Size, score_threshold: float = ..., nms_threshold: float = ..., top_k: int = ..., backend_id: int = ..., target_id: int = ...) -> FaceDetectorYN: ... + @classmethod + @_typing.overload + def create(cls, framework: str, bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], input_size: cv2.typing.Size, score_threshold: float = ..., nms_threshold: float = ..., top_k: int = ..., backend_id: int = ..., target_id: int = ...) -> FaceDetectorYN: ... + + +class FaceRecognizerSF: + # Functions + @_typing.overload + def alignCrop(self, src_img: cv2.typing.MatLike, face_box: cv2.typing.MatLike, aligned_img: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def alignCrop(self, src_img: UMat, face_box: UMat, aligned_img: UMat | None = ...) -> UMat: ... + + @_typing.overload + def feature(self, aligned_img: cv2.typing.MatLike, face_feature: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def feature(self, aligned_img: UMat, face_feature: UMat | None = ...) -> UMat: ... + + @_typing.overload + def match(self, face_feature1: cv2.typing.MatLike, face_feature2: cv2.typing.MatLike, dis_type: int = ...) -> float: ... + @_typing.overload + def match(self, face_feature1: UMat, face_feature2: UMat, dis_type: int = ...) -> float: ... + + @classmethod + def create(cls, model: str, config: str, backend_id: int = ..., target_id: int = ...) -> FaceRecognizerSF: ... + + +class Stitcher: + # Functions + @classmethod + def create(cls, mode: Stitcher_Mode = ...) -> Stitcher: ... + + def registrationResol(self) -> float: ... + + def setRegistrationResol(self, resol_mpx: float) -> None: ... + + def seamEstimationResol(self) -> float: ... + + def setSeamEstimationResol(self, resol_mpx: float) -> None: ... + + def compositingResol(self) -> float: ... + + def setCompositingResol(self, resol_mpx: float) -> None: ... + + def panoConfidenceThresh(self) -> float: ... + + def setPanoConfidenceThresh(self, conf_thresh: float) -> None: ... + + def waveCorrection(self) -> bool: ... + + def setWaveCorrection(self, flag: bool) -> None: ... + + def interpolationFlags(self) -> InterpolationFlags: ... + + def setInterpolationFlags(self, interp_flags: InterpolationFlags) -> None: ... + + @_typing.overload + def estimateTransform(self, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> Stitcher_Status: ... + @_typing.overload + def estimateTransform(self, images: _typing.Sequence[UMat], masks: _typing.Sequence[UMat] | None = ...) -> Stitcher_Status: ... + + @_typing.overload + def composePanorama(self, pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ... + @_typing.overload + def composePanorama(self, pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... + @_typing.overload + def composePanorama(self, images: _typing.Sequence[cv2.typing.MatLike], pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ... + @_typing.overload + def composePanorama(self, images: _typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... + + @_typing.overload + def stitch(self, images: _typing.Sequence[cv2.typing.MatLike], pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ... 
+ @_typing.overload + def stitch(self, images: _typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... + @_typing.overload + def stitch(self, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike], pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ... + @_typing.overload + def stitch(self, images: _typing.Sequence[UMat], masks: _typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... + + def workScale(self) -> float: ... + + +class PyRotationWarper: + # Functions + @_typing.overload + def __init__(self, type: str, scale: float) -> None: ... + @_typing.overload + def __init__(self) -> None: ... + + @_typing.overload + def warpPoint(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ... + @_typing.overload + def warpPoint(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... + + @_typing.overload + def warpPointBackward(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ... + @_typing.overload + def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... + @_typing.overload + def warpPointBackward(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ... + @_typing.overload + def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... + + @_typing.overload + def buildMaps(self, src_size: cv2.typing.Size, K: cv2.typing.MatLike, R: cv2.typing.MatLike, xmap: cv2.typing.MatLike | None = ..., ymap: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Rect, cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def buildMaps(self, src_size: cv2.typing.Size, K: UMat, R: UMat, xmap: UMat | None = ..., ymap: UMat | None = ...) -> tuple[cv2.typing.Rect, UMat, UMat]: ... + + @_typing.overload + def warp(self, src: cv2.typing.MatLike, K: cv2.typing.MatLike, R: cv2.typing.MatLike, interp_mode: int, border_mode: int, dst: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Point, cv2.typing.MatLike]: ... + @_typing.overload + def warp(self, src: UMat, K: UMat, R: UMat, interp_mode: int, border_mode: int, dst: UMat | None = ...) -> tuple[cv2.typing.Point, UMat]: ... + + @_typing.overload + def warpBackward(self, src: cv2.typing.MatLike, K: cv2.typing.MatLike, R: cv2.typing.MatLike, interp_mode: int, border_mode: int, dst_size: cv2.typing.Size, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def warpBackward(self, src: UMat, K: UMat, R: UMat, interp_mode: int, border_mode: int, dst_size: cv2.typing.Size, dst: UMat | None = ...) -> UMat: ... + + @_typing.overload + def warpRoi(self, src_size: cv2.typing.Size, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Rect: ... + @_typing.overload + def warpRoi(self, src_size: cv2.typing.Size, K: UMat, R: UMat) -> cv2.typing.Rect: ... + + def getScale(self) -> float: ... + + def setScale(self, arg1: float) -> None: ... + + +class WarperCreator: + ... + +class BackgroundSubtractor(Algorithm): + # Functions + @_typing.overload + def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ... 
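The Stitcher declarations above map onto the usual panorama flow. A minimal sketch, assuming a few overlapping hypothetical photos; cv2.imread and cv2.imwrite are standard helpers not part of this stub excerpt.

import cv2

# Hypothetical overlapping photos of the same scene.
images = [cv2.imread(p) for p in ("left.jpg", "mid.jpg", "right.jpg")]

stitcher = cv2.Stitcher.create(cv2.Stitcher_PANORAMA)
status, pano = stitcher.stitch(images)
if status == cv2.Stitcher_OK:
    cv2.imwrite("pano.jpg", pano)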
+ + @_typing.overload + def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def getBackgroundImage(self, backgroundImage: UMat | None = ...) -> UMat: ... + + +class BackgroundSubtractorMOG2(BackgroundSubtractor): + # Functions + def getHistory(self) -> int: ... + + def setHistory(self, history: int) -> None: ... + + def getNMixtures(self) -> int: ... + + def setNMixtures(self, nmixtures: int) -> None: ... + + def getBackgroundRatio(self) -> float: ... + + def setBackgroundRatio(self, ratio: float) -> None: ... + + def getVarThreshold(self) -> float: ... + + def setVarThreshold(self, varThreshold: float) -> None: ... + + def getVarThresholdGen(self) -> float: ... + + def setVarThresholdGen(self, varThresholdGen: float) -> None: ... + + def getVarInit(self) -> float: ... + + def setVarInit(self, varInit: float) -> None: ... + + def getVarMin(self) -> float: ... + + def setVarMin(self, varMin: float) -> None: ... + + def getVarMax(self) -> float: ... + + def setVarMax(self, varMax: float) -> None: ... + + def getComplexityReductionThreshold(self) -> float: ... + + def setComplexityReductionThreshold(self, ct: float) -> None: ... + + def getDetectShadows(self) -> bool: ... + + def setDetectShadows(self, detectShadows: bool) -> None: ... + + def getShadowValue(self) -> int: ... + + def setShadowValue(self, value: int) -> None: ... + + def getShadowThreshold(self) -> float: ... + + def setShadowThreshold(self, threshold: float) -> None: ... + + @_typing.overload + def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ... + + +class BackgroundSubtractorKNN(BackgroundSubtractor): + # Functions + def getHistory(self) -> int: ... + + def setHistory(self, history: int) -> None: ... + + def getNSamples(self) -> int: ... + + def setNSamples(self, _nN: int) -> None: ... + + def getDist2Threshold(self) -> float: ... + + def setDist2Threshold(self, _dist2Threshold: float) -> None: ... + + def getkNNSamples(self) -> int: ... + + def setkNNSamples(self, _nkNN: int) -> None: ... + + def getDetectShadows(self) -> bool: ... + + def setDetectShadows(self, detectShadows: bool) -> None: ... + + def getShadowValue(self) -> int: ... + + def setShadowValue(self, value: int) -> None: ... + + def getShadowThreshold(self) -> float: ... + + def setShadowThreshold(self, threshold: float) -> None: ... + + +class KalmanFilter: + statePre: cv2.typing.MatLike + statePost: cv2.typing.MatLike + transitionMatrix: cv2.typing.MatLike + controlMatrix: cv2.typing.MatLike + measurementMatrix: cv2.typing.MatLike + processNoiseCov: cv2.typing.MatLike + measurementNoiseCov: cv2.typing.MatLike + errorCovPre: cv2.typing.MatLike + gain: cv2.typing.MatLike + errorCovPost: cv2.typing.MatLike + + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, dynamParams: int, measureParams: int, controlParams: int = ..., type: int = ...) -> None: ... + + def predict(self, control: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + + def correct(self, measurement: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + + +class DenseOpticalFlow(Algorithm): + # Functions + @_typing.overload + def calc(self, I0: cv2.typing.MatLike, I1: cv2.typing.MatLike, flow: cv2.typing.MatLike) -> cv2.typing.MatLike: ... 
+ @_typing.overload + def calc(self, I0: UMat, I1: UMat, flow: UMat) -> UMat: ... + + def collectGarbage(self) -> None: ... + + +class SparseOpticalFlow(Algorithm): + # Functions + @_typing.overload + def calc(self, prevImg: cv2.typing.MatLike, nextImg: cv2.typing.MatLike, prevPts: cv2.typing.MatLike, nextPts: cv2.typing.MatLike, status: cv2.typing.MatLike | None = ..., err: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def calc(self, prevImg: UMat, nextImg: UMat, prevPts: UMat, nextPts: UMat, status: UMat | None = ..., err: UMat | None = ...) -> tuple[UMat, UMat, UMat]: ... + + +class FarnebackOpticalFlow(DenseOpticalFlow): + # Functions + def getNumLevels(self) -> int: ... + + def setNumLevels(self, numLevels: int) -> None: ... + + def getPyrScale(self) -> float: ... + + def setPyrScale(self, pyrScale: float) -> None: ... + + def getFastPyramids(self) -> bool: ... + + def setFastPyramids(self, fastPyramids: bool) -> None: ... + + def getWinSize(self) -> int: ... + + def setWinSize(self, winSize: int) -> None: ... + + def getNumIters(self) -> int: ... + + def setNumIters(self, numIters: int) -> None: ... + + def getPolyN(self) -> int: ... + + def setPolyN(self, polyN: int) -> None: ... + + def getPolySigma(self) -> float: ... + + def setPolySigma(self, polySigma: float) -> None: ... + + def getFlags(self) -> int: ... + + def setFlags(self, flags: int) -> None: ... + + @classmethod + def create(cls, numLevels: int = ..., pyrScale: float = ..., fastPyramids: bool = ..., winSize: int = ..., numIters: int = ..., polyN: int = ..., polySigma: float = ..., flags: int = ...) -> FarnebackOpticalFlow: ... + + +class VariationalRefinement(DenseOpticalFlow): + # Functions + @_typing.overload + def calcUV(self, I0: cv2.typing.MatLike, I1: cv2.typing.MatLike, flow_u: cv2.typing.MatLike, flow_v: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def calcUV(self, I0: UMat, I1: UMat, flow_u: UMat, flow_v: UMat) -> tuple[UMat, UMat]: ... + + def getFixedPointIterations(self) -> int: ... + + def setFixedPointIterations(self, val: int) -> None: ... + + def getSorIterations(self) -> int: ... + + def setSorIterations(self, val: int) -> None: ... + + def getOmega(self) -> float: ... + + def setOmega(self, val: float) -> None: ... + + def getAlpha(self) -> float: ... + + def setAlpha(self, val: float) -> None: ... + + def getDelta(self) -> float: ... + + def setDelta(self, val: float) -> None: ... + + def getGamma(self) -> float: ... + + def setGamma(self, val: float) -> None: ... + + def getEpsilon(self) -> float: ... + + def setEpsilon(self, val: float) -> None: ... + + @classmethod + def create(cls) -> VariationalRefinement: ... + + +class DISOpticalFlow(DenseOpticalFlow): + # Functions + def getFinestScale(self) -> int: ... + + def setFinestScale(self, val: int) -> None: ... + + def getPatchSize(self) -> int: ... + + def setPatchSize(self, val: int) -> None: ... + + def getPatchStride(self) -> int: ... + + def setPatchStride(self, val: int) -> None: ... + + def getGradientDescentIterations(self) -> int: ... + + def setGradientDescentIterations(self, val: int) -> None: ... + + def getVariationalRefinementIterations(self) -> int: ... + + def setVariationalRefinementIterations(self, val: int) -> None: ... + + def getVariationalRefinementAlpha(self) -> float: ... + + def setVariationalRefinementAlpha(self, val: float) -> None: ... 
+ + def getVariationalRefinementDelta(self) -> float: ... + + def setVariationalRefinementDelta(self, val: float) -> None: ... + + def getVariationalRefinementGamma(self) -> float: ... + + def setVariationalRefinementGamma(self, val: float) -> None: ... + + def getVariationalRefinementEpsilon(self) -> float: ... + + def setVariationalRefinementEpsilon(self, val: float) -> None: ... + + def getUseMeanNormalization(self) -> bool: ... + + def setUseMeanNormalization(self, val: bool) -> None: ... + + def getUseSpatialPropagation(self) -> bool: ... + + def setUseSpatialPropagation(self, val: bool) -> None: ... + + @classmethod + def create(cls, preset: int = ...) -> DISOpticalFlow: ... + + +class SparsePyrLKOpticalFlow(SparseOpticalFlow): + # Functions + def getWinSize(self) -> cv2.typing.Size: ... + + def setWinSize(self, winSize: cv2.typing.Size) -> None: ... + + def getMaxLevel(self) -> int: ... + + def setMaxLevel(self, maxLevel: int) -> None: ... + + def getTermCriteria(self) -> cv2.typing.TermCriteria: ... + + def setTermCriteria(self, crit: cv2.typing.TermCriteria) -> None: ... + + def getFlags(self) -> int: ... + + def setFlags(self, flags: int) -> None: ... + + def getMinEigThreshold(self) -> float: ... + + def setMinEigThreshold(self, minEigThreshold: float) -> None: ... + + @classmethod + def create(cls, winSize: cv2.typing.Size = ..., maxLevel: int = ..., crit: cv2.typing.TermCriteria = ..., flags: int = ..., minEigThreshold: float = ...) -> SparsePyrLKOpticalFlow: ... + + +class Tracker: + # Functions + @_typing.overload + def init(self, image: cv2.typing.MatLike, boundingBox: cv2.typing.Rect) -> None: ... + @_typing.overload + def init(self, image: UMat, boundingBox: cv2.typing.Rect) -> None: ... + + @_typing.overload + def update(self, image: cv2.typing.MatLike) -> tuple[bool, cv2.typing.Rect]: ... + @_typing.overload + def update(self, image: UMat) -> tuple[bool, cv2.typing.Rect]: ... + + +class TrackerMIL(Tracker): + # Classes + class Params: + samplerInitInRadius: float + samplerInitMaxNegNum: int + samplerSearchWinSize: float + samplerTrackInRadius: float + samplerTrackMaxPosNum: int + samplerTrackMaxNegNum: int + featureSetNumFeatures: int + + # Functions + def __init__(self) -> None: ... + + + + # Functions + @classmethod + def create(cls, parameters: TrackerMIL.Params = ...) -> TrackerMIL: ... + + +class TrackerGOTURN(Tracker): + # Classes + class Params: + modelTxt: str + modelBin: str + + # Functions + def __init__(self) -> None: ... + + + + # Functions + @classmethod + def create(cls, parameters: TrackerGOTURN.Params = ...) -> TrackerGOTURN: ... + + +class TrackerDaSiamRPN(Tracker): + # Classes + class Params: + model: str + kernel_cls1: str + kernel_r1: str + backend: int + target: int + + # Functions + def __init__(self) -> None: ... + + + + # Functions + @classmethod + def create(cls, parameters: TrackerDaSiamRPN.Params = ...) -> TrackerDaSiamRPN: ... + + def getTrackingScore(self) -> float: ... + + +class TrackerNano(Tracker): + # Classes + class Params: + backbone: str + neckhead: str + backend: int + target: int + + # Functions + def __init__(self) -> None: ... + + + + # Functions + @classmethod + def create(cls, parameters: TrackerNano.Params = ...) -> TrackerNano: ... + + def getTrackingScore(self) -> float: ... + + +class TrackerVit(Tracker): + # Classes + class Params: + net: str + backend: int + target: int + meanvalue: cv2.typing.Scalar + stdvalue: cv2.typing.Scalar + + # Functions + def __init__(self) -> None: ... 
+ + + + # Functions + @classmethod + def create(cls, parameters: TrackerVit.Params = ...) -> TrackerVit: ... + + def getTrackingScore(self) -> float: ... + + +class GArrayDesc: + ... + +class GComputation: + # Functions + @_typing.overload + def __init__(self, ins: cv2.typing.GProtoInputArgs, outs: cv2.typing.GProtoOutputArgs) -> None: ... + @_typing.overload + def __init__(self, in_: GMat, out: GMat) -> None: ... + @_typing.overload + def __init__(self, in_: GMat, out: GScalar) -> None: ... + @_typing.overload + def __init__(self, in1: GMat, in2: GMat, out: GMat) -> None: ... + + def apply(self, callback: cv2.typing.ExtractArgsCallback, args: _typing.Sequence[GCompileArg] = ...) -> _typing.Sequence[cv2.typing.GRunArg]: ... + + @_typing.overload + def compileStreaming(self, in_metas: _typing.Sequence[cv2.typing.GMetaArg], args: _typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ... + @_typing.overload + def compileStreaming(self, args: _typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ... + @_typing.overload + def compileStreaming(self, callback: cv2.typing.ExtractMetaCallback, args: _typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ... + + +class GFrame: + # Functions + def __init__(self) -> None: ... + + +class GKernelPackage: + # Functions + def size(self) -> int: ... + + +class GMat: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, m: cv2.typing.MatLike) -> None: ... + + +class GMatDesc: + @property + def depth(self) -> int: ... + @property + def chan(self) -> int: ... + @property + def size(self) -> cv2.typing.Size: ... + @property + def planar(self) -> bool: ... + @property + def dims(self) -> _typing.Sequence[int]: ... + + # Functions + @_typing.overload + def __init__(self, d: int, c: int, s: cv2.typing.Size, p: bool = ...) -> None: ... + @_typing.overload + def __init__(self, d: int, dd: _typing.Sequence[int]) -> None: ... + @_typing.overload + def __init__(self, d: int, dd: _typing.Sequence[int]) -> None: ... + @_typing.overload + def __init__(self) -> None: ... + + @_typing.overload + def withSizeDelta(self, delta: cv2.typing.Size) -> GMatDesc: ... + @_typing.overload + def withSizeDelta(self, dx: int, dy: int) -> GMatDesc: ... + + def withSize(self, sz: cv2.typing.Size) -> GMatDesc: ... + + def withDepth(self, ddepth: int) -> GMatDesc: ... + + def withType(self, ddepth: int, dchan: int) -> GMatDesc: ... + + @_typing.overload + def asPlanar(self) -> GMatDesc: ... + @_typing.overload + def asPlanar(self, planes: int) -> GMatDesc: ... + + def asInterleaved(self) -> GMatDesc: ... + + +class GOpaqueDesc: + ... + +class GScalar: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, s: cv2.typing.Scalar) -> None: ... + + +class GScalarDesc: + ... + +class GStreamingCompiled: + # Functions + def __init__(self) -> None: ... + + def setSource(self, callback: cv2.typing.ExtractArgsCallback) -> None: ... + + def start(self) -> None: ... + + def pull(self) -> tuple[bool, _typing.Sequence[cv2.typing.GRunArg] | _typing.Sequence[cv2.typing.GOptRunArg]]: ... + + def stop(self) -> None: ... + + def running(self) -> bool: ... + + +class GOpaqueT: + # Functions + def __init__(self, type: cv2.gapi.ArgType) -> None: ... + + def type(self) -> cv2.gapi.ArgType: ... + + +class GArrayT: + # Functions + def __init__(self, type: cv2.gapi.ArgType) -> None: ... + + def type(self) -> cv2.gapi.ArgType: ... 
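The G-API classes above (GMat, GComputation, and friends) describe graph construction and execution. A minimal illustrative sketch only, under the assumption that this build exposes cv2.gapi.medianBlur and the cv2.gin input-packing helper from the standard G-API Python bindings:

import cv2
import numpy as np

img = np.random.randint(0, 255, (480, 640, 3), np.uint8)  # stand-in input

# Declare the graph: one GMat input, one filtered GMat output.
g_in = cv2.GMat()
g_out = cv2.gapi.medianBlur(g_in, 5)   # assumes gapi imgproc ops are exposed
comp = cv2.GComputation(g_in, g_out)

# Execute the graph on a concrete image; apply() takes packed inputs
# and returns the graph outputs.
out = comp.apply(cv2.gin(img))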
+ + +class GCompileArg: + # Functions + @_typing.overload + def __init__(self, arg: GKernelPackage) -> None: ... + @_typing.overload + def __init__(self, arg: cv2.gapi.GNetPackage) -> None: ... + @_typing.overload + def __init__(self, arg: cv2.gapi.streaming.queue_capacity) -> None: ... + @_typing.overload + def __init__(self, arg: cv2.gapi.ot.ObjectTrackerParams) -> None: ... + + +class GInferInputs: + # Functions + def __init__(self) -> None: ... + + @_typing.overload + def setInput(self, name: str, value: GMat) -> GInferInputs: ... + @_typing.overload + def setInput(self, name: str, value: GFrame) -> GInferInputs: ... + + +class GInferListInputs: + # Functions + def __init__(self) -> None: ... + + @_typing.overload + def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ... + @_typing.overload + def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ... + + +class GInferOutputs: + # Functions + def __init__(self) -> None: ... + + def at(self, name: str) -> GMat: ... + + +class GInferListOutputs: + # Functions + def __init__(self) -> None: ... + + def at(self, name: str) -> GArrayT: ... + + +class error(Exception): + code: int + err: str + file: str + func: str + line: int + msg: str + + +# Functions +@_typing.overload +def CamShift(probImage: cv2.typing.MatLike, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[cv2.typing.RotatedRect, cv2.typing.Rect]: ... +@_typing.overload +def CamShift(probImage: UMat, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[cv2.typing.RotatedRect, cv2.typing.Rect]: ... + +@_typing.overload +def Canny(image: cv2.typing.MatLike, threshold1: float, threshold2: float, edges: cv2.typing.MatLike | None = ..., apertureSize: int = ..., L2gradient: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def Canny(image: UMat, threshold1: float, threshold2: float, edges: UMat | None = ..., apertureSize: int = ..., L2gradient: bool = ...) -> UMat: ... +@_typing.overload +def Canny(dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, threshold1: float, threshold2: float, edges: cv2.typing.MatLike | None = ..., L2gradient: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def Canny(dx: UMat, dy: UMat, threshold1: float, threshold2: float, edges: UMat | None = ..., L2gradient: bool = ...) -> UMat: ... + +@_typing.overload +def EMD(signature1: cv2.typing.MatLike, signature2: cv2.typing.MatLike, distType: int, cost: cv2.typing.MatLike | None = ..., lowerBound: float | None = ..., flow: cv2.typing.MatLike | None = ...) -> tuple[float, float, cv2.typing.MatLike]: ... +@_typing.overload +def EMD(signature1: UMat, signature2: UMat, distType: int, cost: UMat | None = ..., lowerBound: float | None = ..., flow: UMat | None = ...) -> tuple[float, float, UMat]: ... + +@_typing.overload +def GaussianBlur(src: cv2.typing.MatLike, ksize: cv2.typing.Size, sigmaX: float, dst: cv2.typing.MatLike | None = ..., sigmaY: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def GaussianBlur(src: UMat, ksize: cv2.typing.Size, sigmaX: float, dst: UMat | None = ..., sigmaY: float = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def HoughCircles(image: cv2.typing.MatLike, method: int, dp: float, minDist: float, circles: cv2.typing.MatLike | None = ..., param1: float = ..., param2: float = ..., minRadius: int = ..., maxRadius: int = ...) -> cv2.typing.MatLike: ... 
+@_typing.overload +def HoughCircles(image: UMat, method: int, dp: float, minDist: float, circles: UMat | None = ..., param1: float = ..., param2: float = ..., minRadius: int = ..., maxRadius: int = ...) -> UMat: ... + +@_typing.overload +def HoughLines(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def HoughLines(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> UMat: ... + +@_typing.overload +def HoughLinesP(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., minLineLength: float = ..., maxLineGap: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def HoughLinesP(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., minLineLength: float = ..., maxLineGap: float = ...) -> UMat: ... + +@_typing.overload +def HoughLinesPointSet(point: cv2.typing.MatLike, lines_max: int, threshold: int, min_rho: float, max_rho: float, rho_step: float, min_theta: float, max_theta: float, theta_step: float, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def HoughLinesPointSet(point: UMat, lines_max: int, threshold: int, min_rho: float, max_rho: float, rho_step: float, min_theta: float, max_theta: float, theta_step: float, lines: UMat | None = ...) -> UMat: ... + +@_typing.overload +def HoughLinesWithAccumulator(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def HoughLinesWithAccumulator(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> UMat: ... + +@_typing.overload +def HuMoments(m: cv2.typing.Moments, hu: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def HuMoments(m: cv2.typing.Moments, hu: UMat | None = ...) -> UMat: ... + +@_typing.overload +def LUT(src: cv2.typing.MatLike, lut: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def LUT(src: UMat, lut: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def Laplacian(src: cv2.typing.MatLike, ddepth: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def Laplacian(src: UMat, ddepth: int, dst: UMat | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def Mahalanobis(v1: cv2.typing.MatLike, v2: cv2.typing.MatLike, icovar: cv2.typing.MatLike) -> float: ... +@_typing.overload +def Mahalanobis(v1: UMat, v2: UMat, icovar: UMat) -> float: ... + +@_typing.overload +def PCABackProject(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def PCABackProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ... 
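The module-level functions above follow the same Mat/UMat overload pairing throughout. A minimal edge-and-line sketch over a hypothetical grayscale image input.png:

import cv2
import numpy as np

gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input

blurred = cv2.GaussianBlur(gray, (5, 5), 1.5)          # suppress noise first
edges = cv2.Canny(blurred, 50, 150)                     # hysteresis thresholds
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=80,
                        minLineLength=30, maxLineGap=10)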
+ +@_typing.overload +def PCACompute(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike | None = ..., maxComponents: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def PCACompute(data: UMat, mean: UMat, eigenvectors: UMat | None = ..., maxComponents: int = ...) -> tuple[UMat, UMat]: ... +@_typing.overload +def PCACompute(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, retainedVariance: float, eigenvectors: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def PCACompute(data: UMat, mean: UMat, retainedVariance: float, eigenvectors: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def PCACompute2(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike | None = ..., eigenvalues: cv2.typing.MatLike | None = ..., maxComponents: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def PCACompute2(data: UMat, mean: UMat, eigenvectors: UMat | None = ..., eigenvalues: UMat | None = ..., maxComponents: int = ...) -> tuple[UMat, UMat, UMat]: ... +@_typing.overload +def PCACompute2(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, retainedVariance: float, eigenvectors: cv2.typing.MatLike | None = ..., eigenvalues: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def PCACompute2(data: UMat, mean: UMat, retainedVariance: float, eigenvectors: UMat | None = ..., eigenvalues: UMat | None = ...) -> tuple[UMat, UMat, UMat]: ... + +@_typing.overload +def PCAProject(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def PCAProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ... + +@_typing.overload +def PSNR(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, R: float = ...) -> float: ... +@_typing.overload +def PSNR(src1: UMat, src2: UMat, R: float = ...) -> float: ... + +@_typing.overload +def RQDecomp3x3(src: cv2.typing.MatLike, mtxR: cv2.typing.MatLike | None = ..., mtxQ: cv2.typing.MatLike | None = ..., Qx: cv2.typing.MatLike | None = ..., Qy: cv2.typing.MatLike | None = ..., Qz: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Vec3d, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def RQDecomp3x3(src: UMat, mtxR: UMat | None = ..., mtxQ: UMat | None = ..., Qx: UMat | None = ..., Qy: UMat | None = ..., Qz: UMat | None = ...) -> tuple[cv2.typing.Vec3d, UMat, UMat, UMat, UMat, UMat]: ... + +@_typing.overload +def Rodrigues(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., jacobian: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def Rodrigues(src: UMat, dst: UMat | None = ..., jacobian: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def SVBackSubst(w: cv2.typing.MatLike, u: cv2.typing.MatLike, vt: cv2.typing.MatLike, rhs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def SVBackSubst(w: UMat, u: UMat, vt: UMat, rhs: UMat, dst: UMat | None = ...) -> UMat: ... 
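Rodrigues above converts between the rotation-vector and rotation-matrix forms used throughout the calib3d stubs; a small round-trip sketch:

import cv2
import numpy as np

rvec = np.array([[0.0], [0.0], [np.pi / 2]])  # 90 degrees about the Z axis

R, jacobian = cv2.Rodrigues(rvec)   # 3x1 vector -> 3x3 rotation matrix
rvec_back, _ = cv2.Rodrigues(R)     # 3x3 matrix -> 3x1 vector again

assert np.allclose(rvec, rvec_back, atol=1e-9)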
+ +@_typing.overload +def SVDecomp(src: cv2.typing.MatLike, w: cv2.typing.MatLike | None = ..., u: cv2.typing.MatLike | None = ..., vt: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def SVDecomp(src: UMat, w: UMat | None = ..., u: UMat | None = ..., vt: UMat | None = ..., flags: int = ...) -> tuple[UMat, UMat, UMat]: ... + +@_typing.overload +def Scharr(src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def Scharr(src: UMat, ddepth: int, dx: int, dy: int, dst: UMat | None = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def Sobel(src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def Sobel(src: UMat, ddepth: int, dx: int, dy: int, dst: UMat | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def absdiff(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def absdiff(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def accumulate(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def accumulate(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def accumulateProduct(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def accumulateProduct(src1: UMat, src2: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def accumulateSquare(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def accumulateSquare(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def accumulateWeighted(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def accumulateWeighted(src: UMat, dst: UMat, alpha: float, mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def adaptiveThreshold(src: cv2.typing.MatLike, maxValue: float, adaptiveMethod: int, thresholdType: int, blockSize: int, C: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def adaptiveThreshold(src: UMat, maxValue: float, adaptiveMethod: int, thresholdType: int, blockSize: int, C: float, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def add(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def add(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ... + +def addText(img: cv2.typing.MatLike, text: str, org: cv2.typing.Point, nameFont: str, pointSize: int = ..., color: cv2.typing.Scalar = ..., weight: int = ..., style: int = ..., spacing: int = ...) -> None: ... 
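Sobel and adaptiveThreshold above are typical of the filtering entries in this block; a minimal sketch on a hypothetical grayscale scan page.png:

import cv2

gray = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input

# Horizontal and vertical derivatives in float32 to avoid clipping.
gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=3)
gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=3)

# Locally adaptive binarization: 11x11 Gaussian window, offset C=2.
binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 11, 2)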
+ +@_typing.overload +def addWeighted(src1: cv2.typing.MatLike, alpha: float, src2: cv2.typing.MatLike, beta: float, gamma: float, dst: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def addWeighted(src1: UMat, alpha: float, src2: UMat, beta: float, gamma: float, dst: UMat | None = ..., dtype: int = ...) -> UMat: ... + +@_typing.overload +def applyColorMap(src: cv2.typing.MatLike, colormap: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def applyColorMap(src: UMat, colormap: int, dst: UMat | None = ...) -> UMat: ... +@_typing.overload +def applyColorMap(src: cv2.typing.MatLike, userColor: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def applyColorMap(src: UMat, userColor: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def approxPolyDP(curve: cv2.typing.MatLike, epsilon: float, closed: bool, approxCurve: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def approxPolyDP(curve: UMat, epsilon: float, closed: bool, approxCurve: UMat | None = ...) -> UMat: ... + +@_typing.overload +def arcLength(curve: cv2.typing.MatLike, closed: bool) -> float: ... +@_typing.overload +def arcLength(curve: UMat, closed: bool) -> float: ... + +@_typing.overload +def arrowedLine(img: cv2.typing.MatLike, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., line_type: int = ..., shift: int = ..., tipLength: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def arrowedLine(img: UMat, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., line_type: int = ..., shift: int = ..., tipLength: float = ...) -> UMat: ... + +@_typing.overload +def batchDistance(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dtype: int, dist: cv2.typing.MatLike | None = ..., nidx: cv2.typing.MatLike | None = ..., normType: int = ..., K: int = ..., mask: cv2.typing.MatLike | None = ..., update: int = ..., crosscheck: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def batchDistance(src1: UMat, src2: UMat, dtype: int, dist: UMat | None = ..., nidx: UMat | None = ..., normType: int = ..., K: int = ..., mask: UMat | None = ..., update: int = ..., crosscheck: bool = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def bilateralFilter(src: cv2.typing.MatLike, d: int, sigmaColor: float, sigmaSpace: float, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def bilateralFilter(src: UMat, d: int, sigmaColor: float, sigmaSpace: float, dst: UMat | None = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def bitwise_and(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def bitwise_and(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def bitwise_not(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def bitwise_not(src: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def bitwise_or(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... 
+@_typing.overload +def bitwise_or(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def bitwise_xor(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def bitwise_xor(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def blendLinear(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, weights1: cv2.typing.MatLike, weights2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def blendLinear(src1: UMat, src2: UMat, weights1: UMat, weights2: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def blur(src: cv2.typing.MatLike, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def blur(src: UMat, ksize: cv2.typing.Size, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., borderType: int = ...) -> UMat: ... + +def borderInterpolate(p: int, len: int, borderType: int) -> int: ... + +@_typing.overload +def boundingRect(array: cv2.typing.MatLike) -> cv2.typing.Rect: ... +@_typing.overload +def boundingRect(array: UMat) -> cv2.typing.Rect: ... + +@_typing.overload +def boxFilter(src: cv2.typing.MatLike, ddepth: int, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def boxFilter(src: UMat, ddepth: int, ksize: cv2.typing.Size, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def boxPoints(box: cv2.typing.RotatedRect, points: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def boxPoints(box: cv2.typing.RotatedRect, points: UMat | None = ...) -> UMat: ... + +@_typing.overload +def broadcast(src: cv2.typing.MatLike, shape: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def broadcast(src: UMat, shape: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def buildOpticalFlowPyramid(img: cv2.typing.MatLike, winSize: cv2.typing.Size, maxLevel: int, pyramid: _typing.Sequence[cv2.typing.MatLike] | None = ..., withDerivatives: bool = ..., pyrBorder: int = ..., derivBorder: int = ..., tryReuseInputImage: bool = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike]]: ... +@_typing.overload +def buildOpticalFlowPyramid(img: UMat, winSize: cv2.typing.Size, maxLevel: int, pyramid: _typing.Sequence[UMat] | None = ..., withDerivatives: bool = ..., pyrBorder: int = ..., derivBorder: int = ..., tryReuseInputImage: bool = ...) -> tuple[int, _typing.Sequence[UMat]]: ... + +@_typing.overload +def calcBackProject(images: _typing.Sequence[cv2.typing.MatLike], channels: _typing.Sequence[int], hist: cv2.typing.MatLike, ranges: _typing.Sequence[float], scale: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def calcBackProject(images: _typing.Sequence[UMat], channels: _typing.Sequence[int], hist: UMat, ranges: _typing.Sequence[float], scale: float, dst: UMat | None = ...) -> UMat: ... 
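addWeighted and the bitwise_* entries above cover simple blending and masking; a minimal sketch with two hypothetical same-sized BGR images a.png and b.png:

import cv2
import numpy as np

a = cv2.imread("a.png")   # hypothetical inputs of identical size
b = cv2.imread("b.png")

blend = cv2.addWeighted(a, 0.7, b, 0.3, 0.0)      # 0.7*a + 0.3*b + 0

mask = np.zeros(a.shape[:2], np.uint8)
cv2.circle(mask, (100, 100), 50, 255, -1)          # filled circular region
masked = cv2.bitwise_and(a, a, mask=mask)          # keep only the circle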
+ +@_typing.overload +def calcCovarMatrix(samples: cv2.typing.MatLike, mean: cv2.typing.MatLike, flags: int, covar: cv2.typing.MatLike | None = ..., ctype: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def calcCovarMatrix(samples: UMat, mean: UMat, flags: int, covar: UMat | None = ..., ctype: int = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def calcHist(images: _typing.Sequence[cv2.typing.MatLike], channels: _typing.Sequence[int], mask: cv2.typing.MatLike | None, histSize: _typing.Sequence[int], ranges: _typing.Sequence[float], hist: cv2.typing.MatLike | None = ..., accumulate: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def calcHist(images: _typing.Sequence[UMat], channels: _typing.Sequence[int], mask: UMat | None, histSize: _typing.Sequence[int], ranges: _typing.Sequence[float], hist: UMat | None = ..., accumulate: bool = ...) -> UMat: ... + +@_typing.overload +def calcOpticalFlowFarneback(prev: cv2.typing.MatLike, next: cv2.typing.MatLike, flow: cv2.typing.MatLike, pyr_scale: float, levels: int, winsize: int, iterations: int, poly_n: int, poly_sigma: float, flags: int) -> cv2.typing.MatLike: ... +@_typing.overload +def calcOpticalFlowFarneback(prev: UMat, next: UMat, flow: UMat, pyr_scale: float, levels: int, winsize: int, iterations: int, poly_n: int, poly_sigma: float, flags: int) -> UMat: ... + +@_typing.overload +def calcOpticalFlowPyrLK(prevImg: cv2.typing.MatLike, nextImg: cv2.typing.MatLike, prevPts: cv2.typing.MatLike, nextPts: cv2.typing.MatLike, status: cv2.typing.MatLike | None = ..., err: cv2.typing.MatLike | None = ..., winSize: cv2.typing.Size = ..., maxLevel: int = ..., criteria: cv2.typing.TermCriteria = ..., flags: int = ..., minEigThreshold: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def calcOpticalFlowPyrLK(prevImg: UMat, nextImg: UMat, prevPts: UMat, nextPts: UMat, status: UMat | None = ..., err: UMat | None = ..., winSize: cv2.typing.Size = ..., maxLevel: int = ..., criteria: cv2.typing.TermCriteria = ..., flags: int = ..., minEigThreshold: float = ...) -> tuple[UMat, UMat, UMat]: ... + +@_typing.overload +def calibrateCamera(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ... +@_typing.overload +def calibrateCamera(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat]]: ... 
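calcHist and calcOpticalFlowPyrLK above are the usual histogram and sparse-tracking entry points; a minimal sketch over two hypothetical consecutive grayscale frames, using cv2.goodFeaturesToTrack (a standard function outside this stub excerpt) to seed the tracker:

import cv2

prev_gray = cv2.imread("frame0.png", cv2.IMREAD_GRAYSCALE)  # hypothetical frames
next_gray = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)

# 256-bin intensity histogram of the first frame.
hist = cv2.calcHist([prev_gray], [0], None, [256], [0, 256])

# Track up to 200 Shi-Tomasi corners from frame0 to frame1.
p0 = cv2.goodFeaturesToTrack(prev_gray, 200, 0.01, 10)
p1, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, next_gray, p0, None)
good_new = p1[status.ravel() == 1]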
+ +@_typing.overload +def calibrateCameraExtended(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def calibrateCameraExtended(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., stdDeviationsIntrinsics: UMat | None = ..., stdDeviationsExtrinsics: UMat | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat, UMat, UMat]: ... + +@_typing.overload +def calibrateCameraRO(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., newObjPoints: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... +@_typing.overload +def calibrateCameraRO(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., newObjPoints: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat]: ... + +@_typing.overload +def calibrateCameraROExtended(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., newObjPoints: cv2.typing.MatLike | None = ..., stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., stdDeviationsObjPoints: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... 
+@_typing.overload +def calibrateCameraROExtended(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., newObjPoints: UMat | None = ..., stdDeviationsIntrinsics: UMat | None = ..., stdDeviationsExtrinsics: UMat | None = ..., stdDeviationsObjPoints: UMat | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat, UMat, UMat, UMat, UMat]: ... + +@_typing.overload +def calibrateHandEye(R_gripper2base: _typing.Sequence[cv2.typing.MatLike], t_gripper2base: _typing.Sequence[cv2.typing.MatLike], R_target2cam: _typing.Sequence[cv2.typing.MatLike], t_target2cam: _typing.Sequence[cv2.typing.MatLike], R_cam2gripper: cv2.typing.MatLike | None = ..., t_cam2gripper: cv2.typing.MatLike | None = ..., method: HandEyeCalibrationMethod = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def calibrateHandEye(R_gripper2base: _typing.Sequence[UMat], t_gripper2base: _typing.Sequence[UMat], R_target2cam: _typing.Sequence[UMat], t_target2cam: _typing.Sequence[UMat], R_cam2gripper: UMat | None = ..., t_cam2gripper: UMat | None = ..., method: HandEyeCalibrationMethod = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def calibrateRobotWorldHandEye(R_world2cam: _typing.Sequence[cv2.typing.MatLike], t_world2cam: _typing.Sequence[cv2.typing.MatLike], R_base2gripper: _typing.Sequence[cv2.typing.MatLike], t_base2gripper: _typing.Sequence[cv2.typing.MatLike], R_base2world: cv2.typing.MatLike | None = ..., t_base2world: cv2.typing.MatLike | None = ..., R_gripper2cam: cv2.typing.MatLike | None = ..., t_gripper2cam: cv2.typing.MatLike | None = ..., method: RobotWorldHandEyeCalibrationMethod = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def calibrateRobotWorldHandEye(R_world2cam: _typing.Sequence[UMat], t_world2cam: _typing.Sequence[UMat], R_base2gripper: _typing.Sequence[UMat], t_base2gripper: _typing.Sequence[UMat], R_base2world: UMat | None = ..., t_base2world: UMat | None = ..., R_gripper2cam: UMat | None = ..., t_gripper2cam: UMat | None = ..., method: RobotWorldHandEyeCalibrationMethod = ...) -> tuple[UMat, UMat, UMat, UMat]: ... + +@_typing.overload +def calibrationMatrixValues(cameraMatrix: cv2.typing.MatLike, imageSize: cv2.typing.Size, apertureWidth: float, apertureHeight: float) -> tuple[float, float, float, cv2.typing.Point2d, float]: ... +@_typing.overload +def calibrationMatrixValues(cameraMatrix: UMat, imageSize: cv2.typing.Size, apertureWidth: float, apertureHeight: float) -> tuple[float, float, float, cv2.typing.Point2d, float]: ... + +@_typing.overload +def cartToPolar(x: cv2.typing.MatLike, y: cv2.typing.MatLike, magnitude: cv2.typing.MatLike | None = ..., angle: cv2.typing.MatLike | None = ..., angleInDegrees: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def cartToPolar(x: UMat, y: UMat, magnitude: UMat | None = ..., angle: UMat | None = ..., angleInDegrees: bool = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def checkChessboard(img: cv2.typing.MatLike, size: cv2.typing.Size) -> bool: ... +@_typing.overload +def checkChessboard(img: UMat, size: cv2.typing.Size) -> bool: ... + +def checkHardwareSupport(feature: int) -> bool: ... 
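The calibrateCamera* family above shares the objectPoints/imagePoints convention; a minimal single-camera sketch, assuming hypothetical chessboard photos with 9x6 inner corners and unit square size, and using cv2.findChessboardCorners (a standard function outside this stub excerpt):

import cv2
import numpy as np

pattern = (9, 6)                                   # inner corners per row/column
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

objpoints, imgpoints = [], []
for path in ("board0.png", "board1.png", "board2.png"):   # hypothetical images
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(gray, pattern)
    if found:
        objpoints.append(objp)
        imgpoints.append(corners)

# Passing None for cameraMatrix/distCoeffs lets OpenCV estimate them from scratch.
rms, K, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)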
+ +@_typing.overload +def checkRange(a: cv2.typing.MatLike, quiet: bool = ..., minVal: float = ..., maxVal: float = ...) -> tuple[bool, cv2.typing.Point]: ... +@_typing.overload +def checkRange(a: UMat, quiet: bool = ..., minVal: float = ..., maxVal: float = ...) -> tuple[bool, cv2.typing.Point]: ... + +@_typing.overload +def circle(img: cv2.typing.MatLike, center: cv2.typing.Point, radius: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def circle(img: UMat, center: cv2.typing.Point, radius: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ... + +def clipLine(imgRect: cv2.typing.Rect, pt1: cv2.typing.Point, pt2: cv2.typing.Point) -> tuple[bool, cv2.typing.Point, cv2.typing.Point]: ... + +@_typing.overload +def colorChange(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., red_mul: float = ..., green_mul: float = ..., blue_mul: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def colorChange(src: UMat, mask: UMat, dst: UMat | None = ..., red_mul: float = ..., green_mul: float = ..., blue_mul: float = ...) -> UMat: ... + +@_typing.overload +def compare(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, cmpop: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def compare(src1: UMat, src2: UMat, cmpop: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def compareHist(H1: cv2.typing.MatLike, H2: cv2.typing.MatLike, method: int) -> float: ... +@_typing.overload +def compareHist(H1: UMat, H2: UMat, method: int) -> float: ... + +@_typing.overload +def completeSymm(m: cv2.typing.MatLike, lowerToUpper: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def completeSymm(m: UMat, lowerToUpper: bool = ...) -> UMat: ... + +@_typing.overload +def composeRT(rvec1: cv2.typing.MatLike, tvec1: cv2.typing.MatLike, rvec2: cv2.typing.MatLike, tvec2: cv2.typing.MatLike, rvec3: cv2.typing.MatLike | None = ..., tvec3: cv2.typing.MatLike | None = ..., dr3dr1: cv2.typing.MatLike | None = ..., dr3dt1: cv2.typing.MatLike | None = ..., dr3dr2: cv2.typing.MatLike | None = ..., dr3dt2: cv2.typing.MatLike | None = ..., dt3dr1: cv2.typing.MatLike | None = ..., dt3dt1: cv2.typing.MatLike | None = ..., dt3dr2: cv2.typing.MatLike | None = ..., dt3dt2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def composeRT(rvec1: UMat, tvec1: UMat, rvec2: UMat, tvec2: UMat, rvec3: UMat | None = ..., tvec3: UMat | None = ..., dr3dr1: UMat | None = ..., dr3dt1: UMat | None = ..., dr3dr2: UMat | None = ..., dr3dt2: UMat | None = ..., dt3dr1: UMat | None = ..., dt3dt1: UMat | None = ..., dt3dr2: UMat | None = ..., dt3dt2: UMat | None = ...) -> tuple[UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ... + +@_typing.overload +def computeCorrespondEpilines(points: cv2.typing.MatLike, whichImage: int, F: cv2.typing.MatLike, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def computeCorrespondEpilines(points: UMat, whichImage: int, F: UMat, lines: UMat | None = ...) -> UMat: ... + +@_typing.overload +def computeECC(templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike, inputMask: cv2.typing.MatLike | None = ...) 
-> float: ... +@_typing.overload +def computeECC(templateImage: UMat, inputImage: UMat, inputMask: UMat | None = ...) -> float: ... + +@_typing.overload +def connectedComponents(image: cv2.typing.MatLike, labels: cv2.typing.MatLike | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, cv2.typing.MatLike]: ... +@_typing.overload +def connectedComponents(image: UMat, labels: UMat | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, UMat]: ... + +@_typing.overload +def connectedComponentsWithAlgorithm(image: cv2.typing.MatLike, connectivity: int, ltype: int, ccltype: int, labels: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ... +@_typing.overload +def connectedComponentsWithAlgorithm(image: UMat, connectivity: int, ltype: int, ccltype: int, labels: UMat | None = ...) -> tuple[int, UMat]: ... + +@_typing.overload +def connectedComponentsWithStats(image: cv2.typing.MatLike, labels: cv2.typing.MatLike | None = ..., stats: cv2.typing.MatLike | None = ..., centroids: cv2.typing.MatLike | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def connectedComponentsWithStats(image: UMat, labels: UMat | None = ..., stats: UMat | None = ..., centroids: UMat | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, UMat, UMat, UMat]: ... + +@_typing.overload +def connectedComponentsWithStatsWithAlgorithm(image: cv2.typing.MatLike, connectivity: int, ltype: int, ccltype: int, labels: cv2.typing.MatLike | None = ..., stats: cv2.typing.MatLike | None = ..., centroids: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def connectedComponentsWithStatsWithAlgorithm(image: UMat, connectivity: int, ltype: int, ccltype: int, labels: UMat | None = ..., stats: UMat | None = ..., centroids: UMat | None = ...) -> tuple[int, UMat, UMat, UMat]: ... + +@_typing.overload +def contourArea(contour: cv2.typing.MatLike, oriented: bool = ...) -> float: ... +@_typing.overload +def contourArea(contour: UMat, oriented: bool = ...) -> float: ... + +@_typing.overload +def convertFp16(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def convertFp16(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def convertMaps(map1: cv2.typing.MatLike, map2: cv2.typing.MatLike, dstmap1type: int, dstmap1: cv2.typing.MatLike | None = ..., dstmap2: cv2.typing.MatLike | None = ..., nninterpolation: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def convertMaps(map1: UMat, map2: UMat, dstmap1type: int, dstmap1: UMat | None = ..., dstmap2: UMat | None = ..., nninterpolation: bool = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def convertPointsFromHomogeneous(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def convertPointsFromHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def convertPointsToHomogeneous(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def convertPointsToHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def convertScaleAbs(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., alpha: float = ..., beta: float = ...) -> cv2.typing.MatLike: ... 
+@_typing.overload +def convertScaleAbs(src: UMat, dst: UMat | None = ..., alpha: float = ..., beta: float = ...) -> UMat: ... + +@_typing.overload +def convexHull(points: cv2.typing.MatLike, hull: cv2.typing.MatLike | None = ..., clockwise: bool = ..., returnPoints: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def convexHull(points: UMat, hull: UMat | None = ..., clockwise: bool = ..., returnPoints: bool = ...) -> UMat: ... + +@_typing.overload +def convexityDefects(contour: cv2.typing.MatLike, convexhull: cv2.typing.MatLike, convexityDefects: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def convexityDefects(contour: UMat, convexhull: UMat, convexityDefects: UMat | None = ...) -> UMat: ... + +@_typing.overload +def copyMakeBorder(src: cv2.typing.MatLike, top: int, bottom: int, left: int, right: int, borderType: int, dst: cv2.typing.MatLike | None = ..., value: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def copyMakeBorder(src: UMat, top: int, bottom: int, left: int, right: int, borderType: int, dst: UMat | None = ..., value: cv2.typing.Scalar = ...) -> UMat: ... + +@_typing.overload +def copyTo(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def copyTo(src: UMat, mask: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def cornerEigenValsAndVecs(src: cv2.typing.MatLike, blockSize: int, ksize: int, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def cornerEigenValsAndVecs(src: UMat, blockSize: int, ksize: int, dst: UMat | None = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def cornerHarris(src: cv2.typing.MatLike, blockSize: int, ksize: int, k: float, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def cornerHarris(src: UMat, blockSize: int, ksize: int, k: float, dst: UMat | None = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def cornerMinEigenVal(src: cv2.typing.MatLike, blockSize: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def cornerMinEigenVal(src: UMat, blockSize: int, dst: UMat | None = ..., ksize: int = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def cornerSubPix(image: cv2.typing.MatLike, corners: cv2.typing.MatLike, winSize: cv2.typing.Size, zeroZone: cv2.typing.Size, criteria: cv2.typing.TermCriteria) -> cv2.typing.MatLike: ... +@_typing.overload +def cornerSubPix(image: UMat, corners: UMat, winSize: cv2.typing.Size, zeroZone: cv2.typing.Size, criteria: cv2.typing.TermCriteria) -> UMat: ... + +@_typing.overload +def correctMatches(F: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, newPoints1: cv2.typing.MatLike | None = ..., newPoints2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def correctMatches(F: UMat, points1: UMat, points2: UMat, newPoints1: UMat | None = ..., newPoints2: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def countNonZero(src: cv2.typing.MatLike) -> int: ... +@_typing.overload +def countNonZero(src: UMat) -> int: ... + +def createAlignMTB(max_bits: int = ..., exclude_range: int = ..., cut: bool = ...) -> AlignMTB: ... 
+ +def createBackgroundSubtractorKNN(history: int = ..., dist2Threshold: float = ..., detectShadows: bool = ...) -> BackgroundSubtractorKNN: ... + +def createBackgroundSubtractorMOG2(history: int = ..., varThreshold: float = ..., detectShadows: bool = ...) -> BackgroundSubtractorMOG2: ... + +def createCLAHE(clipLimit: float = ..., tileGridSize: cv2.typing.Size = ...) -> CLAHE: ... + +def createCalibrateDebevec(samples: int = ..., lambda_: float = ..., random: bool = ...) -> CalibrateDebevec: ... + +def createCalibrateRobertson(max_iter: int = ..., threshold: float = ...) -> CalibrateRobertson: ... + +def createGeneralizedHoughBallard() -> GeneralizedHoughBallard: ... + +def createGeneralizedHoughGuil() -> GeneralizedHoughGuil: ... + +@_typing.overload +def createHanningWindow(winSize: cv2.typing.Size, type: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def createHanningWindow(winSize: cv2.typing.Size, type: int, dst: UMat | None = ...) -> UMat: ... + +def createLineSegmentDetector(refine: int = ..., scale: float = ..., sigma_scale: float = ..., quant: float = ..., ang_th: float = ..., log_eps: float = ..., density_th: float = ..., n_bins: int = ...) -> LineSegmentDetector: ... + +def createMergeDebevec() -> MergeDebevec: ... + +def createMergeMertens(contrast_weight: float = ..., saturation_weight: float = ..., exposure_weight: float = ...) -> MergeMertens: ... + +def createMergeRobertson() -> MergeRobertson: ... + +def createTonemap(gamma: float = ...) -> Tonemap: ... + +def createTonemapDrago(gamma: float = ..., saturation: float = ..., bias: float = ...) -> TonemapDrago: ... + +def createTonemapMantiuk(gamma: float = ..., scale: float = ..., saturation: float = ...) -> TonemapMantiuk: ... + +def createTonemapReinhard(gamma: float = ..., intensity: float = ..., light_adapt: float = ..., color_adapt: float = ...) -> TonemapReinhard: ... + +def cubeRoot(val: float) -> float: ... + +def currentUIFramework() -> str: ... + +@_typing.overload +def cvtColor(src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | None = ..., dstCn: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def cvtColor(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ... + +@_typing.overload +def cvtColorTwoPlane(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def cvtColorTwoPlane(src1: UMat, src2: UMat, code: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def dct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def dct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ... + +@_typing.overload +def decolor(src: cv2.typing.MatLike, grayscale: cv2.typing.MatLike | None = ..., color_boost: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def decolor(src: UMat, grayscale: UMat | None = ..., color_boost: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def decomposeEssentialMat(E: cv2.typing.MatLike, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def decomposeEssentialMat(E: UMat, R1: UMat | None = ..., R2: UMat | None = ..., t: UMat | None = ...) -> tuple[UMat, UMat, UMat]: ... 
+ +@_typing.overload +def decomposeHomographyMat(H: cv2.typing.MatLike, K: cv2.typing.MatLike, rotations: _typing.Sequence[cv2.typing.MatLike] | None = ..., translations: _typing.Sequence[cv2.typing.MatLike] | None = ..., normals: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ... +@_typing.overload +def decomposeHomographyMat(H: UMat, K: UMat, rotations: _typing.Sequence[UMat] | None = ..., translations: _typing.Sequence[UMat] | None = ..., normals: _typing.Sequence[UMat] | None = ...) -> tuple[int, _typing.Sequence[UMat], _typing.Sequence[UMat], _typing.Sequence[UMat]]: ... + +@_typing.overload +def decomposeProjectionMatrix(projMatrix: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike | None = ..., rotMatrix: cv2.typing.MatLike | None = ..., transVect: cv2.typing.MatLike | None = ..., rotMatrixX: cv2.typing.MatLike | None = ..., rotMatrixY: cv2.typing.MatLike | None = ..., rotMatrixZ: cv2.typing.MatLike | None = ..., eulerAngles: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def decomposeProjectionMatrix(projMatrix: UMat, cameraMatrix: UMat | None = ..., rotMatrix: UMat | None = ..., transVect: UMat | None = ..., rotMatrixX: UMat | None = ..., rotMatrixY: UMat | None = ..., rotMatrixZ: UMat | None = ..., eulerAngles: UMat | None = ...) -> tuple[UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ... + +@_typing.overload +def demosaicing(src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | None = ..., dstCn: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def demosaicing(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ... + +def denoise_TVL1(observations: _typing.Sequence[cv2.typing.MatLike], result: cv2.typing.MatLike, lambda_: float = ..., niters: int = ...) -> None: ... + +def destroyAllWindows() -> None: ... + +def destroyWindow(winname: str) -> None: ... + +@_typing.overload +def detailEnhance(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def detailEnhance(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ... + +@_typing.overload +def determinant(mtx: cv2.typing.MatLike) -> float: ... +@_typing.overload +def determinant(mtx: UMat) -> float: ... + +@_typing.overload +def dft(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ..., nonzeroRows: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def dft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ... + +@_typing.overload +def dilate(src: cv2.typing.MatLike, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def dilate(src: UMat, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ... + +def displayOverlay(winname: str, text: str, delayms: int = ...) -> None: ... + +def displayStatusBar(winname: str, text: str, delayms: int = ...) -> None: ... 
+ +@_typing.overload +def distanceTransform(src: cv2.typing.MatLike, distanceType: int, maskSize: int, dst: cv2.typing.MatLike | None = ..., dstType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def distanceTransform(src: UMat, distanceType: int, maskSize: int, dst: UMat | None = ..., dstType: int = ...) -> UMat: ... + +@_typing.overload +def distanceTransformWithLabels(src: cv2.typing.MatLike, distanceType: int, maskSize: int, dst: cv2.typing.MatLike | None = ..., labels: cv2.typing.MatLike | None = ..., labelType: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def distanceTransformWithLabels(src: UMat, distanceType: int, maskSize: int, dst: UMat | None = ..., labels: UMat | None = ..., labelType: int = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def divSpectrums(a: cv2.typing.MatLike, b: cv2.typing.MatLike, flags: int, c: cv2.typing.MatLike | None = ..., conjB: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def divSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ... + +@_typing.overload +def divide(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., scale: float = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def divide(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ... +@_typing.overload +def divide(scale: float, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def divide(scale: float, src2: UMat, dst: UMat | None = ..., dtype: int = ...) -> UMat: ... + +@_typing.overload +def drawChessboardCorners(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike, patternWasFound: bool) -> cv2.typing.MatLike: ... +@_typing.overload +def drawChessboardCorners(image: UMat, patternSize: cv2.typing.Size, corners: UMat, patternWasFound: bool) -> UMat: ... + +@_typing.overload +def drawContours(image: cv2.typing.MatLike, contours: _typing.Sequence[cv2.typing.MatLike], contourIdx: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., hierarchy: cv2.typing.MatLike | None = ..., maxLevel: int = ..., offset: cv2.typing.Point = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawContours(image: UMat, contours: _typing.Sequence[UMat], contourIdx: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., hierarchy: UMat | None = ..., maxLevel: int = ..., offset: cv2.typing.Point = ...) -> UMat: ... + +@_typing.overload +def drawFrameAxes(image: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, length: float, thickness: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawFrameAxes(image: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat, tvec: UMat, length: float, thickness: int = ...) -> UMat: ... + +@_typing.overload +def drawKeypoints(image: cv2.typing.MatLike, keypoints: _typing.Sequence[KeyPoint], outImage: cv2.typing.MatLike, color: cv2.typing.Scalar = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawKeypoints(image: UMat, keypoints: _typing.Sequence[KeyPoint], outImage: UMat, color: cv2.typing.Scalar = ..., flags: DrawMatchesFlags = ...) -> UMat: ... 
+ +@_typing.overload +def drawMarker(img: cv2.typing.MatLike, position: cv2.typing.Point, color: cv2.typing.Scalar, markerType: int = ..., markerSize: int = ..., thickness: int = ..., line_type: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawMarker(img: UMat, position: cv2.typing.Point, color: cv2.typing.Scalar, markerType: int = ..., markerSize: int = ..., thickness: int = ..., line_type: int = ...) -> UMat: ... + +@_typing.overload +def drawMatches(img1: cv2.typing.MatLike, keypoints1: _typing.Sequence[KeyPoint], img2: cv2.typing.MatLike, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: cv2.typing.MatLike, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawMatches(img1: UMat, keypoints1: _typing.Sequence[KeyPoint], img2: UMat, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: UMat, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> UMat: ... +@_typing.overload +def drawMatches(img1: cv2.typing.MatLike, keypoints1: _typing.Sequence[KeyPoint], img2: cv2.typing.MatLike, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: cv2.typing.MatLike, matchesThickness: int, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawMatches(img1: UMat, keypoints1: _typing.Sequence[KeyPoint], img2: UMat, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: UMat, matchesThickness: int, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> UMat: ... + +@_typing.overload +def drawMatchesKnn(img1: cv2.typing.MatLike, keypoints1: _typing.Sequence[KeyPoint], img2: cv2.typing.MatLike, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[_typing.Sequence[DMatch]], outImg: cv2.typing.MatLike, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[_typing.Sequence[str]] = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawMatchesKnn(img1: UMat, keypoints1: _typing.Sequence[KeyPoint], img2: UMat, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[_typing.Sequence[DMatch]], outImg: UMat, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[_typing.Sequence[str]] = ..., flags: DrawMatchesFlags = ...) -> UMat: ... + +@_typing.overload +def edgePreservingFilter(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ..., sigma_s: float = ..., sigma_r: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def edgePreservingFilter(src: UMat, dst: UMat | None = ..., flags: int = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ... + +@_typing.overload +def eigen(src: cv2.typing.MatLike, eigenvalues: cv2.typing.MatLike | None = ..., eigenvectors: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ... 
+@_typing.overload +def eigen(src: UMat, eigenvalues: UMat | None = ..., eigenvectors: UMat | None = ...) -> tuple[bool, UMat, UMat]: ... + +@_typing.overload +def eigenNonSymmetric(src: cv2.typing.MatLike, eigenvalues: cv2.typing.MatLike | None = ..., eigenvectors: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def eigenNonSymmetric(src: UMat, eigenvalues: UMat | None = ..., eigenvectors: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def ellipse(img: cv2.typing.MatLike, center: cv2.typing.Point, axes: cv2.typing.Size, angle: float, startAngle: float, endAngle: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def ellipse(img: UMat, center: cv2.typing.Point, axes: cv2.typing.Size, angle: float, startAngle: float, endAngle: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ... +@_typing.overload +def ellipse(img: cv2.typing.MatLike, box: cv2.typing.RotatedRect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def ellipse(img: UMat, box: cv2.typing.RotatedRect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ...) -> UMat: ... + +def ellipse2Poly(center: cv2.typing.Point, axes: cv2.typing.Size, angle: int, arcStart: int, arcEnd: int, delta: int) -> _typing.Sequence[cv2.typing.Point]: ... + +def empty_array_desc() -> GArrayDesc: ... + +def empty_gopaque_desc() -> GOpaqueDesc: ... + +def empty_scalar_desc() -> GScalarDesc: ... + +@_typing.overload +def equalizeHist(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def equalizeHist(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def erode(src: cv2.typing.MatLike, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def erode(src: UMat, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ... + +@_typing.overload +def estimateAffine2D(from_: cv2.typing.MatLike, to: cv2.typing.MatLike, inliers: cv2.typing.MatLike | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def estimateAffine2D(from_: UMat, to: UMat, inliers: UMat | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +@_typing.overload +def estimateAffine2D(pts1: cv2.typing.MatLike, pts2: cv2.typing.MatLike, params: UsacParams, inliers: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def estimateAffine2D(pts1: UMat, pts2: UMat, params: UsacParams, inliers: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +
+@_typing.overload +def estimateAffine3D(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, out: cv2.typing.MatLike | None = ..., inliers: cv2.typing.MatLike | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def estimateAffine3D(src: UMat, dst: UMat, out: UMat | None = ..., inliers: UMat | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, UMat, UMat]: ... +@_typing.overload +def estimateAffine3D(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, force_rotation: bool = ...) -> tuple[cv2.typing.MatLike, float]: ... +@_typing.overload +def estimateAffine3D(src: UMat, dst: UMat, force_rotation: bool = ...) -> tuple[cv2.typing.MatLike, float]: ... + +@_typing.overload +def estimateAffinePartial2D(from_: cv2.typing.MatLike, to: cv2.typing.MatLike, inliers: cv2.typing.MatLike | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def estimateAffinePartial2D(from_: UMat, to: UMat, inliers: UMat | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, UMat]: ... + +@_typing.overload +def estimateChessboardSharpness(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike, rise_distance: float = ..., vertical: bool = ..., sharpness: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Scalar, cv2.typing.MatLike]: ... +@_typing.overload +def estimateChessboardSharpness(image: UMat, patternSize: cv2.typing.Size, corners: UMat, rise_distance: float = ..., vertical: bool = ..., sharpness: UMat | None = ...) -> tuple[cv2.typing.Scalar, UMat]: ... + +@_typing.overload +def estimateTranslation3D(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, out: cv2.typing.MatLike | None = ..., inliers: cv2.typing.MatLike | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def estimateTranslation3D(src: UMat, dst: UMat, out: UMat | None = ..., inliers: UMat | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, UMat, UMat]: ... + +@_typing.overload +def exp(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def exp(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def extractChannel(src: cv2.typing.MatLike, coi: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def extractChannel(src: UMat, coi: int, dst: UMat | None = ...) -> UMat: ... + +def fastAtan2(y: float, x: float) -> float: ... + +@_typing.overload +def fastNlMeansDenoising(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fastNlMeansDenoising(src: UMat, dst: UMat | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ... +@_typing.overload +def fastNlMeansDenoising(src: cv2.typing.MatLike, h: _typing.Sequence[float], dst: cv2.typing.MatLike | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fastNlMeansDenoising(src: UMat, h: _typing.Sequence[float], dst: UMat | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> UMat: ...
+ +@_typing.overload +def fastNlMeansDenoisingColored(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fastNlMeansDenoisingColored(src: UMat, dst: UMat | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ... + +@_typing.overload +def fastNlMeansDenoisingColoredMulti(srcImgs: _typing.Sequence[cv2.typing.MatLike], imgToDenoiseIndex: int, temporalWindowSize: int, dst: cv2.typing.MatLike | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fastNlMeansDenoisingColoredMulti(srcImgs: _typing.Sequence[UMat], imgToDenoiseIndex: int, temporalWindowSize: int, dst: UMat | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ... + +@_typing.overload +def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[cv2.typing.MatLike], imgToDenoiseIndex: int, temporalWindowSize: int, dst: cv2.typing.MatLike | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[UMat], imgToDenoiseIndex: int, temporalWindowSize: int, dst: UMat | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ... +@_typing.overload +def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[cv2.typing.MatLike], imgToDenoiseIndex: int, temporalWindowSize: int, h: _typing.Sequence[float], dst: cv2.typing.MatLike | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[UMat], imgToDenoiseIndex: int, temporalWindowSize: int, h: _typing.Sequence[float], dst: UMat | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> UMat: ... + +@_typing.overload +def fillConvexPoly(img: cv2.typing.MatLike, points: cv2.typing.MatLike, color: cv2.typing.Scalar, lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fillConvexPoly(img: UMat, points: UMat, color: cv2.typing.Scalar, lineType: int = ..., shift: int = ...) -> UMat: ... + +@_typing.overload +def fillPoly(img: cv2.typing.MatLike, pts: _typing.Sequence[cv2.typing.MatLike], color: cv2.typing.Scalar, lineType: int = ..., shift: int = ..., offset: cv2.typing.Point = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fillPoly(img: UMat, pts: _typing.Sequence[UMat], color: cv2.typing.Scalar, lineType: int = ..., shift: int = ..., offset: cv2.typing.Point = ...) -> UMat: ... + +@_typing.overload +def filter2D(src: cv2.typing.MatLike, ddepth: int, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def filter2D(src: UMat, ddepth: int, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> UMat: ... 
+ +@_typing.overload +def filterHomographyDecompByVisibleRefpoints(rotations: _typing.Sequence[cv2.typing.MatLike], normals: _typing.Sequence[cv2.typing.MatLike], beforePoints: cv2.typing.MatLike, afterPoints: cv2.typing.MatLike, possibleSolutions: cv2.typing.MatLike | None = ..., pointsMask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def filterHomographyDecompByVisibleRefpoints(rotations: _typing.Sequence[UMat], normals: _typing.Sequence[UMat], beforePoints: UMat, afterPoints: UMat, possibleSolutions: UMat | None = ..., pointsMask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def filterSpeckles(img: cv2.typing.MatLike, newVal: float, maxSpeckleSize: int, maxDiff: float, buf: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def filterSpeckles(img: UMat, newVal: float, maxSpeckleSize: int, maxDiff: float, buf: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def find4QuadCornerSubpix(img: cv2.typing.MatLike, corners: cv2.typing.MatLike, region_size: cv2.typing.Size) -> tuple[bool, cv2.typing.MatLike]: ... +@_typing.overload +def find4QuadCornerSubpix(img: UMat, corners: UMat, region_size: cv2.typing.Size) -> tuple[bool, UMat]: ... + +@_typing.overload +def findChessboardCorners(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike]: ... +@_typing.overload +def findChessboardCorners(image: UMat, patternSize: cv2.typing.Size, corners: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ... + +@_typing.overload +def findChessboardCornersSB(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike]: ... +@_typing.overload +def findChessboardCornersSB(image: UMat, patternSize: cv2.typing.Size, corners: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ... + +@_typing.overload +def findChessboardCornersSBWithMeta(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, flags: int, corners: cv2.typing.MatLike | None = ..., meta: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findChessboardCornersSBWithMeta(image: UMat, patternSize: cv2.typing.Size, flags: int, corners: UMat | None = ..., meta: UMat | None = ...) -> tuple[bool, UMat, UMat]: ... + +@_typing.overload +def findCirclesGrid(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, flags: int, blobDetector: cv2.typing.FeatureDetector, parameters: CirclesGridFinderParameters, centers: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... +@_typing.overload +def findCirclesGrid(image: UMat, patternSize: cv2.typing.Size, flags: int, blobDetector: cv2.typing.FeatureDetector, parameters: CirclesGridFinderParameters, centers: UMat | None = ...) -> tuple[bool, UMat]: ... +@_typing.overload +def findCirclesGrid(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, centers: cv2.typing.MatLike | None = ..., flags: int = ..., blobDetector: cv2.typing.FeatureDetector = ...) -> tuple[bool, cv2.typing.MatLike]: ... +@_typing.overload +def findCirclesGrid(image: UMat, patternSize: cv2.typing.Size, centers: UMat | None = ..., flags: int = ..., blobDetector: cv2.typing.FeatureDetector = ...) -> tuple[bool, UMat]: ... 
+ +@_typing.overload +def findContours(image: cv2.typing.MatLike, mode: int, method: int, contours: _typing.Sequence[cv2.typing.MatLike] | None = ..., hierarchy: cv2.typing.MatLike | None = ..., offset: cv2.typing.Point = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... +@_typing.overload +def findContours(image: UMat, mode: int, method: int, contours: _typing.Sequence[UMat] | None = ..., hierarchy: UMat | None = ..., offset: cv2.typing.Point = ...) -> tuple[_typing.Sequence[UMat], UMat]: ... + +@_typing.overload +def findContoursLinkRuns(image: cv2.typing.MatLike, contours: _typing.Sequence[cv2.typing.MatLike] | None = ..., hierarchy: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... +@_typing.overload +def findContoursLinkRuns(image: UMat, contours: _typing.Sequence[UMat] | None = ..., hierarchy: UMat | None = ...) -> tuple[_typing.Sequence[UMat], UMat]: ... +@_typing.overload +def findContoursLinkRuns(image: cv2.typing.MatLike, contours: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ... +@_typing.overload +def findContoursLinkRuns(image: UMat, contours: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[UMat]: ... + +@_typing.overload +def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findEssentialMat(points1: UMat, points2: UMat, cameraMatrix: UMat, method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +@_typing.overload +def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, focal: float = ..., pp: cv2.typing.Point2d = ..., method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findEssentialMat(points1: UMat, points2: UMat, focal: float = ..., pp: cv2.typing.Point2d = ..., method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +@_typing.overload +def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, method: int = ..., prob: float = ..., threshold: float = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findEssentialMat(points1: UMat, points2: UMat, cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, method: int = ..., prob: float = ..., threshold: float = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +@_typing.overload +def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, dist_coeff1: cv2.typing.MatLike, dist_coeff2: cv2.typing.MatLike, params: UsacParams, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... 
+@_typing.overload +def findEssentialMat(points1: UMat, points2: UMat, cameraMatrix1: UMat, cameraMatrix2: UMat, dist_coeff1: UMat, dist_coeff2: UMat, params: UsacParams, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... + +@_typing.overload +def findFundamentalMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, method: int, ransacReprojThreshold: float, confidence: float, maxIters: int, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findFundamentalMat(points1: UMat, points2: UMat, method: int, ransacReprojThreshold: float, confidence: float, maxIters: int, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +@_typing.overload +def findFundamentalMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, method: int = ..., ransacReprojThreshold: float = ..., confidence: float = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findFundamentalMat(points1: UMat, points2: UMat, method: int = ..., ransacReprojThreshold: float = ..., confidence: float = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +@_typing.overload +def findFundamentalMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, params: UsacParams, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findFundamentalMat(points1: UMat, points2: UMat, params: UsacParams, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... + +@_typing.overload +def findHomography(srcPoints: cv2.typing.MatLike, dstPoints: cv2.typing.MatLike, method: int = ..., ransacReprojThreshold: float = ..., mask: cv2.typing.MatLike | None = ..., maxIters: int = ..., confidence: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findHomography(srcPoints: UMat, dstPoints: UMat, method: int = ..., ransacReprojThreshold: float = ..., mask: UMat | None = ..., maxIters: int = ..., confidence: float = ...) -> tuple[cv2.typing.MatLike, UMat]: ... +@_typing.overload +def findHomography(srcPoints: cv2.typing.MatLike, dstPoints: cv2.typing.MatLike, params: UsacParams, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def findHomography(srcPoints: UMat, dstPoints: UMat, params: UsacParams, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ... + +@_typing.overload +def findNonZero(src: cv2.typing.MatLike, idx: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def findNonZero(src: UMat, idx: UMat | None = ...) -> UMat: ... + +@_typing.overload +def findTransformECC(templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike, warpMatrix: cv2.typing.MatLike, motionType: int, criteria: cv2.typing.TermCriteria, inputMask: cv2.typing.MatLike, gaussFiltSize: int) -> tuple[float, cv2.typing.MatLike]: ... +@_typing.overload +def findTransformECC(templateImage: UMat, inputImage: UMat, warpMatrix: UMat, motionType: int, criteria: cv2.typing.TermCriteria, inputMask: UMat, gaussFiltSize: int) -> tuple[float, UMat]: ... +@_typing.overload +def findTransformECC(templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike, warpMatrix: cv2.typing.MatLike, motionType: int = ..., criteria: cv2.typing.TermCriteria = ..., inputMask: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike]: ... 
+@_typing.overload +def findTransformECC(templateImage: UMat, inputImage: UMat, warpMatrix: UMat, motionType: int = ..., criteria: cv2.typing.TermCriteria = ..., inputMask: UMat | None = ...) -> tuple[float, UMat]: ... + +@_typing.overload +def fitEllipse(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@_typing.overload +def fitEllipse(points: UMat) -> cv2.typing.RotatedRect: ... + +@_typing.overload +def fitEllipseAMS(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@_typing.overload +def fitEllipseAMS(points: UMat) -> cv2.typing.RotatedRect: ... + +@_typing.overload +def fitEllipseDirect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@_typing.overload +def fitEllipseDirect(points: UMat) -> cv2.typing.RotatedRect: ... + +@_typing.overload +def fitLine(points: cv2.typing.MatLike, distType: int, param: float, reps: float, aeps: float, line: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def fitLine(points: UMat, distType: int, param: float, reps: float, aeps: float, line: UMat | None = ...) -> UMat: ... + +@_typing.overload +def flip(src: cv2.typing.MatLike, flipCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def flip(src: UMat, flipCode: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def flipND(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def flipND(src: UMat, axis: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def floodFill(image: cv2.typing.MatLike, mask: cv2.typing.MatLike, seedPoint: cv2.typing.Point, newVal: cv2.typing.Scalar, loDiff: cv2.typing.Scalar = ..., upDiff: cv2.typing.Scalar = ..., flags: int = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.Rect]: ... +@_typing.overload +def floodFill(image: UMat, mask: UMat, seedPoint: cv2.typing.Point, newVal: cv2.typing.Scalar, loDiff: cv2.typing.Scalar = ..., upDiff: cv2.typing.Scalar = ..., flags: int = ...) -> tuple[int, UMat, UMat, cv2.typing.Rect]: ... + +@_typing.overload +def gemm(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, alpha: float, src3: cv2.typing.MatLike, beta: float, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def gemm(src1: UMat, src2: UMat, alpha: float, src3: UMat, beta: float, dst: UMat | None = ..., flags: int = ...) -> UMat: ... + +@_typing.overload +def getAffineTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@_typing.overload +def getAffineTransform(src: UMat, dst: UMat) -> cv2.typing.MatLike: ... + +def getBuildInformation() -> str: ... + +def getCPUFeaturesLine() -> str: ... + +def getCPUTickCount() -> int: ... + +@_typing.overload +def getDefaultNewCameraMatrix(cameraMatrix: cv2.typing.MatLike, imgsize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def getDefaultNewCameraMatrix(cameraMatrix: UMat, imgsize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> cv2.typing.MatLike: ... + +@_typing.overload +def getDerivKernels(dx: int, dy: int, ksize: int, kx: cv2.typing.MatLike | None = ..., ky: cv2.typing.MatLike | None = ..., normalize: bool = ..., ktype: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def getDerivKernels(dx: int, dy: int, ksize: int, kx: UMat | None = ..., ky: UMat | None = ..., normalize: bool = ..., ktype: int = ...) -> tuple[UMat, UMat]: ... 
+ +def getFontScaleFromHeight(fontFace: int, pixelHeight: int, thickness: int = ...) -> float: ... + +def getGaborKernel(ksize: cv2.typing.Size, sigma: float, theta: float, lambd: float, gamma: float, psi: float = ..., ktype: int = ...) -> cv2.typing.MatLike: ... + +def getGaussianKernel(ksize: int, sigma: float, ktype: int = ...) -> cv2.typing.MatLike: ... + +def getHardwareFeatureName(feature: int) -> str: ... + +def getLogLevel() -> int: ... + +def getNumThreads() -> int: ... + +def getNumberOfCPUs() -> int: ... + +def getOptimalDFTSize(vecsize: int) -> int: ... + +@_typing.overload +def getOptimalNewCameraMatrix(cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, imageSize: cv2.typing.Size, alpha: float, newImgSize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.Rect]: ... +@_typing.overload +def getOptimalNewCameraMatrix(cameraMatrix: UMat, distCoeffs: UMat, imageSize: cv2.typing.Size, alpha: float, newImgSize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.Rect]: ... + +@_typing.overload +def getPerspectiveTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, solveMethod: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def getPerspectiveTransform(src: UMat, dst: UMat, solveMethod: int = ...) -> cv2.typing.MatLike: ... + +@_typing.overload +def getRectSubPix(image: cv2.typing.MatLike, patchSize: cv2.typing.Size, center: cv2.typing.Point2f, patch: cv2.typing.MatLike | None = ..., patchType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def getRectSubPix(image: UMat, patchSize: cv2.typing.Size, center: cv2.typing.Point2f, patch: UMat | None = ..., patchType: int = ...) -> UMat: ... + +def getRotationMatrix2D(center: cv2.typing.Point2f, angle: float, scale: float) -> cv2.typing.MatLike: ... + +def getStructuringElement(shape: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ...) -> cv2.typing.MatLike: ... + +def getTextSize(text: str, fontFace: int, fontScale: float, thickness: int) -> tuple[cv2.typing.Size, int]: ... + +def getThreadNum() -> int: ... + +def getTickCount() -> int: ... + +def getTickFrequency() -> float: ... + +def getTrackbarPos(trackbarname: str, winname: str) -> int: ... + +def getValidDisparityROI(roi1: cv2.typing.Rect, roi2: cv2.typing.Rect, minDisparity: int, numberOfDisparities: int, blockSize: int) -> cv2.typing.Rect: ... + +def getVersionMajor() -> int: ... + +def getVersionMinor() -> int: ... + +def getVersionRevision() -> int: ... + +def getVersionString() -> str: ... + +def getWindowImageRect(winname: str) -> cv2.typing.Rect: ... + +def getWindowProperty(winname: str, prop_id: int) -> float: ... + +@_typing.overload +def goodFeaturesToTrack(image: cv2.typing.MatLike, maxCorners: int, qualityLevel: float, minDistance: float, corners: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def goodFeaturesToTrack(image: UMat, maxCorners: int, qualityLevel: float, minDistance: float, corners: UMat | None = ..., mask: UMat | None = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> UMat: ... 
+@_typing.overload +def goodFeaturesToTrack(image: cv2.typing.MatLike, maxCorners: int, qualityLevel: float, minDistance: float, mask: cv2.typing.MatLike, blockSize: int, gradientSize: int, corners: cv2.typing.MatLike | None = ..., useHarrisDetector: bool = ..., k: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def goodFeaturesToTrack(image: UMat, maxCorners: int, qualityLevel: float, minDistance: float, mask: UMat, blockSize: int, gradientSize: int, corners: UMat | None = ..., useHarrisDetector: bool = ..., k: float = ...) -> UMat: ... + +@_typing.overload +def goodFeaturesToTrackWithQuality(image: cv2.typing.MatLike, maxCorners: int, qualityLevel: float, minDistance: float, mask: cv2.typing.MatLike, corners: cv2.typing.MatLike | None = ..., cornersQuality: cv2.typing.MatLike | None = ..., blockSize: int = ..., gradientSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def goodFeaturesToTrackWithQuality(image: UMat, maxCorners: int, qualityLevel: float, minDistance: float, mask: UMat, corners: UMat | None = ..., cornersQuality: UMat | None = ..., blockSize: int = ..., gradientSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def grabCut(img: cv2.typing.MatLike, mask: cv2.typing.MatLike, rect: cv2.typing.Rect, bgdModel: cv2.typing.MatLike, fgdModel: cv2.typing.MatLike, iterCount: int, mode: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def grabCut(img: UMat, mask: UMat, rect: cv2.typing.Rect, bgdModel: UMat, fgdModel: UMat, iterCount: int, mode: int = ...) -> tuple[UMat, UMat, UMat]: ... + +def groupRectangles(rectList: _typing.Sequence[cv2.typing.Rect], groupThreshold: int, eps: float = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int]]: ... + +@_typing.overload +def hasNonZero(src: cv2.typing.MatLike) -> bool: ... +@_typing.overload +def hasNonZero(src: UMat) -> bool: ... + +def haveImageReader(filename: str) -> bool: ... + +def haveImageWriter(filename: str) -> bool: ... + +def haveOpenVX() -> bool: ... + +@_typing.overload +def hconcat(src: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def hconcat(src: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def idct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def idct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ... + +@_typing.overload +def idft(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ..., nonzeroRows: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def idft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ... + +@_typing.overload +def illuminationChange(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., alpha: float = ..., beta: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def illuminationChange(src: UMat, mask: UMat, dst: UMat | None = ..., alpha: float = ..., beta: float = ...) -> UMat: ... + +def imcount(filename: str, flags: int = ...) -> int: ... + +@_typing.overload +def imdecode(buf: cv2.typing.MatLike, flags: int) -> cv2.typing.MatLike: ... +@_typing.overload +def imdecode(buf: UMat, flags: int) -> cv2.typing.MatLike: ... 
+ +@_typing.overload +def imdecodemulti(buf: cv2.typing.MatLike, flags: int, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., range: cv2.typing.Range = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ... +@_typing.overload +def imdecodemulti(buf: UMat, flags: int, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., range: cv2.typing.Range = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ... + +@_typing.overload +def imencode(ext: str, img: cv2.typing.MatLike, params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ... +@_typing.overload +def imencode(ext: str, img: UMat, params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ... + +@_typing.overload +def imread(filename: str, flags: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def imread(filename: str, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def imread(filename: str, dst: UMat | None = ..., flags: int = ...) -> UMat: ... + +@_typing.overload +def imreadmulti(filename: str, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ... +@_typing.overload +def imreadmulti(filename: str, start: int, count: int, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ... + +@_typing.overload +def imshow(winname: str, mat: cv2.typing.MatLike) -> None: ... +@_typing.overload +def imshow(winname: str, mat: cv2.cuda.GpuMat) -> None: ... +@_typing.overload +def imshow(winname: str, mat: UMat) -> None: ... + +@_typing.overload +def imwrite(filename: str, img: cv2.typing.MatLike, params: _typing.Sequence[int] = ...) -> bool: ... +@_typing.overload +def imwrite(filename: str, img: UMat, params: _typing.Sequence[int] = ...) -> bool: ... + +@_typing.overload +def imwritemulti(filename: str, img: _typing.Sequence[cv2.typing.MatLike], params: _typing.Sequence[int] = ...) -> bool: ... +@_typing.overload +def imwritemulti(filename: str, img: _typing.Sequence[UMat], params: _typing.Sequence[int] = ...) -> bool: ... + +@_typing.overload +def inRange(src: cv2.typing.MatLike, lowerb: cv2.typing.MatLike, upperb: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def inRange(src: UMat, lowerb: UMat, upperb: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def initCameraMatrix2D(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, aspectRatio: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def initCameraMatrix2D(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, aspectRatio: float = ...) -> cv2.typing.MatLike: ... + +@_typing.overload +def initInverseRectificationMap(cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, R: cv2.typing.MatLike, newCameraMatrix: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def initInverseRectificationMap(cameraMatrix: UMat, distCoeffs: UMat, R: UMat, newCameraMatrix: UMat, size: cv2.typing.Size, m1type: int, map1: UMat | None = ..., map2: UMat | None = ...) -> tuple[UMat, UMat]: ... 
+ +@_typing.overload +def initUndistortRectifyMap(cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, R: cv2.typing.MatLike, newCameraMatrix: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def initUndistortRectifyMap(cameraMatrix: UMat, distCoeffs: UMat, R: UMat, newCameraMatrix: UMat, size: cv2.typing.Size, m1type: int, map1: UMat | None = ..., map2: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def inpaint(src: cv2.typing.MatLike, inpaintMask: cv2.typing.MatLike, inpaintRadius: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def inpaint(src: UMat, inpaintMask: UMat, inpaintRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def insertChannel(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, coi: int) -> cv2.typing.MatLike: ... +@_typing.overload +def insertChannel(src: UMat, dst: UMat, coi: int) -> UMat: ... + +@_typing.overload +def integral(src: cv2.typing.MatLike, sum: cv2.typing.MatLike | None = ..., sdepth: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def integral(src: UMat, sum: UMat | None = ..., sdepth: int = ...) -> UMat: ... + +@_typing.overload +def integral2(src: cv2.typing.MatLike, sum: cv2.typing.MatLike | None = ..., sqsum: cv2.typing.MatLike | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def integral2(src: UMat, sum: UMat | None = ..., sqsum: UMat | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def integral3(src: cv2.typing.MatLike, sum: cv2.typing.MatLike | None = ..., sqsum: cv2.typing.MatLike | None = ..., tilted: cv2.typing.MatLike | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def integral3(src: UMat, sum: UMat | None = ..., sqsum: UMat | None = ..., tilted: UMat | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[UMat, UMat, UMat]: ... + +@_typing.overload +def intersectConvexConvex(p1: cv2.typing.MatLike, p2: cv2.typing.MatLike, p12: cv2.typing.MatLike | None = ..., handleNested: bool = ...) -> tuple[float, cv2.typing.MatLike]: ... +@_typing.overload +def intersectConvexConvex(p1: UMat, p2: UMat, p12: UMat | None = ..., handleNested: bool = ...) -> tuple[float, UMat]: ... + +@_typing.overload +def invert(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[float, cv2.typing.MatLike]: ... +@_typing.overload +def invert(src: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[float, UMat]: ... + +@_typing.overload +def invertAffineTransform(M: cv2.typing.MatLike, iM: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def invertAffineTransform(M: UMat, iM: UMat | None = ...) -> UMat: ... + +@_typing.overload +def isContourConvex(contour: cv2.typing.MatLike) -> bool: ... +@_typing.overload +def isContourConvex(contour: UMat) -> bool: ... + +@_typing.overload +def kmeans(data: cv2.typing.MatLike, K: int, bestLabels: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria, attempts: int, flags: int, centers: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike]: ... 
+@_typing.overload +def kmeans(data: UMat, K: int, bestLabels: UMat, criteria: cv2.typing.TermCriteria, attempts: int, flags: int, centers: UMat | None = ...) -> tuple[float, UMat, UMat]: ... + +@_typing.overload +def line(img: cv2.typing.MatLike, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def line(img: UMat, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ... + +@_typing.overload +def linearPolar(src: cv2.typing.MatLike, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def linearPolar(src: UMat, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def log(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def log(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def logPolar(src: cv2.typing.MatLike, center: cv2.typing.Point2f, M: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def logPolar(src: UMat, center: cv2.typing.Point2f, M: float, flags: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def magnitude(x: cv2.typing.MatLike, y: cv2.typing.MatLike, magnitude: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def magnitude(x: UMat, y: UMat, magnitude: UMat | None = ...) -> UMat: ... + +@_typing.overload +def matMulDeriv(A: cv2.typing.MatLike, B: cv2.typing.MatLike, dABdA: cv2.typing.MatLike | None = ..., dABdB: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def matMulDeriv(A: UMat, B: UMat, dABdA: UMat | None = ..., dABdB: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def matchShapes(contour1: cv2.typing.MatLike, contour2: cv2.typing.MatLike, method: int, parameter: float) -> float: ... +@_typing.overload +def matchShapes(contour1: UMat, contour2: UMat, method: int, parameter: float) -> float: ... + +@_typing.overload +def matchTemplate(image: cv2.typing.MatLike, templ: cv2.typing.MatLike, method: int, result: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def matchTemplate(image: UMat, templ: UMat, method: int, result: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def max(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def max(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def mean(src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.Scalar: ... +@_typing.overload +def mean(src: UMat, mask: UMat | None = ...) -> cv2.typing.Scalar: ... + +@_typing.overload +def meanShift(probImage: cv2.typing.MatLike, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[int, cv2.typing.Rect]: ... +@_typing.overload +def meanShift(probImage: UMat, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[int, cv2.typing.Rect]: ... 
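As an illustration of the kmeans overloads above, a small sketch on synthetic two-cluster data (the data set and parameter values are hypothetical):

import cv2
import numpy as np

rng = np.random.default_rng(0)
data = np.float32(np.vstack([rng.normal(0, 1, (50, 2)),
                             rng.normal(5, 1, (50, 2))]))   # kmeans expects float32 rows
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
compactness, labels, centers = cv2.kmeans(data, 2, None, criteria,
                                          5, cv2.KMEANS_RANDOM_CENTERS)
# labels assigns each row of `data` to one of the two returned centers.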
+ +@_typing.overload +def meanStdDev(src: cv2.typing.MatLike, mean: cv2.typing.MatLike | None = ..., stddev: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def meanStdDev(src: UMat, mean: UMat | None = ..., stddev: UMat | None = ..., mask: UMat | None = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def medianBlur(src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def medianBlur(src: UMat, ksize: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def merge(mv: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def merge(mv: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def min(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def min(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def minAreaRect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@_typing.overload +def minAreaRect(points: UMat) -> cv2.typing.RotatedRect: ... + +@_typing.overload +def minEnclosingCircle(points: cv2.typing.MatLike) -> tuple[cv2.typing.Point2f, float]: ... +@_typing.overload +def minEnclosingCircle(points: UMat) -> tuple[cv2.typing.Point2f, float]: ... + +@_typing.overload +def minEnclosingTriangle(points: cv2.typing.MatLike, triangle: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike]: ... +@_typing.overload +def minEnclosingTriangle(points: UMat, triangle: UMat | None = ...) -> tuple[float, UMat]: ... + +@_typing.overload +def minMaxLoc(src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> tuple[float, float, cv2.typing.Point, cv2.typing.Point]: ... +@_typing.overload +def minMaxLoc(src: UMat, mask: UMat | None = ...) -> tuple[float, float, cv2.typing.Point, cv2.typing.Point]: ... + +@_typing.overload +def mixChannels(src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike], fromTo: _typing.Sequence[int]) -> _typing.Sequence[cv2.typing.MatLike]: ... +@_typing.overload +def mixChannels(src: _typing.Sequence[UMat], dst: _typing.Sequence[UMat], fromTo: _typing.Sequence[int]) -> _typing.Sequence[UMat]: ... + +@_typing.overload +def moments(array: cv2.typing.MatLike, binaryImage: bool = ...) -> cv2.typing.Moments: ... +@_typing.overload +def moments(array: UMat, binaryImage: bool = ...) -> cv2.typing.Moments: ... + +@_typing.overload +def morphologyEx(src: cv2.typing.MatLike, op: int, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def morphologyEx(src: UMat, op: int, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ... + +def moveWindow(winname: str, x: int, y: int) -> None: ... + +@_typing.overload +def mulSpectrums(a: cv2.typing.MatLike, b: cv2.typing.MatLike, flags: int, c: cv2.typing.MatLike | None = ..., conjB: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def mulSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ... 
+ +@_typing.overload +def mulTransposed(src: cv2.typing.MatLike, aTa: bool, dst: cv2.typing.MatLike | None = ..., delta: cv2.typing.MatLike | None = ..., scale: float = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def mulTransposed(src: UMat, aTa: bool, dst: UMat | None = ..., delta: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ... + +@_typing.overload +def multiply(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., scale: float = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def multiply(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ... + +def namedWindow(winname: str, flags: int = ...) -> None: ... + +@_typing.overload +def norm(src1: cv2.typing.MatLike, normType: int = ..., mask: cv2.typing.MatLike | None = ...) -> float: ... +@_typing.overload +def norm(src1: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ... +@_typing.overload +def norm(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, normType: int = ..., mask: cv2.typing.MatLike | None = ...) -> float: ... +@_typing.overload +def norm(src1: UMat, src2: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ... + +@_typing.overload +def normalize(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float = ..., beta: float = ..., norm_type: int = ..., dtype: int = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def normalize(src: UMat, dst: UMat, alpha: float = ..., beta: float = ..., norm_type: int = ..., dtype: int = ..., mask: UMat | None = ...) -> UMat: ... + +@_typing.overload +def patchNaNs(a: cv2.typing.MatLike, val: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def patchNaNs(a: UMat, val: float = ...) -> UMat: ... + +@_typing.overload +def pencilSketch(src: cv2.typing.MatLike, dst1: cv2.typing.MatLike | None = ..., dst2: cv2.typing.MatLike | None = ..., sigma_s: float = ..., sigma_r: float = ..., shade_factor: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def pencilSketch(src: UMat, dst1: UMat | None = ..., dst2: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ..., shade_factor: float = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def perspectiveTransform(src: cv2.typing.MatLike, m: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def perspectiveTransform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def phase(x: cv2.typing.MatLike, y: cv2.typing.MatLike, angle: cv2.typing.MatLike | None = ..., angleInDegrees: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def phase(x: UMat, y: UMat, angle: UMat | None = ..., angleInDegrees: bool = ...) -> UMat: ... + +@_typing.overload +def phaseCorrelate(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, window: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Point2d, float]: ... +@_typing.overload +def phaseCorrelate(src1: UMat, src2: UMat, window: UMat | None = ...) -> tuple[cv2.typing.Point2d, float]: ... + +@_typing.overload +def pointPolygonTest(contour: cv2.typing.MatLike, pt: cv2.typing.Point2f, measureDist: bool) -> float: ... +@_typing.overload +def pointPolygonTest(contour: UMat, pt: cv2.typing.Point2f, measureDist: bool) -> float: ... 
+ +@_typing.overload +def polarToCart(magnitude: cv2.typing.MatLike, angle: cv2.typing.MatLike, x: cv2.typing.MatLike | None = ..., y: cv2.typing.MatLike | None = ..., angleInDegrees: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def polarToCart(magnitude: UMat, angle: UMat, x: UMat | None = ..., y: UMat | None = ..., angleInDegrees: bool = ...) -> tuple[UMat, UMat]: ... + +def pollKey() -> int: ... + +@_typing.overload +def polylines(img: cv2.typing.MatLike, pts: _typing.Sequence[cv2.typing.MatLike], isClosed: bool, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def polylines(img: UMat, pts: _typing.Sequence[UMat], isClosed: bool, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ... + +@_typing.overload +def pow(src: cv2.typing.MatLike, power: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def pow(src: UMat, power: float, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def preCornerDetect(src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def preCornerDetect(src: UMat, ksize: int, dst: UMat | None = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def projectPoints(objectPoints: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike | None = ..., jacobian: cv2.typing.MatLike | None = ..., aspectRatio: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def projectPoints(objectPoints: UMat, rvec: UMat, tvec: UMat, cameraMatrix: UMat, distCoeffs: UMat, imagePoints: UMat | None = ..., jacobian: UMat | None = ..., aspectRatio: float = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def putText(img: cv2.typing.MatLike, text: str, org: cv2.typing.Point, fontFace: int, fontScale: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., bottomLeftOrigin: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def putText(img: UMat, text: str, org: cv2.typing.Point, fontFace: int, fontScale: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., bottomLeftOrigin: bool = ...) -> UMat: ... + +@_typing.overload +def pyrDown(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def pyrDown(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def pyrMeanShiftFiltering(src: cv2.typing.MatLike, sp: float, sr: float, dst: cv2.typing.MatLike | None = ..., maxLevel: int = ..., termcrit: cv2.typing.TermCriteria = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def pyrMeanShiftFiltering(src: UMat, sp: float, sr: float, dst: UMat | None = ..., maxLevel: int = ..., termcrit: cv2.typing.TermCriteria = ...) -> UMat: ... + +@_typing.overload +def pyrUp(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def pyrUp(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ... 
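A short sketch of the drawing helpers typed above (polylines, putText); the canvas size, coordinates, and colors are arbitrary example values:

import cv2
import numpy as np

canvas = np.zeros((240, 320, 3), dtype=np.uint8)
triangle = np.array([[20, 20], [300, 40], [160, 200]], dtype=np.int32)
cv2.polylines(canvas, [triangle], isClosed=True, color=(0, 255, 0), thickness=2)
cv2.putText(canvas, "hello", (20, 230), cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (255, 255, 255), 1, cv2.LINE_AA)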
+ +@_typing.overload +def randShuffle(dst: cv2.typing.MatLike, iterFactor: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def randShuffle(dst: UMat, iterFactor: float = ...) -> UMat: ... + +@_typing.overload +def randn(dst: cv2.typing.MatLike, mean: cv2.typing.MatLike, stddev: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@_typing.overload +def randn(dst: UMat, mean: UMat, stddev: UMat) -> UMat: ... + +@_typing.overload +def randu(dst: cv2.typing.MatLike, low: cv2.typing.MatLike, high: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@_typing.overload +def randu(dst: UMat, low: UMat, high: UMat) -> UMat: ... + +def readOpticalFlow(path: str) -> cv2.typing.MatLike: ... + +@_typing.overload +def recoverPose(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, E: cv2.typing.MatLike | None = ..., R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., method: int = ..., prob: float = ..., threshold: float = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def recoverPose(points1: UMat, points2: UMat, cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, E: UMat | None = ..., R: UMat | None = ..., t: UMat | None = ..., method: int = ..., prob: float = ..., threshold: float = ..., mask: UMat | None = ...) -> tuple[int, UMat, UMat, UMat, UMat]: ... +@_typing.overload +def recoverPose(E: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def recoverPose(E: UMat, points1: UMat, points2: UMat, cameraMatrix: UMat, R: UMat | None = ..., t: UMat | None = ..., mask: UMat | None = ...) -> tuple[int, UMat, UMat, UMat]: ... +@_typing.overload +def recoverPose(E: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., focal: float = ..., pp: cv2.typing.Point2d = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def recoverPose(E: UMat, points1: UMat, points2: UMat, R: UMat | None = ..., t: UMat | None = ..., focal: float = ..., pp: cv2.typing.Point2d = ..., mask: UMat | None = ...) -> tuple[int, UMat, UMat, UMat]: ... +@_typing.overload +def recoverPose(E: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distanceThresh: float, R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., triangulatedPoints: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def recoverPose(E: UMat, points1: UMat, points2: UMat, cameraMatrix: UMat, distanceThresh: float, R: UMat | None = ..., t: UMat | None = ..., mask: UMat | None = ..., triangulatedPoints: UMat | None = ...) -> tuple[int, UMat, UMat, UMat, UMat]: ... 
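The recoverPose overloads above are typically combined with findEssentialMat (declared earlier in this stub file, outside the excerpt). A hedged sketch on synthetic correspondences fabricated with projectPoints:

import cv2
import numpy as np

K = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
pts3d = np.random.default_rng(1).uniform(-1.0, 1.0, (30, 3)) + [0.0, 0.0, 5.0]
# Project the same 3-D points from two views separated by a small x-translation.
p1, _ = cv2.projectPoints(pts3d, np.zeros(3), np.zeros(3), K, None)
p2, _ = cv2.projectPoints(pts3d, np.zeros(3), np.array([-0.2, 0.0, 0.0]), K, None)
E, inliers = cv2.findEssentialMat(p1, p2, K, method=cv2.RANSAC, prob=0.999, threshold=1.0)
n_good, R, t, pose_mask = cv2.recoverPose(E, p1, p2, K, mask=inliers)
# R, t describe the relative camera motion, with t known only up to scale.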
+ +@_typing.overload +def rectangle(img: cv2.typing.MatLike, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def rectangle(img: UMat, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ... +@_typing.overload +def rectangle(img: cv2.typing.MatLike, rec: cv2.typing.Rect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def rectangle(img: UMat, rec: cv2.typing.Rect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ... + +def rectangleIntersectionArea(a: cv2.typing.Rect2d, b: cv2.typing.Rect2d) -> float: ... + +@_typing.overload +def rectify3Collinear(cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, cameraMatrix3: cv2.typing.MatLike, distCoeffs3: cv2.typing.MatLike, imgpt1: _typing.Sequence[cv2.typing.MatLike], imgpt3: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, R12: cv2.typing.MatLike, T12: cv2.typing.MatLike, R13: cv2.typing.MatLike, T13: cv2.typing.MatLike, alpha: float, newImgSize: cv2.typing.Size, flags: int, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., R3: cv2.typing.MatLike | None = ..., P1: cv2.typing.MatLike | None = ..., P2: cv2.typing.MatLike | None = ..., P3: cv2.typing.MatLike | None = ..., Q: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.Rect, cv2.typing.Rect]: ... +@_typing.overload +def rectify3Collinear(cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, cameraMatrix3: UMat, distCoeffs3: UMat, imgpt1: _typing.Sequence[UMat], imgpt3: _typing.Sequence[UMat], imageSize: cv2.typing.Size, R12: UMat, T12: UMat, R13: UMat, T13: UMat, alpha: float, newImgSize: cv2.typing.Size, flags: int, R1: UMat | None = ..., R2: UMat | None = ..., R3: UMat | None = ..., P1: UMat | None = ..., P2: UMat | None = ..., P3: UMat | None = ..., Q: UMat | None = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, cv2.typing.Rect, cv2.typing.Rect]: ... + +@_typing.overload +def reduce(src: cv2.typing.MatLike, dim: int, rtype: int, dst: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def reduce(src: UMat, dim: int, rtype: int, dst: UMat | None = ..., dtype: int = ...) -> UMat: ... + +@_typing.overload +def reduceArgMax(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ..., lastIndex: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def reduceArgMax(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ... + +@_typing.overload +def reduceArgMin(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ..., lastIndex: bool = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def reduceArgMin(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ... + +@_typing.overload +def remap(src: cv2.typing.MatLike, map1: cv2.typing.MatLike, map2: cv2.typing.MatLike, interpolation: int, dst: cv2.typing.MatLike | None = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... 
+@_typing.overload +def remap(src: UMat, map1: UMat, map2: UMat, interpolation: int, dst: UMat | None = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ... + +@_typing.overload +def repeat(src: cv2.typing.MatLike, ny: int, nx: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def repeat(src: UMat, ny: int, nx: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def reprojectImageTo3D(disparity: cv2.typing.MatLike, Q: cv2.typing.MatLike, _3dImage: cv2.typing.MatLike | None = ..., handleMissingValues: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def reprojectImageTo3D(disparity: UMat, Q: UMat, _3dImage: UMat | None = ..., handleMissingValues: bool = ..., ddepth: int = ...) -> UMat: ... + +@_typing.overload +def resize(src: cv2.typing.MatLike, dsize: cv2.typing.Size | None, dst: cv2.typing.MatLike | None = ..., fx: float = ..., fy: float = ..., interpolation: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def resize(src: UMat, dsize: cv2.typing.Size | None, dst: UMat | None = ..., fx: float = ..., fy: float = ..., interpolation: int = ...) -> UMat: ... + +@_typing.overload +def resizeWindow(winname: str, width: int, height: int) -> None: ... +@_typing.overload +def resizeWindow(winname: str, size: cv2.typing.Size) -> None: ... + +@_typing.overload +def rotate(src: cv2.typing.MatLike, rotateCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def rotate(src: UMat, rotateCode: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def rotatedRectangleIntersection(rect1: cv2.typing.RotatedRect, rect2: cv2.typing.RotatedRect, intersectingRegion: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ... +@_typing.overload +def rotatedRectangleIntersection(rect1: cv2.typing.RotatedRect, rect2: cv2.typing.RotatedRect, intersectingRegion: UMat | None = ...) -> tuple[int, UMat]: ... + +@_typing.overload +def sampsonDistance(pt1: cv2.typing.MatLike, pt2: cv2.typing.MatLike, F: cv2.typing.MatLike) -> float: ... +@_typing.overload +def sampsonDistance(pt1: UMat, pt2: UMat, F: UMat) -> float: ... + +@_typing.overload +def scaleAdd(src1: cv2.typing.MatLike, alpha: float, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def scaleAdd(src1: UMat, alpha: float, src2: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def seamlessClone(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike, p: cv2.typing.Point, flags: int, blend: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def seamlessClone(src: UMat, dst: UMat, mask: UMat, p: cv2.typing.Point, flags: int, blend: UMat | None = ...) -> UMat: ... + +@_typing.overload +def selectROI(windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ... +@_typing.overload +def selectROI(windowName: str, img: UMat, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ... +@_typing.overload +def selectROI(img: cv2.typing.MatLike, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ... +@_typing.overload +def selectROI(img: UMat, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ... 
+ +@_typing.overload +def selectROIs(windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> _typing.Sequence[cv2.typing.Rect]: ... +@_typing.overload +def selectROIs(windowName: str, img: UMat, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> _typing.Sequence[cv2.typing.Rect]: ... + +@_typing.overload +def sepFilter2D(src: cv2.typing.MatLike, ddepth: int, kernelX: cv2.typing.MatLike, kernelY: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def sepFilter2D(src: UMat, ddepth: int, kernelX: UMat, kernelY: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def setIdentity(mtx: cv2.typing.MatLike, s: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def setIdentity(mtx: UMat, s: cv2.typing.Scalar = ...) -> UMat: ... + +def setLogLevel(level: int) -> int: ... + +def setNumThreads(nthreads: int) -> None: ... + +def setRNGSeed(seed: int) -> None: ... + +def setTrackbarMax(trackbarname: str, winname: str, maxval: int) -> None: ... + +def setTrackbarMin(trackbarname: str, winname: str, minval: int) -> None: ... + +def setTrackbarPos(trackbarname: str, winname: str, pos: int) -> None: ... + +def setUseOpenVX(flag: bool) -> None: ... + +def setUseOptimized(onoff: bool) -> None: ... + +def setWindowProperty(winname: str, prop_id: int, prop_value: float) -> None: ... + +def setWindowTitle(winname: str, title: str) -> None: ... + +@_typing.overload +def solve(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike]: ... +@_typing.overload +def solve(src1: UMat, src2: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ... + +@_typing.overload +def solveCubic(coeffs: cv2.typing.MatLike, roots: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ... +@_typing.overload +def solveCubic(coeffs: UMat, roots: UMat | None = ...) -> tuple[int, UMat]: ... + +@_typing.overload +def solveLP(Func: cv2.typing.MatLike, Constr: cv2.typing.MatLike, constr_eps: float, z: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ... +@_typing.overload +def solveLP(Func: UMat, Constr: UMat, constr_eps: float, z: UMat | None = ...) -> tuple[int, UMat]: ... +@_typing.overload +def solveLP(Func: cv2.typing.MatLike, Constr: cv2.typing.MatLike, z: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ... +@_typing.overload +def solveLP(Func: UMat, Constr: UMat, z: UMat | None = ...) -> tuple[int, UMat]: ... + +@_typing.overload +def solveP3P(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, flags: int, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ... +@_typing.overload +def solveP3P(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, flags: int, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ...) -> tuple[int, _typing.Sequence[UMat], _typing.Sequence[UMat]]: ... 
+ +@_typing.overload +def solvePnP(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def solvePnP(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat | None = ..., tvec: UMat | None = ..., useExtrinsicGuess: bool = ..., flags: int = ...) -> tuple[bool, UMat, UMat]: ... + +@_typing.overload +def solvePnPGeneric(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., useExtrinsicGuess: bool = ..., flags: SolvePnPMethod = ..., rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., reprojectionError: cv2.typing.MatLike | None = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... +@_typing.overload +def solvePnPGeneric(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., useExtrinsicGuess: bool = ..., flags: SolvePnPMethod = ..., rvec: UMat | None = ..., tvec: UMat | None = ..., reprojectionError: UMat | None = ...) -> tuple[int, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat]: ... + +@_typing.overload +def solvePnPRansac(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def solvePnPRansac(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat | None = ..., tvec: UMat | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat, UMat, UMat]: ... +@_typing.overload +def solvePnPRansac(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., inliers: cv2.typing.MatLike | None = ..., params: UsacParams = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def solvePnPRansac(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat | None = ..., tvec: UMat | None = ..., inliers: UMat | None = ..., params: UsacParams = ...) -> tuple[bool, UMat, UMat, UMat, UMat]: ... + +@_typing.overload +def solvePnPRefineLM(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... 
+@_typing.overload +def solvePnPRefineLM(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat, tvec: UMat, criteria: cv2.typing.TermCriteria = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def solvePnPRefineVVS(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria = ..., VVSlambda: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def solvePnPRefineVVS(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat, tvec: UMat, criteria: cv2.typing.TermCriteria = ..., VVSlambda: float = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def solvePoly(coeffs: cv2.typing.MatLike, roots: cv2.typing.MatLike | None = ..., maxIters: int = ...) -> tuple[float, cv2.typing.MatLike]: ... +@_typing.overload +def solvePoly(coeffs: UMat, roots: UMat | None = ..., maxIters: int = ...) -> tuple[float, UMat]: ... + +@_typing.overload +def sort(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def sort(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def sortIdx(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def sortIdx(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def spatialGradient(src: cv2.typing.MatLike, dx: cv2.typing.MatLike | None = ..., dy: cv2.typing.MatLike | None = ..., ksize: int = ..., borderType: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def spatialGradient(src: UMat, dx: UMat | None = ..., dy: UMat | None = ..., ksize: int = ..., borderType: int = ...) -> tuple[UMat, UMat]: ... + +@_typing.overload +def split(m: cv2.typing.MatLike, mv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ... +@_typing.overload +def split(m: UMat, mv: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[UMat]: ... + +@_typing.overload +def sqrBoxFilter(src: cv2.typing.MatLike, ddepth: int, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def sqrBoxFilter(src: UMat, ddepth: int, ksize: cv2.typing.Size, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> UMat: ... + +@_typing.overload +def sqrt(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def sqrt(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def stackBlur(src: cv2.typing.MatLike, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def stackBlur(src: UMat, ksize: cv2.typing.Size, dst: UMat | None = ...) -> UMat: ... + +def startWindowThread() -> int: ... 
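A minimal sketch of the solvePnP overload typed above, using projectPoints to fabricate a consistent set of 3-D/2-D correspondences (all numeric values are hypothetical):

import cv2
import numpy as np

obj = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], dtype=np.float32)
K = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
# Synthesize image points for a camera 5 units in front of the square.
img_pts, _ = cv2.projectPoints(obj, np.zeros(3), np.array([0.0, 0.0, 5.0]), K, None)
ok, rvec, tvec = cv2.solvePnP(obj, img_pts, K, None, flags=cv2.SOLVEPNP_ITERATIVE)
# tvec should come back close to (0, 0, 5) for this synthetic setup.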
+ +@_typing.overload +def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., E: cv2.typing.MatLike | None = ..., F: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def stereoCalibrate(objectPoints: _typing.Sequence[UMat], imagePoints1: _typing.Sequence[UMat], imagePoints2: _typing.Sequence[UMat], cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat | None = ..., T: UMat | None = ..., E: UMat | None = ..., F: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ... +@_typing.overload +def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, T: cv2.typing.MatLike, E: cv2.typing.MatLike | None = ..., F: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def stereoCalibrate(objectPoints: _typing.Sequence[UMat], imagePoints1: _typing.Sequence[UMat], imagePoints2: _typing.Sequence[UMat], cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat, T: UMat, E: UMat | None = ..., F: UMat | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ... + +@_typing.overload +def stereoCalibrateExtended(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, T: cv2.typing.MatLike, E: cv2.typing.MatLike | None = ..., F: cv2.typing.MatLike | None = ..., rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... 
+@_typing.overload +def stereoCalibrateExtended(objectPoints: _typing.Sequence[UMat], imagePoints1: _typing.Sequence[UMat], imagePoints2: _typing.Sequence[UMat], cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat, T: UMat, E: UMat | None = ..., F: UMat | None = ..., rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat]: ... + +@_typing.overload +def stereoRectify(cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, T: cv2.typing.MatLike, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., P1: cv2.typing.MatLike | None = ..., P2: cv2.typing.MatLike | None = ..., Q: cv2.typing.MatLike | None = ..., flags: int = ..., alpha: float = ..., newImageSize: cv2.typing.Size = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.Rect, cv2.typing.Rect]: ... +@_typing.overload +def stereoRectify(cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat, T: UMat, R1: UMat | None = ..., R2: UMat | None = ..., P1: UMat | None = ..., P2: UMat | None = ..., Q: UMat | None = ..., flags: int = ..., alpha: float = ..., newImageSize: cv2.typing.Size = ...) -> tuple[UMat, UMat, UMat, UMat, UMat, cv2.typing.Rect, cv2.typing.Rect]: ... + +@_typing.overload +def stereoRectifyUncalibrated(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, F: cv2.typing.MatLike, imgSize: cv2.typing.Size, H1: cv2.typing.MatLike | None = ..., H2: cv2.typing.MatLike | None = ..., threshold: float = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ... +@_typing.overload +def stereoRectifyUncalibrated(points1: UMat, points2: UMat, F: UMat, imgSize: cv2.typing.Size, H1: UMat | None = ..., H2: UMat | None = ..., threshold: float = ...) -> tuple[bool, UMat, UMat]: ... + +@_typing.overload +def stylization(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def stylization(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ... + +@_typing.overload +def subtract(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def subtract(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ... + +@_typing.overload +def sumElems(src: cv2.typing.MatLike) -> cv2.typing.Scalar: ... +@_typing.overload +def sumElems(src: UMat) -> cv2.typing.Scalar: ... + +@_typing.overload +def textureFlattening(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., low_threshold: float = ..., high_threshold: float = ..., kernel_size: int = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def textureFlattening(src: UMat, mask: UMat, dst: UMat | None = ..., low_threshold: float = ..., high_threshold: float = ..., kernel_size: int = ...) -> UMat: ... 
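The stereoRectify declaration above is usually paired with initUndistortRectifyMap and remap (both typed earlier in this excerpt). A sketch under an assumed, distortion-free two-camera geometry with a 0.1-unit horizontal baseline:

import cv2
import numpy as np

K = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
D = np.zeros(5)
size = (640, 480)
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, D, K, D, size,
                                                  np.eye(3), np.array([-0.1, 0.0, 0.0]),
                                                  alpha=0)
map1, map2 = cv2.initUndistortRectifyMap(K, D, R1, P1, size, cv2.CV_32FC1)
left = np.zeros((480, 640, 3), dtype=np.uint8)     # placeholder left image
rect_left = cv2.remap(left, map1, map2, cv2.INTER_LINEAR)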
+ +@_typing.overload +def threshold(src: cv2.typing.MatLike, thresh: float, maxval: float, type: int, dst: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike]: ... +@_typing.overload +def threshold(src: UMat, thresh: float, maxval: float, type: int, dst: UMat | None = ...) -> tuple[float, UMat]: ... + +@_typing.overload +def trace(mtx: cv2.typing.MatLike) -> cv2.typing.Scalar: ... +@_typing.overload +def trace(mtx: UMat) -> cv2.typing.Scalar: ... + +@_typing.overload +def transform(src: cv2.typing.MatLike, m: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def transform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def transpose(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def transpose(src: UMat, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def transposeND(src: cv2.typing.MatLike, order: _typing.Sequence[int], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def transposeND(src: UMat, order: _typing.Sequence[int], dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def triangulatePoints(projMatr1: cv2.typing.MatLike, projMatr2: cv2.typing.MatLike, projPoints1: cv2.typing.MatLike, projPoints2: cv2.typing.MatLike, points4D: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def triangulatePoints(projMatr1: UMat, projMatr2: UMat, projPoints1: UMat, projPoints2: UMat, points4D: UMat | None = ...) -> UMat: ... + +@_typing.overload +def undistort(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., newCameraMatrix: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def undistort(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | None = ..., newCameraMatrix: UMat | None = ...) -> UMat: ... + +@_typing.overload +def undistortImagePoints(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., arg1: cv2.typing.TermCriteria = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def undistortImagePoints(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | None = ..., arg1: cv2.typing.TermCriteria = ...) -> UMat: ... + +@_typing.overload +def undistortPoints(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., R: cv2.typing.MatLike | None = ..., P: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def undistortPoints(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | None = ..., R: UMat | None = ..., P: UMat | None = ...) -> UMat: ... + +@_typing.overload +def undistortPointsIter(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, R: cv2.typing.MatLike, P: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def undistortPointsIter(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, R: UMat, P: UMat, criteria: cv2.typing.TermCriteria, dst: UMat | None = ...) -> UMat: ... + +def useOpenVX() -> bool: ... + +def useOptimized() -> bool: ... + +@_typing.overload +def validateDisparity(disparity: cv2.typing.MatLike, cost: cv2.typing.MatLike, minDisparity: int, numberOfDisparities: int, disp12MaxDisp: int = ...) 
-> cv2.typing.MatLike: ... +@_typing.overload +def validateDisparity(disparity: UMat, cost: UMat, minDisparity: int, numberOfDisparities: int, disp12MaxDisp: int = ...) -> UMat: ... + +@_typing.overload +def vconcat(src: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def vconcat(src: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + +def waitKey(delay: int = ...) -> int: ... + +def waitKeyEx(delay: int = ...) -> int: ... + +@_typing.overload +def warpAffine(src: cv2.typing.MatLike, M: cv2.typing.MatLike, dsize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def warpAffine(src: UMat, M: UMat, dsize: cv2.typing.Size, dst: UMat | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ... + +@_typing.overload +def warpPerspective(src: cv2.typing.MatLike, M: cv2.typing.MatLike, dsize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def warpPerspective(src: UMat, M: UMat, dsize: cv2.typing.Size, dst: UMat | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ... + +@_typing.overload +def warpPolar(src: cv2.typing.MatLike, dsize: cv2.typing.Size, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def warpPolar(src: UMat, dsize: cv2.typing.Size, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ... + +@_typing.overload +def watershed(image: cv2.typing.MatLike, markers: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@_typing.overload +def watershed(image: UMat, markers: UMat) -> UMat: ... + +@_typing.overload +def writeOpticalFlow(path: str, flow: cv2.typing.MatLike) -> bool: ... +@_typing.overload +def writeOpticalFlow(path: str, flow: UMat) -> bool: ... + +def createTrackbar(trackbarName: str, windowName: str, value: int, count: int, onChange: _typing.Callable[[int], None]) -> None: ... + +def createButton(buttonName: str, onChange: _typing.Callable[[tuple[int] | tuple[int, _typing.Any]], None], userData: _typing.Any | None = ..., buttonType: int = ..., initialButtonState: int = ...) -> None: ... + +def setMouseCallback(windowName: str, onMouse: _typing.Callable[[int, int, int, int, _typing.Any | None], None], param: _typing.Any | None = ...) -> None: ... + +def CV_8UC(channels: int) -> int: ... + +def CV_8SC(channels: int) -> int: ... + +def CV_16UC(channels: int) -> int: ... + +def CV_16SC(channels: int) -> int: ... + +def CV_32SC(channels: int) -> int: ... + +def CV_32FC(channels: int) -> int: ... + +def CV_64FC(channels: int) -> int: ... + +def CV_16FC(channels: int) -> int: ... + +def CV_MAKETYPE(depth: int, channels: int) -> int: ... + +def dnn_registerLayer(layerTypeName: str, layerClass: _typing.Type[cv2.dnn.LayerProtocol]) -> None: ... + +def dnn_unregisterLayer(layerTypeName: str) -> None: ... + +def redirectError(onError: _typing.Callable[[int, str, str, str, int], None] | None) -> None: ... 
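Finally, the HighGUI callback signatures typed above (createTrackbar, setMouseCallback) follow the usual callback pattern. A minimal sketch, assuming a GUI-capable (non-headless) build:

import cv2
import numpy as np

def on_level(pos: int) -> None:
    print("trackbar:", pos)

def on_mouse(event, x, y, flags, param) -> None:
    if event == cv2.EVENT_LBUTTONDOWN:
        print("click at", (x, y))

cv2.namedWindow("demo")
cv2.createTrackbar("level", "demo", 0, 255, on_level)
cv2.setMouseCallback("demo", on_mouse)
cv2.imshow("demo", np.zeros((240, 320, 3), dtype=np.uint8))
cv2.waitKey(0)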
+ + diff --git a/parrot/lib/python3.10/site-packages/cv2/aruco/__init__.pyi b/parrot/lib/python3.10/site-packages/cv2/aruco/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9e252203471e3b1a9867e94fa677120996e1c136 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/aruco/__init__.pyi @@ -0,0 +1,303 @@ +__all__: list[str] = [] + +import cv2 +import cv2.typing +import typing as _typing + + +# Enumerations +CORNER_REFINE_NONE: int +CORNER_REFINE_SUBPIX: int +CORNER_REFINE_CONTOUR: int +CORNER_REFINE_APRILTAG: int +CornerRefineMethod = int +"""One of [CORNER_REFINE_NONE, CORNER_REFINE_SUBPIX, CORNER_REFINE_CONTOUR, CORNER_REFINE_APRILTAG]""" + +DICT_4X4_50: int +DICT_4X4_100: int +DICT_4X4_250: int +DICT_4X4_1000: int +DICT_5X5_50: int +DICT_5X5_100: int +DICT_5X5_250: int +DICT_5X5_1000: int +DICT_6X6_50: int +DICT_6X6_100: int +DICT_6X6_250: int +DICT_6X6_1000: int +DICT_7X7_50: int +DICT_7X7_100: int +DICT_7X7_250: int +DICT_7X7_1000: int +DICT_ARUCO_ORIGINAL: int +DICT_APRILTAG_16h5: int +DICT_APRILTAG_16H5: int +DICT_APRILTAG_25h9: int +DICT_APRILTAG_25H9: int +DICT_APRILTAG_36h10: int +DICT_APRILTAG_36H10: int +DICT_APRILTAG_36h11: int +DICT_APRILTAG_36H11: int +DICT_ARUCO_MIP_36h12: int +DICT_ARUCO_MIP_36H12: int +PredefinedDictionaryType = int +"""One of [DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_16H5, DICT_APRILTAG_25h9, DICT_APRILTAG_25H9, DICT_APRILTAG_36h10, DICT_APRILTAG_36H10, DICT_APRILTAG_36h11, DICT_APRILTAG_36H11, DICT_ARUCO_MIP_36h12, DICT_ARUCO_MIP_36H12]""" + + + +# Classes +class Board: + # Functions + @_typing.overload + def __init__(self, objPoints: _typing.Sequence[cv2.typing.MatLike], dictionary: Dictionary, ids: cv2.typing.MatLike) -> None: ... + @_typing.overload + def __init__(self, objPoints: _typing.Sequence[cv2.UMat], dictionary: Dictionary, ids: cv2.UMat) -> None: ... + + def getDictionary(self) -> Dictionary: ... + + def getObjPoints(self) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point3f]]: ... + + def getIds(self) -> _typing.Sequence[int]: ... + + def getRightBottomCorner(self) -> cv2.typing.Point3f: ... + + @_typing.overload + def matchImagePoints(self, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, objPoints: cv2.typing.MatLike | None = ..., imgPoints: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... + @_typing.overload + def matchImagePoints(self, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, objPoints: cv2.UMat | None = ..., imgPoints: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ... + + @_typing.overload + def generateImage(self, outSize: cv2.typing.Size, img: cv2.typing.MatLike | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def generateImage(self, outSize: cv2.typing.Size, img: cv2.UMat | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.UMat: ... + + +class GridBoard(Board): + # Functions + @_typing.overload + def __init__(self, size: cv2.typing.Size, markerLength: float, markerSeparation: float, dictionary: Dictionary, ids: cv2.typing.MatLike | None = ...) -> None: ... 
+ @_typing.overload + def __init__(self, size: cv2.typing.Size, markerLength: float, markerSeparation: float, dictionary: Dictionary, ids: cv2.UMat | None = ...) -> None: ... + + def getGridSize(self) -> cv2.typing.Size: ... + + def getMarkerLength(self) -> float: ... + + def getMarkerSeparation(self) -> float: ... + + +class CharucoBoard(Board): + # Functions + @_typing.overload + def __init__(self, size: cv2.typing.Size, squareLength: float, markerLength: float, dictionary: Dictionary, ids: cv2.typing.MatLike | None = ...) -> None: ... + @_typing.overload + def __init__(self, size: cv2.typing.Size, squareLength: float, markerLength: float, dictionary: Dictionary, ids: cv2.UMat | None = ...) -> None: ... + + def setLegacyPattern(self, legacyPattern: bool) -> None: ... + + def getLegacyPattern(self) -> bool: ... + + def getChessboardSize(self) -> cv2.typing.Size: ... + + def getSquareLength(self) -> float: ... + + def getMarkerLength(self) -> float: ... + + def getChessboardCorners(self) -> _typing.Sequence[cv2.typing.Point3f]: ... + + @_typing.overload + def checkCharucoCornersCollinear(self, charucoIds: cv2.typing.MatLike) -> bool: ... + @_typing.overload + def checkCharucoCornersCollinear(self, charucoIds: cv2.UMat) -> bool: ... + + +class DetectorParameters: + adaptiveThreshWinSizeMin: int + adaptiveThreshWinSizeMax: int + adaptiveThreshWinSizeStep: int + adaptiveThreshConstant: float + minMarkerPerimeterRate: float + maxMarkerPerimeterRate: float + polygonalApproxAccuracyRate: float + minCornerDistanceRate: float + minDistanceToBorder: int + minMarkerDistanceRate: float + minGroupDistance: float + cornerRefinementMethod: int + cornerRefinementWinSize: int + relativeCornerRefinmentWinSize: float + cornerRefinementMaxIterations: int + cornerRefinementMinAccuracy: float + markerBorderBits: int + perspectiveRemovePixelPerCell: int + perspectiveRemoveIgnoredMarginPerCell: float + maxErroneousBitsInBorderRate: float + minOtsuStdDev: float + errorCorrectionRate: float + aprilTagQuadDecimate: float + aprilTagQuadSigma: float + aprilTagMinClusterPixels: int + aprilTagMaxNmaxima: int + aprilTagCriticalRad: float + aprilTagMaxLineFitMse: float + aprilTagMinWhiteBlackDiff: int + aprilTagDeglitch: int + detectInvertedMarker: bool + useAruco3Detection: bool + minSideLengthCanonicalImg: int + minMarkerLengthRatioOriginalImg: float + + # Functions + def __init__(self) -> None: ... + + def readDetectorParameters(self, fn: cv2.FileNode) -> bool: ... + + def writeDetectorParameters(self, fs: cv2.FileStorage, name: str = ...) -> bool: ... + + +class RefineParameters: + minRepDistance: float + errorCorrectionRate: float + checkAllOrders: bool + + # Functions + def __init__(self, minRepDistance: float = ..., errorCorrectionRate: float = ..., checkAllOrders: bool = ...) -> None: ... + + def readRefineParameters(self, fn: cv2.FileNode) -> bool: ... + + def writeRefineParameters(self, fs: cv2.FileStorage, name: str = ...) -> bool: ... + + +class ArucoDetector(cv2.Algorithm): + # Functions + def __init__(self, dictionary: Dictionary = ..., detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ... + + @_typing.overload + def detectMarkers(self, image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike] | None = ..., ids: cv2.typing.MatLike | None = ..., rejectedImgPoints: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ... 
+ @_typing.overload + def detectMarkers(self, image: cv2.UMat, corners: _typing.Sequence[cv2.UMat] | None = ..., ids: cv2.UMat | None = ..., rejectedImgPoints: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat]]: ... + + @_typing.overload + def refineDetectedMarkers(self, image: cv2.typing.MatLike, board: Board, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, rejectedCorners: _typing.Sequence[cv2.typing.MatLike], cameraMatrix: cv2.typing.MatLike | None = ..., distCoeffs: cv2.typing.MatLike | None = ..., recoveredIdxs: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... + @_typing.overload + def refineDetectedMarkers(self, image: cv2.UMat, board: Board, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, rejectedCorners: _typing.Sequence[cv2.UMat], cameraMatrix: cv2.UMat | None = ..., distCoeffs: cv2.UMat | None = ..., recoveredIdxs: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ... + + def getDictionary(self) -> Dictionary: ... + + def setDictionary(self, dictionary: Dictionary) -> None: ... + + def getDetectorParameters(self) -> DetectorParameters: ... + + def setDetectorParameters(self, detectorParameters: DetectorParameters) -> None: ... + + def getRefineParameters(self) -> RefineParameters: ... + + def setRefineParameters(self, refineParameters: RefineParameters) -> None: ... + + def write(self, fs: cv2.FileStorage, name: str) -> None: ... + + def read(self, fn: cv2.FileNode) -> None: ... + + +class Dictionary: + bytesList: cv2.typing.MatLike + markerSize: int + maxCorrectionBits: int + + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, bytesList: cv2.typing.MatLike, _markerSize: int, maxcorr: int = ...) -> None: ... + + def readDictionary(self, fn: cv2.FileNode) -> bool: ... + + def writeDictionary(self, fs: cv2.FileStorage, name: str = ...) -> None: ... + + def identify(self, onlyBits: cv2.typing.MatLike, maxCorrectionRate: float) -> tuple[bool, int, int]: ... + + @_typing.overload + def getDistanceToId(self, bits: cv2.typing.MatLike, id: int, allRotations: bool = ...) -> int: ... + @_typing.overload + def getDistanceToId(self, bits: cv2.UMat, id: int, allRotations: bool = ...) -> int: ... + + @_typing.overload + def generateImageMarker(self, id: int, sidePixels: int, _img: cv2.typing.MatLike | None = ..., borderBits: int = ...) -> cv2.typing.MatLike: ... + @_typing.overload + def generateImageMarker(self, id: int, sidePixels: int, _img: cv2.UMat | None = ..., borderBits: int = ...) -> cv2.UMat: ... + + @staticmethod + def getByteListFromBits(bits: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + + @staticmethod + def getBitsFromByteList(byteList: cv2.typing.MatLike, markerSize: int) -> cv2.typing.MatLike: ... + + +class CharucoParameters: + cameraMatrix: cv2.typing.MatLike + distCoeffs: cv2.typing.MatLike + minMarkers: int + tryRefineMarkers: bool + + # Functions + def __init__(self) -> None: ... + + +class CharucoDetector(cv2.Algorithm): + # Functions + def __init__(self, board: CharucoBoard, charucoParams: CharucoParameters = ..., detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ... + + def getBoard(self) -> CharucoBoard: ... + + def setBoard(self, board: CharucoBoard) -> None: ... 
+ + def getCharucoParameters(self) -> CharucoParameters: ... + + def setCharucoParameters(self, charucoParameters: CharucoParameters) -> None: ... + + def getDetectorParameters(self) -> DetectorParameters: ... + + def setDetectorParameters(self, detectorParameters: DetectorParameters) -> None: ... + + def getRefineParameters(self) -> RefineParameters: ... + + def setRefineParameters(self, refineParameters: RefineParameters) -> None: ... + + @_typing.overload + def detectBoard(self, image: cv2.typing.MatLike, charucoCorners: cv2.typing.MatLike | None = ..., charucoIds: cv2.typing.MatLike | None = ..., markerCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., markerIds: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... + @_typing.overload + def detectBoard(self, image: cv2.UMat, charucoCorners: cv2.UMat | None = ..., charucoIds: cv2.UMat | None = ..., markerCorners: _typing.Sequence[cv2.UMat] | None = ..., markerIds: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ... + + @_typing.overload + def detectDiamonds(self, image: cv2.typing.MatLike, diamondCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., diamondIds: cv2.typing.MatLike | None = ..., markerCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., markerIds: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ... + @_typing.overload + def detectDiamonds(self, image: cv2.UMat, diamondCorners: _typing.Sequence[cv2.UMat] | None = ..., diamondIds: cv2.UMat | None = ..., markerCorners: _typing.Sequence[cv2.UMat] | None = ..., markerIds: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ... + + + +# Functions +@_typing.overload +def drawDetectedCornersCharuco(image: cv2.typing.MatLike, charucoCorners: cv2.typing.MatLike, charucoIds: cv2.typing.MatLike | None = ..., cornerColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawDetectedCornersCharuco(image: cv2.UMat, charucoCorners: cv2.UMat, charucoIds: cv2.UMat | None = ..., cornerColor: cv2.typing.Scalar = ...) -> cv2.UMat: ... + +@_typing.overload +def drawDetectedDiamonds(image: cv2.typing.MatLike, diamondCorners: _typing.Sequence[cv2.typing.MatLike], diamondIds: cv2.typing.MatLike | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawDetectedDiamonds(image: cv2.UMat, diamondCorners: _typing.Sequence[cv2.UMat], diamondIds: cv2.UMat | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.UMat: ... + +@_typing.overload +def drawDetectedMarkers(image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike], ids: cv2.typing.MatLike | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@_typing.overload +def drawDetectedMarkers(image: cv2.UMat, corners: _typing.Sequence[cv2.UMat], ids: cv2.UMat | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.UMat: ... + +def extendDictionary(nMarkers: int, markerSize: int, baseDictionary: Dictionary = ..., randomSeed: int = ...) -> Dictionary: ... + +@_typing.overload +def generateImageMarker(dictionary: Dictionary, id: int, sidePixels: int, img: cv2.typing.MatLike | None = ..., borderBits: int = ...) -> cv2.typing.MatLike: ... 
+@_typing.overload +def generateImageMarker(dictionary: Dictionary, id: int, sidePixels: int, img: cv2.UMat | None = ..., borderBits: int = ...) -> cv2.UMat: ... + +def getPredefinedDictionary(dict: int) -> Dictionary: ... + + diff --git a/parrot/lib/python3.10/site-packages/cv2/config-3.py b/parrot/lib/python3.10/site-packages/cv2/config-3.py new file mode 100644 index 0000000000000000000000000000000000000000..587a42bfacd8401281a51fe5dbc8ea44f4e156d5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/config-3.py @@ -0,0 +1,24 @@ +PYTHON_EXTENSIONS_PATHS = [ + LOADER_DIR +] + PYTHON_EXTENSIONS_PATHS + +ci_and_not_headless = False + +try: + from .version import ci_build, headless + + ci_and_not_headless = ci_build and not headless +except: + pass + +# the Qt plugin is included currently only in the pre-built wheels +if sys.platform.startswith("linux") and ci_and_not_headless: + os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "qt", "plugins" + ) + +# Qt will throw warning on Linux if fonts are not found +if sys.platform.startswith("linux") and ci_and_not_headless: + os.environ["QT_QPA_FONTDIR"] = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "qt", "fonts" + ) diff --git a/parrot/lib/python3.10/site-packages/cv2/config.py b/parrot/lib/python3.10/site-packages/cv2/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a95fbcf0db643541979c557588cb264233bc6891 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/config.py @@ -0,0 +1,5 @@ +import os + +BINARIES_PATHS = [ + os.path.join(os.path.join(LOADER_DIR, '../../'), 'lib64') +] + BINARIES_PATHS diff --git a/parrot/lib/python3.10/site-packages/cv2/gapi/__init__.py b/parrot/lib/python3.10/site-packages/cv2/gapi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2b21e54e4195658f71a4d90d8a1d8b993710010b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/gapi/__init__.py @@ -0,0 +1,323 @@ +__all__ = ['op', 'kernel'] + +import sys +import cv2 as cv + +# NB: Register function in specific module +def register(mname): + def parameterized(func): + sys.modules[mname].__dict__[func.__name__] = func + return func + return parameterized + + +@register('cv2.gapi') +def networks(*args): + return cv.gapi_GNetPackage(list(map(cv.detail.strip, args))) + + +@register('cv2.gapi') +def compile_args(*args): + return list(map(cv.GCompileArg, args)) + + +@register('cv2') +def GIn(*args): + return [*args] + + +@register('cv2') +def GOut(*args): + return [*args] + + +@register('cv2') +def gin(*args): + return [*args] + + +@register('cv2.gapi') +def descr_of(*args): + return [*args] + + +@register('cv2') +class GOpaque(): + # NB: Inheritance from c++ class cause segfault. 
+ # So just aggregate cv.GOpaqueT instead of inheritance + def __new__(cls, argtype): + return cv.GOpaqueT(argtype) + + class Bool(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_BOOL) + + class Int(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_INT) + + class Int64(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_INT64) + + class UInt64(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_UINT64) + + class Double(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_DOUBLE) + + class Float(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_FLOAT) + + class String(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_STRING) + + class Point(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_POINT) + + class Point2f(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_POINT2F) + + class Point3f(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_POINT3F) + + class Size(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_SIZE) + + class Rect(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_RECT) + + class Prim(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_DRAW_PRIM) + + class Any(): + def __new__(self): + return cv.GOpaqueT(cv.gapi.CV_ANY) + +@register('cv2') +class GArray(): + # NB: Inheritance from c++ class cause segfault. + # So just aggregate cv.GArrayT instead of inheritance + def __new__(cls, argtype): + return cv.GArrayT(argtype) + + class Bool(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_BOOL) + + class Int(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_INT) + + class Int64(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_INT64) + + class UInt64(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_UINT64) + + class Double(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_DOUBLE) + + class Float(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_FLOAT) + + class String(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_STRING) + + class Point(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_POINT) + + class Point2f(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_POINT2F) + + class Point3f(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_POINT3F) + + class Size(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_SIZE) + + class Rect(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_RECT) + + class Scalar(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_SCALAR) + + class Mat(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_MAT) + + class GMat(): + def __new__(self): + return cv.GArrayT(cv.gapi.CV_GMAT) + + class Prim(): + def __new__(self): + return cv.GArray(cv.gapi.CV_DRAW_PRIM) + + class Any(): + def __new__(self): + return cv.GArray(cv.gapi.CV_ANY) + + +# NB: Top lvl decorator takes arguments +def op(op_id, in_types, out_types): + + garray_types= { + cv.GArray.Bool: cv.gapi.CV_BOOL, + cv.GArray.Int: cv.gapi.CV_INT, + cv.GArray.Int64: cv.gapi.CV_INT64, + cv.GArray.UInt64: cv.gapi.CV_UINT64, + cv.GArray.Double: cv.gapi.CV_DOUBLE, + cv.GArray.Float: cv.gapi.CV_FLOAT, + cv.GArray.String: cv.gapi.CV_STRING, + cv.GArray.Point: cv.gapi.CV_POINT, + cv.GArray.Point2f: cv.gapi.CV_POINT2F, + cv.GArray.Point3f: cv.gapi.CV_POINT3F, + cv.GArray.Size: cv.gapi.CV_SIZE, + cv.GArray.Rect: cv.gapi.CV_RECT, + cv.GArray.Scalar: cv.gapi.CV_SCALAR, + cv.GArray.Mat: cv.gapi.CV_MAT, + cv.GArray.GMat: cv.gapi.CV_GMAT, + cv.GArray.Prim: cv.gapi.CV_DRAW_PRIM, + cv.GArray.Any: cv.gapi.CV_ANY + } + + gopaque_types= { + cv.GOpaque.Size: cv.gapi.CV_SIZE, + 
cv.GOpaque.Rect: cv.gapi.CV_RECT, + cv.GOpaque.Bool: cv.gapi.CV_BOOL, + cv.GOpaque.Int: cv.gapi.CV_INT, + cv.GOpaque.Int64: cv.gapi.CV_INT64, + cv.GOpaque.UInt64: cv.gapi.CV_UINT64, + cv.GOpaque.Double: cv.gapi.CV_DOUBLE, + cv.GOpaque.Float: cv.gapi.CV_FLOAT, + cv.GOpaque.String: cv.gapi.CV_STRING, + cv.GOpaque.Point: cv.gapi.CV_POINT, + cv.GOpaque.Point2f: cv.gapi.CV_POINT2F, + cv.GOpaque.Point3f: cv.gapi.CV_POINT3F, + cv.GOpaque.Size: cv.gapi.CV_SIZE, + cv.GOpaque.Rect: cv.gapi.CV_RECT, + cv.GOpaque.Prim: cv.gapi.CV_DRAW_PRIM, + cv.GOpaque.Any: cv.gapi.CV_ANY + } + + type2str = { + cv.gapi.CV_BOOL: 'cv.gapi.CV_BOOL' , + cv.gapi.CV_INT: 'cv.gapi.CV_INT' , + cv.gapi.CV_INT64: 'cv.gapi.CV_INT64' , + cv.gapi.CV_UINT64: 'cv.gapi.CV_UINT64' , + cv.gapi.CV_DOUBLE: 'cv.gapi.CV_DOUBLE' , + cv.gapi.CV_FLOAT: 'cv.gapi.CV_FLOAT' , + cv.gapi.CV_STRING: 'cv.gapi.CV_STRING' , + cv.gapi.CV_POINT: 'cv.gapi.CV_POINT' , + cv.gapi.CV_POINT2F: 'cv.gapi.CV_POINT2F' , + cv.gapi.CV_POINT3F: 'cv.gapi.CV_POINT3F' , + cv.gapi.CV_SIZE: 'cv.gapi.CV_SIZE', + cv.gapi.CV_RECT: 'cv.gapi.CV_RECT', + cv.gapi.CV_SCALAR: 'cv.gapi.CV_SCALAR', + cv.gapi.CV_MAT: 'cv.gapi.CV_MAT', + cv.gapi.CV_GMAT: 'cv.gapi.CV_GMAT', + cv.gapi.CV_DRAW_PRIM: 'cv.gapi.CV_DRAW_PRIM' + } + + # NB: Second lvl decorator takes class to decorate + def op_with_params(cls): + if not in_types: + raise Exception('{} operation should have at least one input!'.format(cls.__name__)) + + if not out_types: + raise Exception('{} operation should have at least one output!'.format(cls.__name__)) + + for i, t in enumerate(out_types): + if t not in [cv.GMat, cv.GScalar, *garray_types, *gopaque_types]: + raise Exception('{} unsupported output type: {} in position: {}' + .format(cls.__name__, t.__name__, i)) + + def on(*args): + if len(in_types) != len(args): + raise Exception('Invalid number of input elements!\nExpected: {}, Actual: {}' + .format(len(in_types), len(args))) + + for i, (t, a) in enumerate(zip(in_types, args)): + if t in garray_types: + if not isinstance(a, cv.GArrayT): + raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}" + .format(cls.__name__, i, cv.GArrayT.__name__, type(a).__name__)) + + elif a.type() != garray_types[t]: + raise Exception("{} invalid GArrayT type for argument {}.\nExpected: {}, Actual: {}" + .format(cls.__name__, i, type2str[garray_types[t]], type2str[a.type()])) + + elif t in gopaque_types: + if not isinstance(a, cv.GOpaqueT): + raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}" + .format(cls.__name__, i, cv.GOpaqueT.__name__, type(a).__name__)) + + elif a.type() != gopaque_types[t]: + raise Exception("{} invalid GOpaque type for argument {}.\nExpected: {}, Actual: {}" + .format(cls.__name__, i, type2str[gopaque_types[t]], type2str[a.type()])) + + else: + if t != type(a): + raise Exception('{} invalid input type for argument {}.\nExpected: {}, Actual: {}' + .format(cls.__name__, i, t.__name__, type(a).__name__)) + + op = cv.gapi.__op(op_id, cls.outMeta, *args) + + out_protos = [] + for i, out_type in enumerate(out_types): + if out_type == cv.GMat: + out_protos.append(op.getGMat()) + elif out_type == cv.GScalar: + out_protos.append(op.getGScalar()) + elif out_type in gopaque_types: + out_protos.append(op.getGOpaque(gopaque_types[out_type])) + elif out_type in garray_types: + out_protos.append(op.getGArray(garray_types[out_type])) + else: + raise Exception("""In {}: G-API operation can't produce the output with type: {} in position: {}""" + .format(cls.__name__, out_type.__name__, 
i)) + + return tuple(out_protos) if len(out_protos) != 1 else out_protos[0] + + # NB: Extend operation class + cls.id = op_id + cls.on = staticmethod(on) + return cls + + return op_with_params + + +def kernel(op_cls): + # NB: Second lvl decorator takes class to decorate + def kernel_with_params(cls): + # NB: Add new members to kernel class + cls.id = op_cls.id + cls.outMeta = op_cls.outMeta + return cls + + return kernel_with_params + + +cv.gapi.wip.GStreamerPipeline = cv.gapi_wip_gst_GStreamerPipeline diff --git a/parrot/lib/python3.10/site-packages/cv2/gapi/__init__.pyi b/parrot/lib/python3.10/site-packages/cv2/gapi/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bdc68bd69c5b275a02c29b8605e8502d84f55bbe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/gapi/__init__.pyi @@ -0,0 +1,349 @@ +__all__: list[str] = [] + +import cv2 +import cv2.typing +import typing as _typing + + +from cv2.gapi import core as core +from cv2.gapi import ie as ie +from cv2.gapi import imgproc as imgproc +from cv2.gapi import oak as oak +from cv2.gapi import onnx as onnx +from cv2.gapi import ot as ot +from cv2.gapi import ov as ov +from cv2.gapi import own as own +from cv2.gapi import render as render +from cv2.gapi import streaming as streaming +from cv2.gapi import video as video +from cv2.gapi import wip as wip + + +# Enumerations +StereoOutputFormat_DEPTH_FLOAT16: int +STEREO_OUTPUT_FORMAT_DEPTH_FLOAT16: int +StereoOutputFormat_DEPTH_FLOAT32: int +STEREO_OUTPUT_FORMAT_DEPTH_FLOAT32: int +StereoOutputFormat_DISPARITY_FIXED16_11_5: int +STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_11_5: int +StereoOutputFormat_DISPARITY_FIXED16_12_4: int +STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_12_4: int +StereoOutputFormat_DEPTH_16F: int +STEREO_OUTPUT_FORMAT_DEPTH_16F: int +StereoOutputFormat_DEPTH_32F: int +STEREO_OUTPUT_FORMAT_DEPTH_32F: int +StereoOutputFormat_DISPARITY_16Q_10_5: int +STEREO_OUTPUT_FORMAT_DISPARITY_16Q_10_5: int +StereoOutputFormat_DISPARITY_16Q_11_4: int +STEREO_OUTPUT_FORMAT_DISPARITY_16Q_11_4: int +StereoOutputFormat = int +"""One of [StereoOutputFormat_DEPTH_FLOAT16, STEREO_OUTPUT_FORMAT_DEPTH_FLOAT16, StereoOutputFormat_DEPTH_FLOAT32, STEREO_OUTPUT_FORMAT_DEPTH_FLOAT32, StereoOutputFormat_DISPARITY_FIXED16_11_5, STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_11_5, StereoOutputFormat_DISPARITY_FIXED16_12_4, STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_12_4, StereoOutputFormat_DEPTH_16F, STEREO_OUTPUT_FORMAT_DEPTH_16F, StereoOutputFormat_DEPTH_32F, STEREO_OUTPUT_FORMAT_DEPTH_32F, StereoOutputFormat_DISPARITY_16Q_10_5, STEREO_OUTPUT_FORMAT_DISPARITY_16Q_10_5, StereoOutputFormat_DISPARITY_16Q_11_4, STEREO_OUTPUT_FORMAT_DISPARITY_16Q_11_4]""" + +CV_BOOL: int +CV_INT: int +CV_INT64: int +CV_UINT64: int +CV_DOUBLE: int +CV_FLOAT: int +CV_STRING: int +CV_POINT: int +CV_POINT2F: int +CV_POINT3F: int +CV_SIZE: int +CV_RECT: int +CV_SCALAR: int +CV_MAT: int +CV_GMAT: int +CV_DRAW_PRIM: int +CV_ANY: int +ArgType = int +"""One of [CV_BOOL, CV_INT, CV_INT64, CV_UINT64, CV_DOUBLE, CV_FLOAT, CV_STRING, CV_POINT, CV_POINT2F, CV_POINT3F, CV_SIZE, CV_RECT, CV_SCALAR, CV_MAT, CV_GMAT, CV_DRAW_PRIM, CV_ANY]""" + + + +# Classes +class GNetParam: + ... + +class GNetPackage: + # Functions + @_typing.overload + def __init__(self) -> None: ... + @_typing.overload + def __init__(self, nets: _typing.Sequence[GNetParam]) -> None: ... + + + +# Functions +def BGR2Gray(src: cv2.GMat) -> cv2.GMat: ... + +def BGR2I420(src: cv2.GMat) -> cv2.GMat: ... + +def BGR2LUV(src: cv2.GMat) -> cv2.GMat: ... 
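# --- Illustrative usage (editor's sketch, not part of the generated stub) ---
# The functions declared in this module are G-API operations: they do not compute
# anything eagerly, they add nodes to a lazy graph built over cv2.GMat placeholders.
# A minimal sketch, assuming OpenCV is importable and an image file "input.jpg"
# exists (the file name is a placeholder, not something shipped with this package):
import cv2 as cv

g_in = cv.GMat()                                        # symbolic graph input
g_gray = cv.gapi.BGR2Gray(g_in)                         # graph node: BGR -> gray
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_gray))   # freeze the graph

img = cv.imread("input.jpg")                            # concrete BGR image
gray = comp.apply(cv.gin(img))                          # execute the graph on real data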
+ +def BGR2RGB(src: cv2.GMat) -> cv2.GMat: ... + +def BGR2YUV(src: cv2.GMat) -> cv2.GMat: ... + +def BayerGR2RGB(src_gr: cv2.GMat) -> cv2.GMat: ... + +def Canny(image: cv2.GMat, threshold1: float, threshold2: float, apertureSize: int = ..., L2gradient: bool = ...) -> cv2.GMat: ... + +def I4202BGR(src: cv2.GMat) -> cv2.GMat: ... + +def I4202RGB(src: cv2.GMat) -> cv2.GMat: ... + +def LUT(src: cv2.GMat, lut: cv2.typing.MatLike) -> cv2.GMat: ... + +def LUV2BGR(src: cv2.GMat) -> cv2.GMat: ... + +def Laplacian(src: cv2.GMat, ddepth: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.GMat: ... + +def NV12toBGR(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ... + +def NV12toGray(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ... + +def NV12toRGB(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ... + +@_typing.overload +def RGB2Gray(src: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def RGB2Gray(src: cv2.GMat, rY: float, gY: float, bY: float) -> cv2.GMat: ... + +def RGB2HSV(src: cv2.GMat) -> cv2.GMat: ... + +def RGB2I420(src: cv2.GMat) -> cv2.GMat: ... + +def RGB2Lab(src: cv2.GMat) -> cv2.GMat: ... + +def RGB2YUV(src: cv2.GMat) -> cv2.GMat: ... + +def RGB2YUV422(src: cv2.GMat) -> cv2.GMat: ... + +def Sobel(src: cv2.GMat, ddepth: int, dx: int, dy: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def SobelXY(src: cv2.GMat, ddepth: int, order: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> tuple[cv2.GMat, cv2.GMat]: ... + +def YUV2BGR(src: cv2.GMat) -> cv2.GMat: ... + +def YUV2RGB(src: cv2.GMat) -> cv2.GMat: ... + +def absDiff(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... + +def absDiffC(src: cv2.GMat, c: cv2.GScalar) -> cv2.GMat: ... + +def add(src1: cv2.GMat, src2: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ... + +@_typing.overload +def addC(src1: cv2.GMat, c: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ... +@_typing.overload +def addC(c: cv2.GScalar, src1: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ... + +def addWeighted(src1: cv2.GMat, alpha: float, src2: cv2.GMat, beta: float, gamma: float, ddepth: int = ...) -> cv2.GMat: ... + +def bilateralFilter(src: cv2.GMat, d: int, sigmaColor: float, sigmaSpace: float, borderType: int = ...) -> cv2.GMat: ... + +@_typing.overload +def bitwise_and(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def bitwise_and(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +def bitwise_not(src: cv2.GMat) -> cv2.GMat: ... + +@_typing.overload +def bitwise_or(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def bitwise_or(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +@_typing.overload +def bitwise_xor(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def bitwise_xor(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +def blur(src: cv2.GMat, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +@_typing.overload +def boundingRect(src: cv2.GMat) -> cv2.GOpaqueT: ... +@_typing.overload +def boundingRect(src: cv2.GArrayT) -> cv2.GOpaqueT: ... +@_typing.overload +def boundingRect(src: cv2.GArrayT) -> cv2.GOpaqueT: ... + +def boxFilter(src: cv2.GMat, dtype: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) 
-> cv2.GMat: ... + +def cartToPolar(x: cv2.GMat, y: cv2.GMat, angleInDegrees: bool = ...) -> tuple[cv2.GMat, cv2.GMat]: ... + +@_typing.overload +def cmpEQ(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def cmpEQ(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +@_typing.overload +def cmpGE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def cmpGE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +@_typing.overload +def cmpGT(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def cmpGT(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +@_typing.overload +def cmpLE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def cmpLE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +@_typing.overload +def cmpLT(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def cmpLT(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +@_typing.overload +def cmpNE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def cmpNE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ... + +def combine(lhs: cv2.GKernelPackage, rhs: cv2.GKernelPackage) -> cv2.GKernelPackage: ... + +@_typing.overload +def concatHor(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def concatHor(v: _typing.Sequence[cv2.GMat]) -> cv2.GMat: ... + +@_typing.overload +def concatVert(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... +@_typing.overload +def concatVert(v: _typing.Sequence[cv2.GMat]) -> cv2.GMat: ... + +def convertTo(src: cv2.GMat, rdepth: int, alpha: float = ..., beta: float = ...) -> cv2.GMat: ... + +def copy(in_: cv2.GMat) -> cv2.GMat: ... + +def countNonZero(src: cv2.GMat) -> cv2.GOpaqueT: ... + +def crop(src: cv2.GMat, rect: cv2.typing.Rect) -> cv2.GMat: ... + +def dilate(src: cv2.GMat, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def dilate3x3(src: cv2.GMat, iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def div(src1: cv2.GMat, src2: cv2.GMat, scale: float, ddepth: int = ...) -> cv2.GMat: ... + +def divC(src: cv2.GMat, divisor: cv2.GScalar, scale: float, ddepth: int = ...) -> cv2.GMat: ... + +def divRC(divident: cv2.GScalar, src: cv2.GMat, scale: float, ddepth: int = ...) -> cv2.GMat: ... + +def equalizeHist(src: cv2.GMat) -> cv2.GMat: ... + +def erode(src: cv2.GMat, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def erode3x3(src: cv2.GMat, iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def filter2D(src: cv2.GMat, ddepth: int, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., delta: cv2.typing.Scalar = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def flip(src: cv2.GMat, flipCode: int) -> cv2.GMat: ... + +def gaussianBlur(src: cv2.GMat, ksize: cv2.typing.Size, sigmaX: float, sigmaY: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def goodFeaturesToTrack(image: cv2.GMat, maxCorners: int, qualityLevel: float, minDistance: float, mask: cv2.typing.MatLike | None = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> cv2.GArrayT: ... 
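# --- Illustrative usage (editor's sketch, not part of the generated stub) ---
# Nodes chain naturally before compilation; intermediate GMat values are just graph
# edges. A small unsharp-mask sketch built from the gaussianBlur and addWeighted
# operations declared above (kernel size, weights and the file name are placeholders):
import cv2 as cv

g_in = cv.GMat()
g_blur = cv.gapi.gaussianBlur(g_in, (5, 5), 1.0)           # blurred copy of the input
g_sharp = cv.gapi.addWeighted(g_in, 1.5, g_blur, -0.5, 0)  # original plus negative blur
sharpen = cv.GComputation(cv.GIn(g_in), cv.GOut(g_sharp))

out = sharpen.apply(cv.gin(cv.imread("input.jpg")))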
+ +def inRange(src: cv2.GMat, threshLow: cv2.GScalar, threshUp: cv2.GScalar) -> cv2.GMat: ... + +@_typing.overload +def infer(name: str, inputs: cv2.GInferInputs) -> cv2.GInferOutputs: ... +@_typing.overload +def infer(name: str, roi: cv2.GOpaqueT, inputs: cv2.GInferInputs) -> cv2.GInferOutputs: ... +@_typing.overload +def infer(name: str, rois: cv2.GArrayT, inputs: cv2.GInferInputs) -> cv2.GInferListOutputs: ... + +def infer2(name: str, in_: cv2.GMat, inputs: cv2.GInferListInputs) -> cv2.GInferListOutputs: ... + +def integral(src: cv2.GMat, sdepth: int = ..., sqdepth: int = ...) -> tuple[cv2.GMat, cv2.GMat]: ... + +@_typing.overload +def kmeans(data: cv2.GMat, K: int, bestLabels: cv2.GMat, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GMat, cv2.GMat]: ... +@_typing.overload +def kmeans(data: cv2.GMat, K: int, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GMat, cv2.GMat]: ... +@_typing.overload +def kmeans(data: cv2.GArrayT, K: int, bestLabels: cv2.GArrayT, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GArrayT, cv2.GArrayT]: ... +@_typing.overload +def kmeans(data: cv2.GArrayT, K: int, bestLabels: cv2.GArrayT, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GArrayT, cv2.GArrayT]: ... + +def mask(src: cv2.GMat, mask: cv2.GMat) -> cv2.GMat: ... + +def max(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... + +def mean(src: cv2.GMat) -> cv2.GScalar: ... + +def medianBlur(src: cv2.GMat, ksize: int) -> cv2.GMat: ... + +def merge3(src1: cv2.GMat, src2: cv2.GMat, src3: cv2.GMat) -> cv2.GMat: ... + +def merge4(src1: cv2.GMat, src2: cv2.GMat, src3: cv2.GMat, src4: cv2.GMat) -> cv2.GMat: ... + +def min(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ... + +def morphologyEx(src: cv2.GMat, op: cv2.MorphTypes, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: cv2.BorderTypes = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def mul(src1: cv2.GMat, src2: cv2.GMat, scale: float = ..., ddepth: int = ...) -> cv2.GMat: ... + +@_typing.overload +def mulC(src: cv2.GMat, multiplier: float, ddepth: int = ...) -> cv2.GMat: ... +@_typing.overload +def mulC(src: cv2.GMat, multiplier: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ... +@_typing.overload +def mulC(multiplier: cv2.GScalar, src: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ... + +def normInf(src: cv2.GMat) -> cv2.GScalar: ... + +def normL1(src: cv2.GMat) -> cv2.GScalar: ... + +def normL2(src: cv2.GMat) -> cv2.GScalar: ... + +def normalize(src: cv2.GMat, alpha: float, beta: float, norm_type: int, ddepth: int = ...) -> cv2.GMat: ... + +@_typing.overload +def parseSSD(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float = ..., filterLabel: int = ...) -> tuple[cv2.GArrayT, cv2.GArrayT]: ... +@_typing.overload +def parseSSD(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float, alignmentToSquare: bool, filterOutOfBounds: bool) -> cv2.GArrayT: ... + +def parseYolo(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float = ..., nmsThreshold: float = ..., anchors: _typing.Sequence[float] = ...) -> tuple[cv2.GArrayT, cv2.GArrayT]: ... + +def phase(x: cv2.GMat, y: cv2.GMat, angleInDegrees: bool = ...) -> cv2.GMat: ... + +def polarToCart(magnitude: cv2.GMat, angle: cv2.GMat, angleInDegrees: bool = ...) -> tuple[cv2.GMat, cv2.GMat]: ... 
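# --- Illustrative usage (editor's sketch, not part of the generated stub) ---
# infer() inserts a neural-network node into the graph. The string tag names the
# network abstractly; it is bound to real weights through cv2.gapi.networks() when
# the graph is applied. The tag, layer names and model paths below are hypothetical
# placeholders, not files shipped with this package:
import cv2 as cv

g_in = cv.GMat()
inputs = cv.GInferInputs()
inputs.setInput("data", g_in)                      # route g_in to input layer "data"
outputs = cv.gapi.infer("my-net", inputs)          # network identified by tag "my-net"
g_out = outputs.at("prob")                         # pick one output layer

comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
net = cv.gapi.ie.params("my-net", "model.xml", "model.bin", "CPU")
res = comp.apply(cv.gin(cv.imread("input.jpg")),
                 args=cv.gapi.compile_args(cv.gapi.networks(net)))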
+ +def remap(src: cv2.GMat, map1: cv2.typing.MatLike, map2: cv2.typing.MatLike, interpolation: int, borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def resize(src: cv2.GMat, dsize: cv2.typing.Size, fx: float = ..., fy: float = ..., interpolation: int = ...) -> cv2.GMat: ... + +def select(src1: cv2.GMat, src2: cv2.GMat, mask: cv2.GMat) -> cv2.GMat: ... + +def sepFilter(src: cv2.GMat, ddepth: int, kernelX: cv2.typing.MatLike, kernelY: cv2.typing.MatLike, anchor: cv2.typing.Point, delta: cv2.typing.Scalar, borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def split3(src: cv2.GMat) -> tuple[cv2.GMat, cv2.GMat, cv2.GMat]: ... + +def split4(src: cv2.GMat) -> tuple[cv2.GMat, cv2.GMat, cv2.GMat, cv2.GMat]: ... + +def sqrt(src: cv2.GMat) -> cv2.GMat: ... + +def sub(src1: cv2.GMat, src2: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ... + +def subC(src: cv2.GMat, c: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ... + +def subRC(c: cv2.GScalar, src: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ... + +def sum(src: cv2.GMat) -> cv2.GScalar: ... + +@_typing.overload +def threshold(src: cv2.GMat, thresh: cv2.GScalar, maxval: cv2.GScalar, type: int) -> cv2.GMat: ... +@_typing.overload +def threshold(src: cv2.GMat, maxval: cv2.GScalar, type: int) -> tuple[cv2.GMat, cv2.GScalar]: ... + +def transpose(src: cv2.GMat) -> cv2.GMat: ... + +def warpAffine(src: cv2.GMat, M: cv2.typing.MatLike, dsize: cv2.typing.Size, flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + +def warpPerspective(src: cv2.GMat, M: cv2.typing.MatLike, dsize: cv2.typing.Size, flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ... + + diff --git a/parrot/lib/python3.10/site-packages/cv2/gapi/wip/__init__.pyi b/parrot/lib/python3.10/site-packages/cv2/gapi/wip/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c38bca4c5085c288bf8921b8ed8624f22d2dba90 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/gapi/wip/__init__.pyi @@ -0,0 +1,41 @@ +__all__: list[str] = [] + +import cv2 +import cv2.gapi +import cv2.gapi.wip.gst +import cv2.typing +import typing as _typing + + +from cv2.gapi.wip import draw as draw +from cv2.gapi.wip import gst as gst +from cv2.gapi.wip import onevpl as onevpl + + +# Classes +class GOutputs: + # Functions + def getGMat(self) -> cv2.GMat: ... + + def getGScalar(self) -> cv2.GScalar: ... + + def getGArray(self, type: cv2.gapi.ArgType) -> cv2.GArrayT: ... + + def getGOpaque(self, type: cv2.gapi.ArgType) -> cv2.GOpaqueT: ... + + +class IStreamSource: + ... + + +# Functions +def get_streaming_source(pipeline: cv2.gapi.wip.gst.GStreamerPipeline, appsinkName: str, outputType: cv2.gapi.wip.gst.GStreamerSource_OutputType = ...) -> IStreamSource: ... + +@_typing.overload +def make_capture_src(path: str, properties: cv2.typing.map_int_and_double = ...) -> IStreamSource: ... +@_typing.overload +def make_capture_src(id: int, properties: cv2.typing.map_int_and_double = ...) -> IStreamSource: ... + +def make_gst_src(pipeline: str, outputType: cv2.gapi.wip.gst.GStreamerSource_OutputType = ...) -> IStreamSource: ... 
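# --- Illustrative usage (editor's sketch, not part of the generated stub) ---
# The wip module provides streaming sources that feed a graph frame by frame through
# a compiled-streaming object. A sketch assuming a local file "video.mp4" (the path
# is a placeholder):
import cv2 as cv

g_in = cv.GMat()
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(cv.gapi.BGR2Gray(g_in)))

cc = comp.compileStreaming()
cc.setSource(cv.gin(cv.gapi.wip.make_capture_src("video.mp4")))
cc.start()
ok, gray = cc.pull()                               # keep pulling until ok is False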
+ + diff --git a/parrot/lib/python3.10/site-packages/cv2/gapi/wip/gst/__init__.pyi b/parrot/lib/python3.10/site-packages/cv2/gapi/wip/gst/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..739778186a9ea96cb71a4f06244f895653524a44 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/gapi/wip/gst/__init__.pyi @@ -0,0 +1,17 @@ +__all__: list[str] = [] + +GStreamerSource_OutputType_FRAME: int +GSTREAMER_SOURCE_OUTPUT_TYPE_FRAME: int +GStreamerSource_OutputType_MAT: int +GSTREAMER_SOURCE_OUTPUT_TYPE_MAT: int +GStreamerSource_OutputType = int +"""One of [GStreamerSource_OutputType_FRAME, GSTREAMER_SOURCE_OUTPUT_TYPE_FRAME, GStreamerSource_OutputType_MAT, GSTREAMER_SOURCE_OUTPUT_TYPE_MAT]""" + + +# Classes +class GStreamerPipeline: + # Functions + def __init__(self, pipeline: str) -> None: ... + + + diff --git a/parrot/lib/python3.10/site-packages/cv2/ipp/__init__.pyi b/parrot/lib/python3.10/site-packages/cv2/ipp/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..23b6636ee899355b99cfe4fd3c2d5f897e538b33 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/ipp/__init__.pyi @@ -0,0 +1,14 @@ +__all__: list[str] = [] + +# Functions +def getIppVersion() -> str: ... + +def setUseIPP(flag: bool) -> None: ... + +def setUseIPP_NotExact(flag: bool) -> None: ... + +def useIPP() -> bool: ... + +def useIPP_NotExact() -> bool: ... + + diff --git a/parrot/lib/python3.10/site-packages/cv2/load_config_py2.py b/parrot/lib/python3.10/site-packages/cv2/load_config_py2.py new file mode 100644 index 0000000000000000000000000000000000000000..07fbae9f7aa704c64acfa4bbce187dc9dbf24759 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/load_config_py2.py @@ -0,0 +1,6 @@ +# flake8: noqa +import sys + +if sys.version_info[:2] < (3, 0): + def exec_file_wrapper(fpath, g_vars, l_vars): + execfile(fpath, g_vars, l_vars) diff --git a/parrot/lib/python3.10/site-packages/cv2/load_config_py3.py b/parrot/lib/python3.10/site-packages/cv2/load_config_py3.py new file mode 100644 index 0000000000000000000000000000000000000000..6f3b21ab862d42ed4572bcd52c2c41a72dbc0521 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/load_config_py3.py @@ -0,0 +1,9 @@ +# flake8: noqa +import os +import sys + +if sys.version_info[:2] >= (3, 0): + def exec_file_wrapper(fpath, g_vars, l_vars): + with open(fpath) as f: + code = compile(f.read(), os.path.basename(fpath), 'exec') + exec(code, g_vars, l_vars) diff --git a/parrot/lib/python3.10/site-packages/cv2/py.typed b/parrot/lib/python3.10/site-packages/cv2/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/cv2/version.py b/parrot/lib/python3.10/site-packages/cv2/version.py new file mode 100644 index 0000000000000000000000000000000000000000..05e3354e999a9078a73bccd6bfc5aa3d3cf87b14 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cv2/version.py @@ -0,0 +1,5 @@ +opencv_version = "4.10.0.84" +contrib = False +headless = False +rolling = False +ci_build = True \ No newline at end of file diff --git a/parrot/lib/python3.10/site-packages/cycler/__init__.py b/parrot/lib/python3.10/site-packages/cycler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97949547ad37c7105beb76cfc9e72a683465fc07 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/cycler/__init__.py @@ -0,0 +1,573 @@ +""" +Cycler +====== + +Cycling through combinations of values, producing dictionaries. 
+ +You can add cyclers:: + + from cycler import cycler + cc = (cycler(color=list('rgb')) + + cycler(linestyle=['-', '--', '-.'])) + for d in cc: + print(d) + +Results in:: + + {'color': 'r', 'linestyle': '-'} + {'color': 'g', 'linestyle': '--'} + {'color': 'b', 'linestyle': '-.'} + + +You can multiply cyclers:: + + from cycler import cycler + cc = (cycler(color=list('rgb')) * + cycler(linestyle=['-', '--', '-.'])) + for d in cc: + print(d) + +Results in:: + + {'color': 'r', 'linestyle': '-'} + {'color': 'r', 'linestyle': '--'} + {'color': 'r', 'linestyle': '-.'} + {'color': 'g', 'linestyle': '-'} + {'color': 'g', 'linestyle': '--'} + {'color': 'g', 'linestyle': '-.'} + {'color': 'b', 'linestyle': '-'} + {'color': 'b', 'linestyle': '--'} + {'color': 'b', 'linestyle': '-.'} +""" + + +from __future__ import annotations + +from collections.abc import Hashable, Iterable, Generator +import copy +from functools import reduce +from itertools import product, cycle +from operator import mul, add +# Dict, List, Union required for runtime cast calls +from typing import TypeVar, Generic, Callable, Union, Dict, List, Any, overload, cast + +__version__ = "0.12.1" + +K = TypeVar("K", bound=Hashable) +L = TypeVar("L", bound=Hashable) +V = TypeVar("V") +U = TypeVar("U") + + +def _process_keys( + left: Cycler[K, V] | Iterable[dict[K, V]], + right: Cycler[K, V] | Iterable[dict[K, V]] | None, +) -> set[K]: + """ + Helper function to compose cycler keys. + + Parameters + ---------- + left, right : iterable of dictionaries or None + The cyclers to be composed. + + Returns + ------- + keys : set + The keys in the composition of the two cyclers. + """ + l_peek: dict[K, V] = next(iter(left)) if left != [] else {} + r_peek: dict[K, V] = next(iter(right)) if right is not None else {} + l_key: set[K] = set(l_peek.keys()) + r_key: set[K] = set(r_peek.keys()) + if l_key & r_key: + raise ValueError("Can not compose overlapping cycles") + return l_key | r_key + + +def concat(left: Cycler[K, V], right: Cycler[K, U]) -> Cycler[K, V | U]: + r""" + Concatenate `Cycler`\s, as if chained using `itertools.chain`. + + The keys must match exactly. + + Examples + -------- + >>> num = cycler('a', range(3)) + >>> let = cycler('a', 'abc') + >>> num.concat(let) + cycler('a', [0, 1, 2, 'a', 'b', 'c']) + + Returns + ------- + `Cycler` + The concatenated cycler. + """ + if left.keys != right.keys: + raise ValueError( + "Keys do not match:\n" + "\tIntersection: {both!r}\n" + "\tDisjoint: {just_one!r}".format( + both=left.keys & right.keys, just_one=left.keys ^ right.keys + ) + ) + _l = cast(Dict[K, List[Union[V, U]]], left.by_key()) + _r = cast(Dict[K, List[Union[V, U]]], right.by_key()) + return reduce(add, (_cycler(k, _l[k] + _r[k]) for k in left.keys)) + + +class Cycler(Generic[K, V]): + """ + Composable cycles. + + This class has compositions methods: + + ``+`` + for 'inner' products (zip) + + ``+=`` + in-place ``+`` + + ``*`` + for outer products (`itertools.product`) and integer multiplication + + ``*=`` + in-place ``*`` + + and supports basic slicing via ``[]``. + + Parameters + ---------- + left, right : Cycler or None + The 'left' and 'right' cyclers. + op : func or None + Function which composes the 'left' and 'right' cyclers. + """ + + def __call__(self): + return cycle(self) + + def __init__( + self, + left: Cycler[K, V] | Iterable[dict[K, V]] | None, + right: Cycler[K, V] | None = None, + op: Any = None, + ): + """ + Semi-private init. + + Do not use this directly, use `cycler` function instead. 
+ """ + if isinstance(left, Cycler): + self._left: Cycler[K, V] | list[dict[K, V]] = Cycler( + left._left, left._right, left._op + ) + elif left is not None: + # Need to copy the dictionary or else that will be a residual + # mutable that could lead to strange errors + self._left = [copy.copy(v) for v in left] + else: + self._left = [] + + if isinstance(right, Cycler): + self._right: Cycler[K, V] | None = Cycler( + right._left, right._right, right._op + ) + else: + self._right = None + + self._keys: set[K] = _process_keys(self._left, self._right) + self._op: Any = op + + def __contains__(self, k): + return k in self._keys + + @property + def keys(self) -> set[K]: + """The keys this Cycler knows about.""" + return set(self._keys) + + def change_key(self, old: K, new: K) -> None: + """ + Change a key in this cycler to a new name. + Modification is performed in-place. + + Does nothing if the old key is the same as the new key. + Raises a ValueError if the new key is already a key. + Raises a KeyError if the old key isn't a key. + """ + if old == new: + return + if new in self._keys: + raise ValueError( + f"Can't replace {old} with {new}, {new} is already a key" + ) + if old not in self._keys: + raise KeyError( + f"Can't replace {old} with {new}, {old} is not a key" + ) + + self._keys.remove(old) + self._keys.add(new) + + if self._right is not None and old in self._right.keys: + self._right.change_key(old, new) + + # self._left should always be non-None + # if self._keys is non-empty. + elif isinstance(self._left, Cycler): + self._left.change_key(old, new) + else: + # It should be completely safe at this point to + # assume that the old key can be found in each + # iteration. + self._left = [{new: entry[old]} for entry in self._left] + + @classmethod + def _from_iter(cls, label: K, itr: Iterable[V]) -> Cycler[K, V]: + """ + Class method to create 'base' Cycler objects + that do not have a 'right' or 'op' and for which + the 'left' object is not another Cycler. + + Parameters + ---------- + label : hashable + The property key. + + itr : iterable + Finite length iterable of the property values. + + Returns + ------- + `Cycler` + New 'base' cycler. + """ + ret: Cycler[K, V] = cls(None) + ret._left = list({label: v} for v in itr) + ret._keys = {label} + return ret + + def __getitem__(self, key: slice) -> Cycler[K, V]: + # TODO : maybe add numpy style fancy slicing + if isinstance(key, slice): + trans = self.by_key() + return reduce(add, (_cycler(k, v[key]) for k, v in trans.items())) + else: + raise ValueError("Can only use slices with Cycler.__getitem__") + + def __iter__(self) -> Generator[dict[K, V], None, None]: + if self._right is None: + for left in self._left: + yield dict(left) + else: + if self._op is None: + raise TypeError( + "Operation cannot be None when both left and right are defined" + ) + for a, b in self._op(self._left, self._right): + out = {} + out.update(a) + out.update(b) + yield out + + def __add__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]: + """ + Pair-wise combine two equal length cyclers (zip). + + Parameters + ---------- + other : Cycler + """ + if len(self) != len(other): + raise ValueError( + f"Can only add equal length cycles, not {len(self)} and {len(other)}" + ) + return Cycler( + cast(Cycler[Union[K, L], Union[V, U]], self), + cast(Cycler[Union[K, L], Union[V, U]], other), + zip + ) + + @overload + def __mul__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]: + ... + + @overload + def __mul__(self, other: int) -> Cycler[K, V]: + ... 
+ + def __mul__(self, other): + """ + Outer product of two cyclers (`itertools.product`) or integer + multiplication. + + Parameters + ---------- + other : Cycler or int + """ + if isinstance(other, Cycler): + return Cycler( + cast(Cycler[Union[K, L], Union[V, U]], self), + cast(Cycler[Union[K, L], Union[V, U]], other), + product + ) + elif isinstance(other, int): + trans = self.by_key() + return reduce( + add, (_cycler(k, v * other) for k, v in trans.items()) + ) + else: + return NotImplemented + + @overload + def __rmul__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]: + ... + + @overload + def __rmul__(self, other: int) -> Cycler[K, V]: + ... + + def __rmul__(self, other): + return self * other + + def __len__(self) -> int: + op_dict: dict[Callable, Callable[[int, int], int]] = {zip: min, product: mul} + if self._right is None: + return len(self._left) + l_len = len(self._left) + r_len = len(self._right) + return op_dict[self._op](l_len, r_len) + + # iadd and imul do not exapand the the type as the returns must be consistent with + # self, thus they flag as inconsistent with add/mul + def __iadd__(self, other: Cycler[K, V]) -> Cycler[K, V]: # type: ignore[misc] + """ + In-place pair-wise combine two equal length cyclers (zip). + + Parameters + ---------- + other : Cycler + """ + if not isinstance(other, Cycler): + raise TypeError("Cannot += with a non-Cycler object") + # True shallow copy of self is fine since this is in-place + old_self = copy.copy(self) + self._keys = _process_keys(old_self, other) + self._left = old_self + self._op = zip + self._right = Cycler(other._left, other._right, other._op) + return self + + def __imul__(self, other: Cycler[K, V] | int) -> Cycler[K, V]: # type: ignore[misc] + """ + In-place outer product of two cyclers (`itertools.product`). + + Parameters + ---------- + other : Cycler + """ + if not isinstance(other, Cycler): + raise TypeError("Cannot *= with a non-Cycler object") + # True shallow copy of self is fine since this is in-place + old_self = copy.copy(self) + self._keys = _process_keys(old_self, other) + self._left = old_self + self._op = product + self._right = Cycler(other._left, other._right, other._op) + return self + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Cycler): + return False + if len(self) != len(other): + return False + if self.keys ^ other.keys: + return False + return all(a == b for a, b in zip(self, other)) + + __hash__ = None # type: ignore + + def __repr__(self) -> str: + op_map = {zip: "+", product: "*"} + if self._right is None: + lab = self.keys.pop() + itr = list(v[lab] for v in self) + return f"cycler({lab!r}, {itr!r})" + else: + op = op_map.get(self._op, "?") + msg = "({left!r} {op} {right!r})" + return msg.format(left=self._left, op=op, right=self._right) + + def _repr_html_(self) -> str: + # an table showing the value of each key through a full cycle + output = "" + sorted_keys = sorted(self.keys, key=repr) + for key in sorted_keys: + output += f"" + for d in iter(self): + output += "" + for k in sorted_keys: + output += f"" + output += "" + output += "
</table>
" + return output + + def by_key(self) -> dict[K, list[V]]: + """ + Values by key. + + This returns the transposed values of the cycler. Iterating + over a `Cycler` yields dicts with a single value for each key, + this method returns a `dict` of `list` which are the values + for the given key. + + The returned value can be used to create an equivalent `Cycler` + using only `+`. + + Returns + ------- + transpose : dict + dict of lists of the values for each key. + """ + + # TODO : sort out if this is a bottle neck, if there is a better way + # and if we care. + + keys = self.keys + out: dict[K, list[V]] = {k: list() for k in keys} + + for d in self: + for k in keys: + out[k].append(d[k]) + return out + + # for back compatibility + _transpose = by_key + + def simplify(self) -> Cycler[K, V]: + """ + Simplify the cycler into a sum (but no products) of cyclers. + + Returns + ------- + simple : Cycler + """ + # TODO: sort out if it is worth the effort to make sure this is + # balanced. Currently it is is + # (((a + b) + c) + d) vs + # ((a + b) + (c + d)) + # I would believe that there is some performance implications + trans = self.by_key() + return reduce(add, (_cycler(k, v) for k, v in trans.items())) + + concat = concat + + +@overload +def cycler(arg: Cycler[K, V]) -> Cycler[K, V]: + ... + + +@overload +def cycler(**kwargs: Iterable[V]) -> Cycler[str, V]: + ... + + +@overload +def cycler(label: K, itr: Iterable[V]) -> Cycler[K, V]: + ... + + +def cycler(*args, **kwargs): + """ + Create a new `Cycler` object from a single positional argument, + a pair of positional arguments, or the combination of keyword arguments. + + cycler(arg) + cycler(label1=itr1[, label2=iter2[, ...]]) + cycler(label, itr) + + Form 1 simply copies a given `Cycler` object. + + Form 2 composes a `Cycler` as an inner product of the + pairs of keyword arguments. In other words, all of the + iterables are cycled simultaneously, as if through zip(). + + Form 3 creates a `Cycler` from a label and an iterable. + This is useful for when the label cannot be a keyword argument + (e.g., an integer or a name that has a space in it). + + Parameters + ---------- + arg : Cycler + Copy constructor for Cycler (does a shallow copy of iterables). + label : name + The property key. In the 2-arg form of the function, + the label can be any hashable object. In the keyword argument + form of the function, it must be a valid python identifier. + itr : iterable + Finite length iterable of the property values. + Can be a single-property `Cycler` that would + be like a key change, but as a shallow copy. + + Returns + ------- + cycler : Cycler + New `Cycler` for the given property + + """ + if args and kwargs: + raise TypeError( + "cycler() can only accept positional OR keyword arguments -- not both." + ) + + if len(args) == 1: + if not isinstance(args[0], Cycler): + raise TypeError( + "If only one positional argument given, it must " + "be a Cycler instance." + ) + return Cycler(args[0]) + elif len(args) == 2: + return _cycler(*args) + elif len(args) > 2: + raise TypeError( + "Only a single Cycler can be accepted as the lone " + "positional argument. Use keyword arguments instead." + ) + + if kwargs: + return reduce(add, (_cycler(k, v) for k, v in kwargs.items())) + + raise TypeError("Must have at least a positional OR keyword arguments") + + +def _cycler(label: K, itr: Iterable[V]) -> Cycler[K, V]: + """ + Create a new `Cycler` object from a property name and iterable of values. + + Parameters + ---------- + label : hashable + The property key. 
+ itr : iterable + Finite length iterable of the property values. + + Returns + ------- + cycler : Cycler + New `Cycler` for the given property + """ + if isinstance(itr, Cycler): + keys = itr.keys + if len(keys) != 1: + msg = "Can not create Cycler from a multi-property Cycler" + raise ValueError(msg) + + lab = keys.pop() + # Doesn't need to be a new list because + # _from_iter() will be creating that new list anyway. + itr = (v[lab] for v in itr) + + return Cycler._from_iter(label, itr) diff --git a/parrot/lib/python3.10/site-packages/cycler/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/cycler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d20c0926f2b084d680e0ce33523d56ecf64b30d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/cycler/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/cycler/py.typed b/parrot/lib/python3.10/site-packages/cycler/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/example.py b/parrot/lib/python3.10/site-packages/example.py new file mode 100644 index 0000000000000000000000000000000000000000..60173d51111e4b677b84251afc642d47bc63c193 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/example.py @@ -0,0 +1,169 @@ +################################################################################# +# Copyright (c) 2020, NVIDIA Corporation. All rights reserved. # +# # +# Redistribution and use in source and binary forms, with or without # +# modification, are permitted provided that the following conditions are met: # +# # +# * Redistributions of source code must retain the above copyright notice, # +# this list of conditions and the following disclaimer. # +# * Redistributions in binary form must reproduce the above copyright # +# notice, this list of conditions and the following disclaimer in the # +# documentation and/or other materials provided with the distribution. # +# * Neither the name of the NVIDIA Corporation nor the names of its # +# contributors may be used to endorse or promote products derived from # +# this software without specific prior written permission. # +# # +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # +# THE POSSIBILITY OF SUCH DAMAGE. 
# +################################################################################# + +# +# Sample script to demonstrate the usage of NVML API python bindings +# + +# To Run: +# $ python ./example.py + +from pynvml import * + +# +# Helper function +# +def StrVirt(mode): + if mode == NVML_GPU_VIRTUALIZATION_MODE_NONE: + return "None"; + elif mode == NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH: + return "Pass-Through"; + elif mode == NVML_GPU_VIRTUALIZATION_MODE_VGPU: + return "VGPU"; + elif mode == NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU: + return "Host VGPU"; + elif mode == NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA: + return "Host VSGA"; + else: + return "Unknown"; + +# +# Converts errors into string messages +# +def handleError(err): + if (err.value == NVML_ERROR_NOT_SUPPORTED): + return "N/A" + else: + return err.__str__() + +####### +def deviceQuery(): + + strResult = '' + try: + # + # Initialize NVML + # + nvmlInit() + + strResult += ' ' + str(nvmlSystemGetDriverVersion()) + '\n' + + deviceCount = nvmlDeviceGetCount() + strResult += ' ' + str(deviceCount) + '\n' + + for i in range(0, deviceCount): + handle = nvmlDeviceGetHandleByIndex(i) + + pciInfo = nvmlDeviceGetPciInfo(handle) + + strResult += ' \n' % pciInfo.busId + + strResult += ' ' + nvmlDeviceGetName(handle) + '\n' + + brandNames = {NVML_BRAND_UNKNOWN : "Unknown", + NVML_BRAND_QUADRO : "Quadro", + NVML_BRAND_TESLA : "Tesla", + NVML_BRAND_NVS : "NVS", + NVML_BRAND_GRID : "Grid", + NVML_BRAND_TITAN : "Titan", + NVML_BRAND_GEFORCE : "GeForce", + NVML_BRAND_NVIDIA_VAPPS : "NVIDIA Virtual Applications", + NVML_BRAND_NVIDIA_VPC : "NVIDIA Virtual PC", + NVML_BRAND_NVIDIA_VCS : "NVIDIA Virtual Compute Server", + NVML_BRAND_NVIDIA_VWS : "NVIDIA RTX Virtual Workstation", + NVML_BRAND_NVIDIA_CLOUD_GAMING : "NVIDIA Cloud Gaming", + NVML_BRAND_QUADRO_RTX : "Quadro RTX", + NVML_BRAND_NVIDIA_RTX : "NVIDIA RTX", + NVML_BRAND_NVIDIA : "NVIDIA", + NVML_BRAND_GEFORCE_RTX : "GeForce RTX", + NVML_BRAND_TITAN_RTX : "TITAN RTX", + + } + + try: + # If nvmlDeviceGetBrand() succeeds it is guaranteed to be in the dictionary + brandName = brandNames[nvmlDeviceGetBrand(handle)] + except NVMLError as err: + brandName = handleError(err) + + strResult += ' ' + brandName + '\n' + + try: + serial = nvmlDeviceGetSerial(handle) + except NVMLError as err: + serial = handleError(err) + + strResult += ' ' + serial + '\n' + + try: + uuid = nvmlDeviceGetUUID(handle) + except NVMLError as err: + uuid = handleError(err) + + strResult += ' ' + uuid + '\n' + + strResult += ' \n' + try: + mode = StrVirt(nvmlDeviceGetVirtualizationMode(handle)) + except NVMLError as err: + mode = handleError(err) + strResult += ' ' + mode + '\n' + strResult += ' \n' + + try: + gridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures(handle) + if gridLicensableFeatures.isGridLicenseSupported == 1: + strResult += ' \n' + for i in range(gridLicensableFeatures.licensableFeaturesCount): + if gridLicensableFeatures.gridLicensableFeatures[i].featureState == 0: + if nvmlDeviceGetVirtualizationMode(handle) == NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH: + strResult += ' ' + 'NVIDIA Virtual Applications' + '\n' + strResult += ' ' + 'Licensed' + '\n' + else: + strResult += ' ' + gridLicensableFeatures.gridLicensableFeatures[i].productName + '\n' + strResult += ' ' + 'Unlicensed' + '\n' + else: + strResult += ' ' + gridLicensableFeatures.gridLicensableFeatures[i].productName + '\n' + strResult += ' ' + 'Licensed' + '\n' + strResult += ' \n' + except NVMLError as err: + gridLicensableFeatures = 
handleError(err) + + strResult += ' \n' + + except NVMLError as err: + strResult += 'example.py: ' + err.__str__() + '\n' + + nvmlShutdown() + + return strResult + +# If this is not exectued when module is imported +if __name__ == "__main__": + print(deviceQuery()) + diff --git a/parrot/lib/python3.10/site-packages/ffmpy.py b/parrot/lib/python3.10/site-packages/ffmpy.py new file mode 100644 index 0000000000000000000000000000000000000000..dd4b3a808bb3caee071c033f213dd399472c01af --- /dev/null +++ b/parrot/lib/python3.10/site-packages/ffmpy.py @@ -0,0 +1,217 @@ +import errno +import itertools +import shlex +import subprocess +from typing import IO, Any, List, Mapping, Optional, Sequence, Tuple, Union + + +class FFmpeg(object): + """Wrapper for various `FFmpeg `_ related applications (ffmpeg, + ffprobe). + """ + + def __init__( + self, + executable: str = "ffmpeg", + global_options: Optional[Union[Sequence[str], str]] = None, + inputs: Optional[Mapping[str, Optional[Union[Sequence[str], str]]]] = None, + outputs: Optional[Mapping[str, Optional[Union[Sequence[str], str]]]] = None, + ) -> None: + """Initialize FFmpeg command line wrapper. + + Compiles FFmpeg command line from passed arguments (executable path, options, inputs and + outputs). ``inputs`` and ``outputs`` are dictionares containing inputs/outputs as keys and + their respective options as values. One dictionary value (set of options) must be either a + single space separated string, or a list or strings without spaces (i.e. each part of the + option is a separate item of the list, the result of calling ``split()`` on the options + string). If the value is a list, it cannot be mixed, i.e. cannot contain items with spaces. + An exception are complex FFmpeg command lines that contain quotes: the quoted part must be + one string, even if it contains spaces (see *Examples* for more info). + For more info about FFmpeg command line format see `here + `_. + + :param str executable: path to ffmpeg executable; by default the ``ffmpeg`` command will be + searched for in the ``PATH``, but can be overridden with an absolute path to ``ffmpeg`` + executable + :param iterable global_options: global options passed to ``ffmpeg`` executable (e.g. + ``-y``, ``-v`` etc.); can be specified either as a list/tuple/set of strings, or one + space-separated string; by default no global options are passed + :param dict inputs: a dictionary specifying one or more input arguments as keys with their + corresponding options (either as a list of strings or a single space separated string) as + values + :param dict outputs: a dictionary specifying one or more output arguments as keys with their + corresponding options (either as a list of strings or a single space separated string) as + values + """ + self.executable = executable + self._cmd = [executable] + self._cmd += _normalize_options(global_options, split_mixed=True) + + if inputs is not None: + self._cmd += _merge_args_opts(inputs, add_minus_i_option=True) + + if outputs is not None: + self._cmd += _merge_args_opts(outputs) + + self.cmd = subprocess.list2cmdline(self._cmd) + self.process: Optional[subprocess.Popen] = None + + def __repr__(self) -> str: + return "<{0!r} {1!r}>".format(self.__class__.__name__, self.cmd) + + def run( + self, + input_data: Optional[bytes] = None, + stdout: Optional[Union[IO, int]] = None, + stderr: Optional[Union[IO, int]] = None, + env: Optional[Mapping[str, str]] = None, + **kwargs: Any + ) -> Tuple[Optional[bytes], Optional[bytes]]: + """Execute FFmpeg command line. 
+ + ``input_data`` can contain input for FFmpeg in case ``pipe`` protocol is used for input. + ``stdout`` and ``stderr`` specify where to redirect the ``stdout`` and ``stderr`` of the + process. By default no redirection is done, which means all output goes to running shell + (this mode should normally only be used for debugging purposes). If FFmpeg ``pipe`` protocol + is used for output, ``stdout`` must be redirected to a pipe by passing `subprocess.PIPE` as + ``stdout`` argument. You can pass custom environment to ffmpeg process with ``env``. + + Returns a 2-tuple containing ``stdout`` and ``stderr`` of the process. If there was no + redirection or if the output was redirected to e.g. `os.devnull`, the value returned will + be a tuple of two `None` values, otherwise it will contain the actual ``stdout`` and + ``stderr`` data returned by ffmpeg process. + + More info about ``pipe`` protocol `here `_. + + :param str input_data: input data for FFmpeg to deal with (audio, video etc.) as bytes (e.g. + the result of reading a file in binary mode) + :param stdout: redirect FFmpeg ``stdout`` there (default is `None` which means no + redirection) + :param stderr: redirect FFmpeg ``stderr`` there (default is `None` which means no + redirection) + :param env: custom environment for ffmpeg process + :param kwargs: any other keyword arguments to be forwarded to `subprocess.Popen + `_ + :return: a 2-tuple containing ``stdout`` and ``stderr`` of the process + :rtype: tuple + :raise: `FFRuntimeError` in case FFmpeg command exits with a non-zero code; + `FFExecutableNotFoundError` in case the executable path passed was not valid + """ + try: + self.process = subprocess.Popen( + self._cmd, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, env=env, **kwargs + ) + except OSError as e: + if e.errno == errno.ENOENT: + raise FFExecutableNotFoundError("Executable '{0}' not found".format(self.executable)) + else: + raise + + o_stdout, o_stderr = self.process.communicate(input=input_data) + if self.process.returncode != 0: + raise FFRuntimeError(self.cmd, self.process.returncode, o_stdout, o_stderr) + + return o_stdout, o_stderr + + +class FFprobe(FFmpeg): + """Wrapper for `ffprobe `_.""" + + def __init__( + self, + executable: str = "ffprobe", + global_options: Optional[Union[Sequence[str], str]] = None, + inputs: Optional[Mapping[str, Optional[Union[Sequence[str], str]]]] = None, + ) -> None: + """Create an instance of FFprobe. + + Compiles FFprobe command line from passed arguments (executable path, options, inputs). + FFprobe executable by default is taken from ``PATH`` but can be overridden with an + absolute path. For more info about FFprobe command line format see + `here `_. + + :param str executable: absolute path to ffprobe executable + :param iterable global_options: global options passed to ffmpeg executable; can be specified + either as a list/tuple of strings or a space-separated string + :param dict inputs: a dictionary specifying one or more inputs as keys with their + corresponding options as values + """ + super(FFprobe, self).__init__( + executable=executable, global_options=global_options, inputs=inputs + ) + + +class FFExecutableNotFoundError(Exception): + """Raise when FFmpeg/FFprobe executable was not found.""" + + +class FFRuntimeError(Exception): + """Raise when FFmpeg/FFprobe command line execution returns a non-zero exit code. + + The resulting exception object will contain the attributes relates to command line execution: + ``cmd``, ``exit_code``, ``stdout``, ``stderr``. 
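    For illustration, a sketch of catching it and reading those attributes (the input
    path is a placeholder; ``stderr`` is only populated when it was redirected to a pipe)::

        import subprocess
        from ffmpy import FFmpeg, FFRuntimeError

        ff = FFmpeg(inputs={'does_not_exist.mp4': None}, outputs={'out.mp4': None})
        try:
            ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except FFRuntimeError as exc:
            print(exc.cmd, exc.exit_code)                        # failing command and status
            print((exc.stderr or b'').decode(errors='replace'))  # captured ffmpeg output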
+ """ + + def __init__(self, cmd: str, exit_code: int, stdout: bytes, stderr: bytes) -> None: + self.cmd = cmd + self.exit_code = exit_code + self.stdout = stdout + self.stderr = stderr + + message = "`{0}` exited with status {1}\n\nSTDOUT:\n{2}\n\nSTDERR:\n{3}".format( + self.cmd, exit_code, (stdout or b"").decode(), (stderr or b"").decode() + ) + + super(FFRuntimeError, self).__init__(message) + + +def _merge_args_opts( + args_opts_dict: Mapping[str, Optional[Union[Sequence[str], str]]], + add_minus_i_option: bool = False, +) -> List[str]: + """Merge options with their corresponding arguments. + + Iterates over the dictionary holding arguments (keys) and options (values). Merges each + options string with its corresponding argument. + + :param dict args_opts_dict: a dictionary of arguments and options + :param dict kwargs: *input_option* - if specified prepends ``-i`` to input argument + :return: merged list of strings with arguments and their corresponding options + :rtype: list + """ + merged: List[str] = [] + + for arg, opt in args_opts_dict.items(): + merged += _normalize_options(opt) + + if not arg: + continue + + if add_minus_i_option: + merged.append("-i") + + merged.append(arg) + + return merged + + +def _normalize_options( + options: Optional[Union[Sequence[str], str]], split_mixed: bool = False +) -> List[str]: + """Normalize options string or list of strings. + + Splits `options` into a list of strings. If `split_mixed` is `True`, splits (flattens) mixed + options (i.e. list of strings with spaces) into separate items. + + :param options: options string or list of strings + :param bool split_mixed: whether to split mixed options into separate items + """ + if options is None: + return [] + elif isinstance(options, str): + return shlex.split(options) + else: + if split_mixed: + return list(itertools.chain(*[shlex.split(o) for o in options])) + else: + return list(options) diff --git a/parrot/lib/python3.10/site-packages/hjson/__pycache__/ordered_dict.cpython-310.pyc b/parrot/lib/python3.10/site-packages/hjson/__pycache__/ordered_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18fda64e537461533ef3c8bc8e0a5157492cd5fa Binary files /dev/null and b/parrot/lib/python3.10/site-packages/hjson/__pycache__/ordered_dict.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/hjson/encoder.py b/parrot/lib/python3.10/site-packages/hjson/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..52b77adc770e9eb3958a9dd25ad898cbb394ac28 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/encoder.py @@ -0,0 +1,521 @@ +"""Implementation of JSONEncoder +""" +from __future__ import absolute_import +import re +from operator import itemgetter +from decimal import Decimal +from .compat import u, unichr, binary_type, string_types, integer_types, PY3 +from .decoder import PosInf + +#ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]') +# This is required because u() will mangle the string and ur'' isn't valid +# python3 syntax +ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(r'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) +for i in [0x2028, 0x2029]: + ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % 
(i,)) + +FLOAT_REPR = repr + +def encode_basestring(s, _PY3=PY3, _q=u('"')): + """Return a JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + return ESCAPE_DCT[match.group(0)] + return _q + ESCAPE.sub(replace, s) + _q + + +def py_encode_basestring_ascii(s, _PY3=PY3): + """Return an ASCII-only JSON representation of a Python string + + """ + if _PY3: + if isinstance(s, binary_type): + s = s.decode('utf-8') + else: + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + #return '\\u{0:04x}'.format(n) + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +encode_basestring_ascii = ( + py_encode_basestring_ascii) + +class JSONEncoder(object): + """Extensible JSON encoder for Python data structures. + + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict, namedtuple | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + + """ + item_separator = ', ' + key_separator = ': ' + + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, sort_keys=False, + indent=None, separators=None, encoding='utf-8', default=None, + use_decimal=True, namedtuple_as_object=True, + tuple_as_array=True, bigint_as_string=False, + item_sort_key=None, for_json=False, + int_as_string_bitcount=None): + """Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is false, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is true, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. + + If check_circular is true, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. + + If sort_keys is true, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. 
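        For instance (a sketch using the package-level ``dumpsJSON`` helper that the
        test suite below exercises)::

            import hjson
            hjson.dumpsJSON({'a': [1, 2]}, indent=2)
            # -> '{\n  "a": [\n    1,\n    2\n  ]\n}'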
``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of hjson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If specified, separators should be an (item_separator, key_separator) + tuple. The default is (', ', ': ') if *indent* is ``None`` and + (',', ': ') otherwise. To get the most compact JSON representation, + you should specify (',', ':') to eliminate whitespace. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON encodable + version of the object or raise a ``TypeError``. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. + + If use_decimal is true (not the default), ``decimal.Decimal`` will + be supported directly by the encoder. For the inverse, decode JSON + with ``parse_float=decimal.Decimal``. + + If namedtuple_as_object is true (the default), objects with + ``_asdict()`` methods will be encoded as JSON objects. + + If tuple_as_array is true (the default), tuple (and subclasses) will + be encoded as JSON arrays. + + If bigint_as_string is true (not the default), ints 2**53 and higher + or lower than -2**53 will be encoded as strings. This is to avoid the + rounding that happens in Javascript otherwise. + + If int_as_string_bitcount is a positive number (n), then int of size + greater than or equal to 2**n or lower than or equal to -2**n will be + encoded as strings. + + If specified, item_sort_key is a callable used to sort the items in + each dictionary. This is useful if you want to sort items other than + in alphabetical order by key. + + If for_json is true (not the default), objects with a ``for_json()`` + method will use the return value of that method for encoding as JSON + instead of the object. + + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.sort_keys = sort_keys + self.use_decimal = use_decimal + self.namedtuple_as_object = namedtuple_as_object + self.tuple_as_array = tuple_as_array + self.bigint_as_string = bigint_as_string + self.item_sort_key = item_sort_key + self.for_json = for_json + self.int_as_string_bitcount = int_as_string_bitcount + if indent is not None and not isinstance(indent, string_types): + indent = indent * ' ' + self.indent = indent + if separators is not None: + self.item_separator, self.key_separator = separators + elif indent is not None: + self.item_separator = ',' + if default is not None: + self.default = default + self.encoding = encoding + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + + """ + raise TypeError(repr(o) + " is not JSON serializable") + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from hjson import JSONEncoder + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. 
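        # Strings are encoded inline and returned whole; anything else falls
        # through to iterencode() below and is joined from its chunks.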
+ if isinstance(o, binary_type): + _encoding = self.encoding + if (_encoding is not None and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + if isinstance(o, string_types): + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. + chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + if self.ensure_ascii: + return ''.join(chunks) + else: + return u''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. + + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + if self.encoding != 'utf-8': + def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): + if isinstance(o, binary_type): + o = o.decode(_encoding) + return _orig_encoder(o) + + def floatstr(o, _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on + # the internals. + + if o != o: + text = 'null' + elif o == _inf: + text = 'null' + elif o == _neginf: + text = 'null' + else: + return _repr(o) + + return text + + key_memo = {} + int_as_string_bitcount = ( + 53 if self.bigint_as_string else self.int_as_string_bitcount) + _iterencode = _make_iterencode( + markers, self.default, _encoder, self.indent, floatstr, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, _one_shot, self.use_decimal, + self.namedtuple_as_object, self.tuple_as_array, + int_as_string_bitcount, + self.item_sort_key, self.encoding, self.for_json, + Decimal=Decimal) + try: + return _iterencode(o, 0) + finally: + key_memo.clear() + + +def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, + _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, + _use_decimal, _namedtuple_as_object, _tuple_as_array, + _int_as_string_bitcount, _item_sort_key, + _encoding,_for_json, + ## HACK: hand-optimized bytecode; turn globals into locals + _PY3=PY3, + ValueError=ValueError, + string_types=string_types, + Decimal=Decimal, + dict=dict, + float=float, + id=id, + integer_types=integer_types, + isinstance=isinstance, + list=list, + str=str, + tuple=tuple, + ): + if _item_sort_key and not callable(_item_sort_key): + raise TypeError("item_sort_key must be None or callable") + elif _sort_keys and not _item_sort_key: + _item_sort_key = itemgetter(0) + + if (_int_as_string_bitcount is not None and + (_int_as_string_bitcount <= 0 or + not isinstance(_int_as_string_bitcount, integer_types))): + raise TypeError("int_as_string_bitcount must be a positive integer") + + def _encode_int(value): + skip_quoting = ( + _int_as_string_bitcount is None + or + _int_as_string_bitcount < 1 + ) + if ( + skip_quoting or + (-1 << _int_as_string_bitcount) + < value < + (1 << _int_as_string_bitcount) + ): + return str(value) + return '"' + str(value) + '"' + + def _iterencode_list(lst, _current_indent_level): + if not lst: + yield '[]' + return + if markers is not None: + markerid = id(lst) + if markerid in markers: + raise 
ValueError("Circular reference detected") + markers[markerid] = lst + buf = '[' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + (_indent * _current_indent_level) + separator = _item_separator + newline_indent + buf += newline_indent + else: + newline_indent = None + separator = _item_separator + first = True + for value in lst: + if first: + first = False + else: + buf = separator + yield buf + + for chunk in _iterencode(value, _current_indent_level): + yield chunk + + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + (_indent * _current_indent_level) + yield ']' + if markers is not None: + del markers[markerid] + + def _stringify_key(key): + if isinstance(key, string_types): # pragma: no cover + pass + elif isinstance(key, binary_type): + key = key.decode(_encoding) + elif isinstance(key, float): + key = _floatstr(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif isinstance(key, integer_types): + key = str(key) + elif _use_decimal and isinstance(key, Decimal): + key = str(key) + elif _skipkeys: + key = None + else: + raise TypeError("key " + repr(key) + " is not a string") + return key + + def _iterencode_dict(dct, _current_indent_level): + if not dct: + yield '{}' + return + if markers is not None: + markerid = id(dct) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = dct + yield '{' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + (_indent * _current_indent_level) + item_separator = _item_separator + newline_indent + yield newline_indent + else: + newline_indent = None + item_separator = _item_separator + first = True + if _PY3: + iteritems = dct.items() + else: + iteritems = dct.iteritems() + if _item_sort_key: + items = [] + for k, v in dct.items(): + if not isinstance(k, string_types): + k = _stringify_key(k) + if k is None: + continue + items.append((k, v)) + items.sort(key=_item_sort_key) + else: + items = iteritems + for key, value in items: + if not (_item_sort_key or isinstance(key, string_types)): + key = _stringify_key(key) + if key is None: + # _skipkeys must be True + continue + if first: + first = False + else: + yield item_separator + yield _encoder(key) + yield _key_separator + + for chunk in _iterencode(value, _current_indent_level): + yield chunk + + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + (_indent * _current_indent_level) + yield '}' + if markers is not None: + del markers[markerid] + + def _iterencode(o, _current_indent_level): + if (isinstance(o, string_types) or + (_PY3 and isinstance(o, binary_type))): + yield _encoder(o) + elif o is None: + yield 'null' + elif o is True: + yield 'true' + elif o is False: + yield 'false' + elif isinstance(o, integer_types): + yield _encode_int(o) + elif isinstance(o, float): + yield _floatstr(o) + else: + for_json = _for_json and getattr(o, 'for_json', None) + if for_json and callable(for_json): + for chunk in _iterencode(for_json(), _current_indent_level): + yield chunk + elif isinstance(o, list): + for chunk in _iterencode_list(o, _current_indent_level): + yield chunk + else: + _asdict = _namedtuple_as_object and getattr(o, '_asdict', None) + if _asdict and callable(_asdict): + for chunk in _iterencode_dict(_asdict(), _current_indent_level): + yield chunk + elif (_tuple_as_array and isinstance(o, tuple)): + for chunk in _iterencode_list(o, _current_indent_level): + yield chunk + elif 
isinstance(o, dict): + for chunk in _iterencode_dict(o, _current_indent_level): + yield chunk + elif _use_decimal and isinstance(o, Decimal): + yield str(o) + else: + if markers is not None: + markerid = id(o) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = o + o = _default(o) + for chunk in _iterencode(o, _current_indent_level): + yield chunk + if markers is not None: + del markers[markerid] + + return _iterencode diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/__init__.py b/parrot/lib/python3.10/site-packages/hjson/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15cbdc13cfdd4c9ddc939cc30a9c40a8a9af8d77 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/__init__.py @@ -0,0 +1,61 @@ +from __future__ import absolute_import +import unittest +import doctest +import sys + + +def additional_tests(suite=None): + import hjson + import hjson.encoder + import hjson.decoder + if suite is None: + suite = unittest.TestSuite() + for mod in (hjson, hjson.encoder, hjson.decoder): + suite.addTest(doctest.DocTestSuite(mod)) + return suite + + +def all_tests_suite(): + def get_suite(): + return additional_tests( + unittest.TestLoader().loadTestsFromNames([ + 'hjson.tests.test_hjson', + 'hjson.tests.test_bitsize_int_as_string', + 'hjson.tests.test_bigint_as_string', + 'hjson.tests.test_check_circular', + 'hjson.tests.test_decode', + 'hjson.tests.test_default', + 'hjson.tests.test_dump', + 'hjson.tests.test_encode_basestring_ascii', + 'hjson.tests.test_errors', + 'hjson.tests.test_fail', + 'hjson.tests.test_float', + 'hjson.tests.test_indent', + 'hjson.tests.test_pass1', + 'hjson.tests.test_pass2', + 'hjson.tests.test_pass3', + 'hjson.tests.test_recursion', + 'hjson.tests.test_scanstring', + 'hjson.tests.test_separators', + 'hjson.tests.test_unicode', + 'hjson.tests.test_decimal', + 'hjson.tests.test_tuple', + 'hjson.tests.test_namedtuple', + #'hjson.tests.test_tool', # fails on windows + 'hjson.tests.test_for_json', + ])) + suite = get_suite() + return suite + + +def main(): + runner = unittest.TextTestRunner(verbosity=1 + sys.argv.count('-v')) + suite = all_tests_suite() + raise SystemExit(not runner.run(suite).wasSuccessful()) + + +if __name__ == '__main__': + import os + import sys + sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + main() diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_fail.cpython-310.pyc b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_fail.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..109b4a573db708e0a31bf76e3b785599ea21159e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_fail.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_float.cpython-310.pyc b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_float.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3fad936be53d0f30cca039e1df478807f2d4e26 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_float.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_item_sort_key.cpython-310.pyc b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_item_sort_key.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9cb2a72a4b17dfa394273288558f639f466f93ee Binary files /dev/null and b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_item_sort_key.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass1.cpython-310.pyc b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c550d6560c7a9f78666fa71a5eee2f8252e75eb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_pass1.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_unicode.cpython-310.pyc b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_unicode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1ea060ccfdd7045acf2894a7b729250d8eb66e9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/hjson/tests/__pycache__/test_unicode.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_bigint_as_string.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_bigint_as_string.py new file mode 100644 index 0000000000000000000000000000000000000000..3cdb04fe33dc6c057d4d2f44cc197b5ecddbb92d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_bigint_as_string.py @@ -0,0 +1,67 @@ +from unittest import TestCase + +import hjson as json + + +class TestBigintAsString(TestCase): + # Python 2.5, at least the one that ships on Mac OS X, calculates + # 2 ** 53 as 0! It manages to calculate 1 << 53 correctly. + values = [(200, 200), + ((1 << 53) - 1, 9007199254740991), + ((1 << 53), '9007199254740992'), + ((1 << 53) + 1, '9007199254740993'), + (-100, -100), + ((-1 << 53), '-9007199254740992'), + ((-1 << 53) - 1, '-9007199254740993'), + ((-1 << 53) + 1, -9007199254740991)] + + options = ( + {"bigint_as_string": True}, + {"int_as_string_bitcount": 53} + ) + + def test_ints(self): + for opts in self.options: + for val, expect in self.values: + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) + + def test_lists(self): + for opts in self.options: + for val, expect in self.values: + val = [val, val] + expect = [expect, expect] + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) + + def test_dicts(self): + for opts in self.options: + for val, expect in self.values: + val = {'k': val} + expect = {'k': expect} + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) + + def test_dict_keys(self): + for opts in self.options: + for val, _ in self.values: + expect = {str(val): 'value'} + val = {val: 'value'} + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, **opts))) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_bitsize_int_as_string.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_bitsize_int_as_string.py new file mode 100644 index 0000000000000000000000000000000000000000..b8955b9c56b6ce5430aa60c31da74ec7d5c65aa7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_bitsize_int_as_string.py @@ -0,0 +1,73 @@ +from unittest import TestCase + +import hjson as json + + +class TestBitSizeIntAsString(TestCase): 
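    # Same idea as TestBigintAsString above, but with a configurable cut-off:
    # these tests pass int_as_string_bitcount=31, so values at or beyond
    # +/- 2**31 are expected to come back as strings.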
+ # Python 2.5, at least the one that ships on Mac OS X, calculates + # 2 ** 31 as 0! It manages to calculate 1 << 31 correctly. + values = [ + (200, 200), + ((1 << 31) - 1, (1 << 31) - 1), + ((1 << 31), str(1 << 31)), + ((1 << 31) + 1, str((1 << 31) + 1)), + (-100, -100), + ((-1 << 31), str(-1 << 31)), + ((-1 << 31) - 1, str((-1 << 31) - 1)), + ((-1 << 31) + 1, (-1 << 31) + 1), + ] + + def test_invalid_counts(self): + for n in ['foo', -1, 0, 1.0]: + self.assertRaises( + TypeError, + json.dumpsJSON, 0, int_as_string_bitcount=n) + + def test_ints_outside_range_fails(self): + self.assertNotEqual( + str(1 << 15), + json.loads(json.dumpsJSON(1 << 15, int_as_string_bitcount=16)), + ) + + def test_ints(self): + for val, expect in self.values: + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31)), + ) + + def test_lists(self): + for val, expect in self.values: + val = [val, val] + expect = [expect, expect] + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31))) + + def test_dicts(self): + for val, expect in self.values: + val = {'k': val} + expect = {'k': expect} + self.assertEqual( + val, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31))) + + def test_dict_keys(self): + for val, _ in self.values: + expect = {str(val): 'value'} + val = {val: 'value'} + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val))) + self.assertEqual( + expect, + json.loads(json.dumpsJSON(val, int_as_string_bitcount=31))) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_check_circular.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_check_circular.py new file mode 100644 index 0000000000000000000000000000000000000000..4b1046549427651db8a80cf60617cc0e2b04bf33 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_check_circular.py @@ -0,0 +1,30 @@ +from unittest import TestCase +import hjson as json + +def default_iterable(obj): + return list(obj) + +class TestCheckCircular(TestCase): + def test_circular_dict(self): + dct = {} + dct['a'] = dct + self.assertRaises(ValueError, json.dumpsJSON, dct) + + def test_circular_list(self): + lst = [] + lst.append(lst) + self.assertRaises(ValueError, json.dumpsJSON, lst) + + def test_circular_composite(self): + dct2 = {} + dct2['a'] = [] + dct2['a'].append(dct2) + self.assertRaises(ValueError, json.dumpsJSON, dct2) + + def test_circular_default(self): + json.dumpsJSON([set()], default=default_iterable) + self.assertRaises(TypeError, json.dumpsJSON, [set()]) + + def test_circular_off_default(self): + json.dumpsJSON([set()], default=default_iterable, check_circular=False) + self.assertRaises(TypeError, json.dumpsJSON, [set()], check_circular=False) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_decimal.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_decimal.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb3910610b9d028fffca530086519d492567afc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_decimal.py @@ -0,0 +1,71 @@ +import decimal +from decimal import Decimal +from unittest import TestCase +from hjson.compat import StringIO, reload_module + +import hjson as json + +class TestDecimal(TestCase): + NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500" + def dumps(self, obj, **kw): + 
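        # Serialize through both the streaming dumpJSON() and the string
        # dumpsJSON() entry points and check that they agree.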
sio = StringIO() + json.dumpJSON(obj, sio, **kw) + res = json.dumpsJSON(obj, **kw) + self.assertEqual(res, sio.getvalue()) + return res + + def loads(self, s, **kw): + sio = StringIO(s) + res = json.loads(s, **kw) + self.assertEqual(res, json.load(sio, **kw)) + return res + + def test_decimal_encode(self): + for d in map(Decimal, self.NUMS): + self.assertEqual(self.dumps(d, use_decimal=True), str(d)) + + def test_decimal_decode(self): + for s in self.NUMS: + self.assertEqual(self.loads(s, parse_float=Decimal), Decimal(s)) + + def test_stringify_key(self): + for d in map(Decimal, self.NUMS): + v = {d: d} + self.assertEqual( + self.loads( + self.dumps(v, use_decimal=True), parse_float=Decimal), + {str(d): d}) + + def test_decimal_roundtrip(self): + for d in map(Decimal, self.NUMS): + # The type might not be the same (int and Decimal) but they + # should still compare equal. + for v in [d, [d], {'': d}]: + self.assertEqual( + self.loads( + self.dumps(v, use_decimal=True), parse_float=Decimal), + v) + + def test_decimal_defaults(self): + d = Decimal('1.1') + # use_decimal=True is the default + self.assertRaises(TypeError, json.dumpsJSON, d, use_decimal=False) + self.assertEqual('1.1', json.dumpsJSON(d)) + self.assertEqual('1.1', json.dumpsJSON(d, use_decimal=True)) + self.assertRaises(TypeError, json.dumpJSON, d, StringIO(), + use_decimal=False) + sio = StringIO() + json.dumpJSON(d, sio) + self.assertEqual('1.1', sio.getvalue()) + sio = StringIO() + json.dumpJSON(d, sio, use_decimal=True) + self.assertEqual('1.1', sio.getvalue()) + + def test_decimal_reload(self): + # Simulate a subinterpreter that reloads the Python modules but not + # the C code https://github.com/simplejson/simplejson/issues/34 + global Decimal + Decimal = reload_module(decimal).Decimal + import hjson.encoder + hjson.encoder.Decimal = Decimal + self.test_decimal_roundtrip() diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_default.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_default.py new file mode 100644 index 0000000000000000000000000000000000000000..cfed1155e9eb05e5437e1f475160f01a48f602b8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_default.py @@ -0,0 +1,9 @@ +from unittest import TestCase + +import hjson as json + +class TestDefault(TestCase): + def test_default(self): + self.assertEqual( + json.dumpsJSON(type, default=repr), + json.dumpsJSON(repr(type))) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_dump.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_dump.py new file mode 100644 index 0000000000000000000000000000000000000000..2c711b04d110d8dcf7b1be74da4f77a1e4167267 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_dump.py @@ -0,0 +1,130 @@ +from unittest import TestCase +from hjson.compat import StringIO, long_type, b, binary_type, PY3 +import hjson as json + +def as_text_type(s): + if PY3 and isinstance(s, binary_type): + return s.decode('ascii') + return s + +class TestDump(TestCase): + def test_dump(self): + sio = StringIO() + json.dumpJSON({}, sio) + self.assertEqual(sio.getvalue(), '{}') + + def test_constants(self): + for c in [None, True, False]: + self.assertTrue(json.loads(json.dumpsJSON(c)) is c) + self.assertTrue(json.loads(json.dumpsJSON([c]))[0] is c) + self.assertTrue(json.loads(json.dumpsJSON({'a': c}))['a'] is c) + + def test_stringify_key(self): + items = [(b('bytes'), 'bytes'), + (1.0, '1.0'), + (10, '10'), + (True, 'true'), + (False, 'false'), + (None, 'null'), + (long_type(100), '100')] 
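        # Each non-string key on the left is expected to round-trip as the
        # JSON string form shown on the right.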
+ for k, expect in items: + self.assertEqual( + json.loads(json.dumpsJSON({k: expect})), + {expect: expect}) + self.assertEqual( + json.loads(json.dumpsJSON({k: expect}, sort_keys=True)), + {expect: expect}) + self.assertRaises(TypeError, json.dumpsJSON, {json: 1}) + for v in [{}, {'other': 1}, {b('derp'): 1, 'herp': 2}]: + for sort_keys in [False, True]: + v0 = dict(v) + v0[json] = 1 + v1 = dict((as_text_type(key), val) for (key, val) in v.items()) + self.assertEqual( + json.loads(json.dumpsJSON(v0, skipkeys=True, sort_keys=sort_keys)), + v1) + self.assertEqual( + json.loads(json.dumpsJSON({'': v0}, skipkeys=True, sort_keys=sort_keys)), + {'': v1}) + self.assertEqual( + json.loads(json.dumpsJSON([v0], skipkeys=True, sort_keys=sort_keys)), + [v1]) + + def test_dumps(self): + self.assertEqual(json.dumpsJSON({}), '{}') + + def test_encode_truefalse(self): + self.assertEqual(json.dumpsJSON( + {True: False, False: True}, sort_keys=True), + '{"false": true, "true": false}') + self.assertEqual( + json.dumpsJSON( + {2: 3.0, + 4.0: long_type(5), + False: 1, + long_type(6): True, + "7": 0}, + sort_keys=True), + '{"2": 3.0, "4.0": 5, "6": true, "7": 0, "false": 1}') + + def test_ordered_dict(self): + # http://bugs.python.org/issue6105 + items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)] + s = json.dumpsJSON(json.OrderedDict(items)) + self.assertEqual( + s, + '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}') + + def test_indent_unknown_type_acceptance(self): + """ + A test against the regression mentioned at `github issue 29`_. + + The indent parameter should accept any type which pretends to be + an instance of int or long when it comes to being multiplied by + strings, even if it is not actually an int or long, for + backwards compatibility. + + .. _github issue 29: + http://github.com/simplejson/simplejson/issue/29 + """ + + class AwesomeInt(object): + """An awesome reimplementation of integers""" + + def __init__(self, *args, **kwargs): + if len(args) > 0: + # [construct from literals, objects, etc.] + # ... + + # Finally, if args[0] is an integer, store it + if isinstance(args[0], int): + self._int = args[0] + + # [various methods] + + def __mul__(self, other): + # [various ways to multiply AwesomeInt objects] + # ... 
finally, if the right-hand operand is not awesome enough, + # try to do a normal integer multiplication + if hasattr(self, '_int'): + return self._int * other + else: + raise NotImplementedError("To do non-awesome things with" + " this object, please construct it from an integer!") + + s = json.dumpsJSON([0, 1, 2], indent=AwesomeInt(3)) + self.assertEqual(s, '[\n 0,\n 1,\n 2\n]') + + def test_accumulator(self): + # the C API uses an accumulator that collects after 100,000 appends + lst = [0] * 100000 + self.assertEqual(json.loads(json.dumpsJSON(lst)), lst) + + def test_sort_keys(self): + # https://github.com/simplejson/simplejson/issues/106 + for num_keys in range(2, 32): + p = dict((str(x), x) for x in range(num_keys)) + sio = StringIO() + json.dumpJSON(p, sio, sort_keys=True) + self.assertEqual(sio.getvalue(), json.dumpsJSON(p, sort_keys=True)) + self.assertEqual(json.loads(sio.getvalue()), p) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_encode_basestring_ascii.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_encode_basestring_ascii.py new file mode 100644 index 0000000000000000000000000000000000000000..6783eff681a9f2638d57706bf01987c58280c04f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_encode_basestring_ascii.py @@ -0,0 +1,42 @@ +from unittest import TestCase + +import hjson.encoder +from hjson.compat import b + +CASES = [ + (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'), + (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'), + (u'controls', '"controls"'), + (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'), + (u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'), + (u' s p a c e d ', '" s p a c e d "'), + (u'\U0001d120', '"\\ud834\\udd20"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'), + (u"`1~!@#$%^&*()_+-={':[,]}|;.?", '"`1~!@#$%^&*()_+-={\':[,]}|;.?"'), + (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'), + (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'), +] + +class TestEncodeBaseStringAscii(TestCase): + def test_py_encode_basestring_ascii(self): + self._test_encode_basestring_ascii(hjson.encoder.encode_basestring_ascii) + + def _test_encode_basestring_ascii(self, encode_basestring_ascii): + fname = encode_basestring_ascii.__name__ + for input_string, expect in CASES: + result = encode_basestring_ascii(input_string) + #self.assertEqual(result, expect, + # '{0!r} != {1!r} for {2}({3!r})'.format( + # result, expect, fname, input_string)) + self.assertEqual(result, expect, + '%r != %r for %s(%r)' % (result, expect, fname, input_string)) + + def test_sorted_dict(self): + items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)] + s = hjson.dumpsJSON(dict(items), sort_keys=True) + self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}') diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_errors.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..fd256043be9c4a0e02575a0c77a0e19db5e69a0c --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/hjson/tests/test_errors.py @@ -0,0 +1,51 @@ +import sys, pickle +from unittest import TestCase + +import hjson as json +from hjson.compat import u, b + +class TestErrors(TestCase): + def test_string_keys_error(self): + data = [{'a': 'A', 'b': (2, 4), 'c': 3.0, ('d',): 'D tuple'}] + self.assertRaises(TypeError, json.dumpsJSON, data) + + def test_decode_error(self): + err = None + try: + json.loads('{}\na\nb') + except json.HjsonDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected HjsonDecodeError') + self.assertEqual(err.lineno, 2) + self.assertEqual(err.colno, 1) + self.assertEqual(err.endlineno, 3) + self.assertEqual(err.endcolno, 2) + + def test_scan_error(self): + err = None + for t in (u, b): + try: + json.loads(t('{"asdf": "')) + except json.HjsonDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected HjsonDecodeError') + self.assertEqual(err.lineno, 1) + self.assertEqual(err.colno, 10) + + def test_error_is_pickable(self): + err = None + try: + json.loads('{}\na\nb') + except json.HjsonDecodeError: + err = sys.exc_info()[1] + else: + self.fail('Expected HjsonDecodeError') + s = pickle.dumps(err) + e = pickle.loads(s) + + self.assertEqual(err.msg, e.msg) + self.assertEqual(err.doc, e.doc) + self.assertEqual(err.pos, e.pos) + self.assertEqual(err.end, e.end) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_fail.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_fail.py new file mode 100644 index 0000000000000000000000000000000000000000..ca591e784aae3e4884c0768cba07d8a26508467e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_fail.py @@ -0,0 +1,143 @@ +import sys +from unittest import TestCase + +import hjson as json + +# 2007-10-05 +JSONDOCS = [ + # http://json.org/JSON_checker/test/fail1.json + # '"A JSON payload should be an object or array, not a string."', + # http://json.org/JSON_checker/test/fail2.json + '["Unclosed array"', + # http://json.org/JSON_checker/test/fail3.json + #'{unquoted_key: "keys must be quoted"}', + # http://json.org/JSON_checker/test/fail4.json + # '["extra comma",]', + # http://json.org/JSON_checker/test/fail5.json + '["double extra comma",,]', + # http://json.org/JSON_checker/test/fail6.json + '[ , "<-- missing value"]', + # http://json.org/JSON_checker/test/fail7.json + '["Comma after the close"],', + # http://json.org/JSON_checker/test/fail8.json + '["Extra close"]]', + # http://json.org/JSON_checker/test/fail9.json + # '{"Extra comma": true,}', + # http://json.org/JSON_checker/test/fail10.json + '{"Extra value after close": true} "misplaced quoted value"', + # http://json.org/JSON_checker/test/fail11.json + '{"Illegal expression": 1 + 2}', + # http://json.org/JSON_checker/test/fail12.json + '{"Illegal invocation": alert()}', + # http://json.org/JSON_checker/test/fail13.json + '{"Numbers cannot have leading zeroes": 013}', + # http://json.org/JSON_checker/test/fail14.json + '{"Numbers cannot be hex": 0x14}', + # http://json.org/JSON_checker/test/fail15.json + '["Illegal backslash escape: \\x15"]', + # http://json.org/JSON_checker/test/fail16.json + '[\\naked]', + # http://json.org/JSON_checker/test/fail17.json + '["Illegal backslash escape: \\017"]', + # http://json.org/JSON_checker/test/fail18.json + # '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', + # http://json.org/JSON_checker/test/fail19.json + '{"Missing colon" null}', + # http://json.org/JSON_checker/test/fail20.json + '{"Double colon":: null}', + # 
http://json.org/JSON_checker/test/fail21.json + '{"Comma instead of colon", null}', + # http://json.org/JSON_checker/test/fail22.json + '["Colon instead of comma": false]', + # http://json.org/JSON_checker/test/fail23.json + '["Bad value", truth]', + # http://json.org/JSON_checker/test/fail24.json + #"['single quote']", + # http://json.org/JSON_checker/test/fail25.json + '["\ttab\tcharacter\tin\tstring\t"]', + # http://json.org/JSON_checker/test/fail26.json + '["tab\\ character\\ in\\ string\\ "]', + # http://json.org/JSON_checker/test/fail27.json + '["line\nbreak"]', + # http://json.org/JSON_checker/test/fail28.json + '["line\\\nbreak"]', + # http://json.org/JSON_checker/test/fail29.json + '[0e]', + # http://json.org/JSON_checker/test/fail30.json + '[0e+]', + # http://json.org/JSON_checker/test/fail31.json + '[0e+-1]', + # http://json.org/JSON_checker/test/fail32.json + '{"Comma instead if closing brace": true,', + # http://json.org/JSON_checker/test/fail33.json + '["mismatch"}', + # http://code.google.com/p/simplejson/issues/detail?id=3 + u'["A\u001FZ control characters in string"]', + # misc based on coverage + '{', + '{]', + '{"foo": "bar"]', + '{"foo": "bar"', +] + +class TestFail(TestCase): + def test_failures(self): + for idx, doc in enumerate(JSONDOCS): + idx = idx + 1 + try: + json.loads(doc) + except json.HjsonDecodeError: + pass + else: + self.fail("Expected failure for fail%d.json: %r" % (idx, doc)) + + def test_array_decoder_issue46(self): + # http://code.google.com/p/simplejson/issues/detail?id=46 + for doc in [u'[,]', '[,]']: + try: + json.loads(doc) + except json.HjsonDecodeError: + pass + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + self.fail("Unexpected success parsing '[,]'") + + def test_truncated_input(self): + test_cases = [ + ('[', "End of input while parsing an array", 1), + # ('[42', "Expecting ',' delimiter", 3), + ('[42,', 'Expecting value', 4), + ('["', 'Unterminated string starting at', 1), + ('["spam', 'Unterminated string starting at', 1), + # ('["spam"', "Expecting ',' delimiter", 7), + ('["spam",', 'Expecting value', 8), + ('{', 'Bad key name (eof)', 1), + ('{"', 'Unterminated string starting at', 1), + ('{"spam', 'Unterminated string starting at', 1), + ('{"spam"', "Expecting ':' delimiter", 7), + ('{"spam":', 'Expecting value', 8), + # ('{"spam":42', "Expecting ',' delimiter", 10), + ('{"spam":42,', 'Bad key name (eof)', 11), + ('"', 'Unterminated string starting at', 0), + ('"spam', 'Unterminated string starting at', 0), + ('[,', "Found a punctuator character", 1), + ] + for data, msg, idx in test_cases: + try: + json.loads(data) + except json.HjsonDecodeError: + e = sys.exc_info()[1] + self.assertEqual( + e.msg[:len(msg)], + msg, + "%r doesn't start with %r for %r" % (e.msg, msg, data)) + self.assertEqual( + e.pos, idx, + "pos %r != %r for %r" % (e.pos, idx, data)) + except Exception: + e = sys.exc_info()[1] + self.fail("Unexpected exception raised %r %s" % (e, e)) + else: + self.fail("Unexpected success parsing '%r'" % (data,)) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_float.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_float.py new file mode 100644 index 0000000000000000000000000000000000000000..567ea427a64efebba1277e65ad7b1c1e4d52d118 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_float.py @@ -0,0 +1,25 @@ +import math +from unittest import TestCase +from hjson.compat import long_type, text_type +import hjson as json +from 
hjson.decoder import NaN, PosInf, NegInf + +class TestFloat(TestCase): + + def test_degenerates_ignore(self): + for f in (PosInf, NegInf, NaN): + self.assertEqual(json.loads(json.dumpsJSON(f)), None) + + def test_floats(self): + for num in [1617161771.7650001, math.pi, math.pi**100, + math.pi**-100, 3.1]: + self.assertEqual(float(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(text_type(json.dumpsJSON(num))), num) + + def test_ints(self): + for num in [1, long_type(1), 1<<32, 1<<64]: + self.assertEqual(json.dumpsJSON(num), str(num)) + self.assertEqual(int(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(json.dumpsJSON(num)), num) + self.assertEqual(json.loads(text_type(json.dumpsJSON(num))), num) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_for_json.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_for_json.py new file mode 100644 index 0000000000000000000000000000000000000000..672b8b278a844d6885d66a2d8380f18846aabb99 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_for_json.py @@ -0,0 +1,97 @@ +import unittest +import hjson as json + + +class ForJson(object): + def for_json(self): + return {'for_json': 1} + + +class NestedForJson(object): + def for_json(self): + return {'nested': ForJson()} + + +class ForJsonList(object): + def for_json(self): + return ['list'] + + +class DictForJson(dict): + def for_json(self): + return {'alpha': 1} + + +class ListForJson(list): + def for_json(self): + return ['list'] + + +class TestForJson(unittest.TestCase): + def assertRoundTrip(self, obj, other, for_json=True): + if for_json is None: + # None will use the default + s = json.dumpsJSON(obj) + else: + s = json.dumpsJSON(obj, for_json=for_json) + self.assertEqual( + json.loads(s), + other) + + def test_for_json_encodes_stand_alone_object(self): + self.assertRoundTrip( + ForJson(), + ForJson().for_json()) + + def test_for_json_encodes_object_nested_in_dict(self): + self.assertRoundTrip( + {'hooray': ForJson()}, + {'hooray': ForJson().for_json()}) + + def test_for_json_encodes_object_nested_in_list_within_dict(self): + self.assertRoundTrip( + {'list': [0, ForJson(), 2, 3]}, + {'list': [0, ForJson().for_json(), 2, 3]}) + + def test_for_json_encodes_object_nested_within_object(self): + self.assertRoundTrip( + NestedForJson(), + {'nested': {'for_json': 1}}) + + def test_for_json_encodes_list(self): + self.assertRoundTrip( + ForJsonList(), + ForJsonList().for_json()) + + def test_for_json_encodes_list_within_object(self): + self.assertRoundTrip( + {'nested': ForJsonList()}, + {'nested': ForJsonList().for_json()}) + + def test_for_json_encodes_dict_subclass(self): + self.assertRoundTrip( + DictForJson(a=1), + DictForJson(a=1).for_json()) + + def test_for_json_encodes_list_subclass(self): + self.assertRoundTrip( + ListForJson(['l']), + ListForJson(['l']).for_json()) + + def test_for_json_ignored_if_not_true_with_dict_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + DictForJson(a=1), + {'a': 1}, + for_json=for_json) + + def test_for_json_ignored_if_not_true_with_list_subclass(self): + for for_json in (None, False): + self.assertRoundTrip( + ListForJson(['l']), + ['l'], + for_json=for_json) + + def test_raises_typeerror_if_for_json_not_true_with_object(self): + self.assertRaises(TypeError, json.dumpsJSON, ForJson()) + self.assertRaises(TypeError, json.dumpsJSON, ForJson(), for_json=False) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_hjson.py 
b/parrot/lib/python3.10/site-packages/hjson/tests/test_hjson.py new file mode 100644 index 0000000000000000000000000000000000000000..725c6abfd7e28db4b88def7217f0361d81900971 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_hjson.py @@ -0,0 +1,65 @@ +from __future__ import with_statement + +import os +import sys +import subprocess +import tempfile +import codecs # dump + +from unittest import TestCase + +import hjson + +class TestAssets(TestCase): + + def __init__(self, *args, **kwargs): + super(TestAssets, self).__init__(*args, **kwargs) + self.assetsDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "assets") + self.assets = self.load('testlist.txt', False).split('\n') + self.maxDiff = None + self.verma, self.vermi = sys.version_info[0:2] + + def load(self, name, cr): + name = os.path.join(self.assetsDir, name) + with open(name, 'rb') as f: + text = f.read().decode('utf-8') + text = text.replace('\r', '') + if cr: text = text.replace('\n', '\r\n') + return text + + def check(self, name, file, inputCr): + text = self.load(file, inputCr) + shouldFail = name[0:4] == "fail" + + try: + data = hjson.loads(text) + self.assertFalse(shouldFail, file) + + text1 = hjson.dumpsJSON(data) + hjson1 = hjson.dumps(data, ensure_ascii=False); + result = hjson.loads(self.load(name + "_result.json", inputCr)) + text2 = hjson.dumpsJSON(result) + hjson2 = self.load(name + "_result.hjson", False) + + # dbg + # with open(name + "_dbg1.txt", "w") as tmp: tmp.write(hjson1.encode("utf-8")) + # with open(name + "_dbg2.txt", "w") as tmp: tmp.write(hjson2.encode("utf-8")) + # with codecs.open(name + "_dbg3.txt", 'w', 'utf-8') as tmp: hjson.dump(data, tmp) + + if self.verma>2 or self.vermi>6: + # final check fails on py2.6 because of string formatting issues + self.assertEqual(text2, text1, file) + self.assertEqual(hjson2, hjson1, file) + + except hjson.HjsonDecodeError as e: + if not shouldFail: + self.fail("raised error on parsing %s: %r" % (file, e)) + + def test_files(self): + for file in self.assets: + name, sep, ext = file.partition("_test.") + if name.startswith("stringify/quotes") or \ + name.startswith("extra/"): continue # ignore/not supported + + self.check(name, file, True) + self.check(name, file, False) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_indent.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_indent.py new file mode 100644 index 0000000000000000000000000000000000000000..372e276cfad91077b434857d65b67a3592768603 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_indent.py @@ -0,0 +1,86 @@ +from unittest import TestCase +import textwrap + +import hjson as json +from hjson.compat import StringIO + +class TestIndent(TestCase): + def test_indent(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', + 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + \t[ + \t\t"blorpie" + \t], + \t[ + \t\t"whoops" + \t], + \t[], + \t"d-shtaeou", + \t"d-nthiouh", + \t"i-vhbjkhnth", + \t{ + \t\t"nifty": 87 + \t}, + \t{ + \t\t"field": "yes", + \t\t"morefield": false + \t} + ]""") + + + d1 = json.dumpsJSON(h) + d2 = json.dumpsJSON(h, indent='\t', sort_keys=True, separators=(',', ': ')) + d3 = json.dumpsJSON(h, indent=' ', sort_keys=True, separators=(',', ': ')) + d4 = json.dumpsJSON(h, indent=2, sort_keys=True, separators=(',', ': ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + h3 = json.loads(d3) + h4 = json.loads(d4) + + self.assertEqual(h1, h) + 
self.assertEqual(h2, h) + self.assertEqual(h3, h) + self.assertEqual(h4, h) + self.assertEqual(d3, expect.replace('\t', ' ')) + self.assertEqual(d4, expect.replace('\t', ' ')) + # NOTE: Python 2.4 textwrap.dedent converts tabs to spaces, + # so the following is expected to fail. Python 2.4 is not a + # supported platform in hjson 2.1.0+. + self.assertEqual(d2, expect) + + def test_indent0(self): + h = {3: 1} + def check(indent, expected): + d1 = json.dumpsJSON(h, indent=indent) + self.assertEqual(d1, expected) + + sio = StringIO() + json.dumpJSON(h, sio, indent=indent) + self.assertEqual(sio.getvalue(), expected) + + # indent=0 should emit newlines + check(0, '{\n"3": 1\n}') + # indent=None is more compact + check(None, '{"3": 1}') + + def test_separators(self): + lst = [1,2,3,4] + expect = '[\n1,\n2,\n3,\n4\n]' + expect_spaces = '[\n1, \n2, \n3, \n4\n]' + # Ensure that separators still works + self.assertEqual( + expect_spaces, + json.dumpsJSON(lst, indent=0, separators=(', ', ': '))) + # Force the new defaults + self.assertEqual( + expect, + json.dumpsJSON(lst, indent=0, separators=(',', ': '))) + # Added in 2.1.4 + self.assertEqual( + expect, + json.dumpsJSON(lst, indent=0)) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_item_sort_key.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_item_sort_key.py new file mode 100644 index 0000000000000000000000000000000000000000..ee460f598b336a9ccba8bb52ae006a070ae98739 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_item_sort_key.py @@ -0,0 +1,20 @@ +from unittest import TestCase + +import hjson as json +from operator import itemgetter + +class TestItemSortKey(TestCase): + def test_simple_first(self): + a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'} + self.assertEqual( + '{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}', + json.dumpsJSON(a, item_sort_key=json.simple_first)) + + def test_case(self): + a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'} + self.assertEqual( + '{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}', + json.dumpsJSON(a, item_sort_key=itemgetter(0))) + self.assertEqual( + '{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}', + json.dumpsJSON(a, item_sort_key=lambda kv: kv[0].lower())) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_namedtuple.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_namedtuple.py new file mode 100644 index 0000000000000000000000000000000000000000..9d91b0f1d66bcf5e943c7bd204dc45f8a80b5d94 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_namedtuple.py @@ -0,0 +1,122 @@ +from __future__ import absolute_import +import unittest +import hjson as json +from hjson.compat import StringIO + +try: + from collections import namedtuple +except ImportError: + class Value(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'value': self[0]} + class Point(tuple): + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def _asdict(self): + return {'x': self[0], 'y': self[1]} +else: + Value = namedtuple('Value', ['value']) + Point = namedtuple('Point', ['x', 'y']) + +class 
DuckValue(object): + def __init__(self, *args): + self.value = Value(*args) + + def _asdict(self): + return self.value._asdict() + +class DuckPoint(object): + def __init__(self, *args): + self.point = Point(*args) + + def _asdict(self): + return self.point._asdict() + +class DeadDuck(object): + _asdict = None + +class DeadDict(dict): + _asdict = None + +CONSTRUCTORS = [ + lambda v: v, + lambda v: [v], + lambda v: [{'key': v}], +] + +class TestNamedTuple(unittest.TestCase): + def test_namedtuple_dumps(self): + for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]: + d = v._asdict() + self.assertEqual(d, json.loads(json.dumpsJSON(v))) + self.assertEqual( + d, + json.loads(json.dumpsJSON(v, namedtuple_as_object=True))) + self.assertEqual(d, json.loads(json.dumpsJSON(v, tuple_as_array=False))) + self.assertEqual( + d, + json.loads(json.dumpsJSON(v, namedtuple_as_object=True, + tuple_as_array=False))) + + def test_namedtuple_dumps_false(self): + for v in [Value(1), Point(1, 2)]: + l = list(v) + self.assertEqual( + l, + json.loads(json.dumpsJSON(v, namedtuple_as_object=False))) + self.assertRaises(TypeError, json.dumpsJSON, v, + tuple_as_array=False, namedtuple_as_object=False) + + def test_namedtuple_dump(self): + for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]: + d = v._asdict() + sio = StringIO() + json.dumpJSON(v, sio) + self.assertEqual(d, json.loads(sio.getvalue())) + sio = StringIO() + json.dumpJSON(v, sio, namedtuple_as_object=True) + self.assertEqual( + d, + json.loads(sio.getvalue())) + sio = StringIO() + json.dumpJSON(v, sio, tuple_as_array=False) + self.assertEqual(d, json.loads(sio.getvalue())) + sio = StringIO() + json.dumpJSON(v, sio, namedtuple_as_object=True, + tuple_as_array=False) + self.assertEqual( + d, + json.loads(sio.getvalue())) + + def test_namedtuple_dump_false(self): + for v in [Value(1), Point(1, 2)]: + l = list(v) + sio = StringIO() + json.dumpJSON(v, sio, namedtuple_as_object=False) + self.assertEqual( + l, + json.loads(sio.getvalue())) + self.assertRaises(TypeError, json.dumpJSON, v, StringIO(), + tuple_as_array=False, namedtuple_as_object=False) + + def test_asdict_not_callable_dump(self): + for f in CONSTRUCTORS: + self.assertRaises(TypeError, + json.dumpJSON, f(DeadDuck()), StringIO(), namedtuple_as_object=True) + sio = StringIO() + json.dumpJSON(f(DeadDict()), sio, namedtuple_as_object=True) + self.assertEqual( + json.dumpsJSON(f({})), + sio.getvalue()) + + def test_asdict_not_callable_dumps(self): + for f in CONSTRUCTORS: + self.assertRaises(TypeError, + json.dumpsJSON, f(DeadDuck()), namedtuple_as_object=True) + self.assertEqual( + json.dumpsJSON(f({})), + json.dumpsJSON(f(DeadDict()), namedtuple_as_object=True)) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_pass1.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_pass1.py new file mode 100644 index 0000000000000000000000000000000000000000..8ddddd2bad08bfa8ff48c5321021deb05a15f5c8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_pass1.py @@ -0,0 +1,71 @@ +from unittest import TestCase + +import hjson as json + +# from http://json.org/JSON_checker/test/pass1.json +JSON = r''' +[ + "JSON Test Pattern pass1", + {"object with 1 member":["array with 1 element"]}, + {}, + [], + -42, + true, + false, + null, + { + "integer": 1234567890, + "real": -9876.543210, + "e": 0.123456789e-12, + "E": 1.234567890E+34, + "": 23456789012E66, + "zero": 0, + "one": 1, + "space": " ", + "quote": "\"", + "backslash": "\\", + "controls": "\b\f\n\r\t", + 
"slash": "/ & \/", + "alpha": "abcdefghijklmnopqrstuvwyz", + "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", + "digit": "0123456789", + "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", + "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", + "true": true, + "false": false, + "null": null, + "array":[ ], + "object":{ }, + "address": "50 St. James Street", + "url": "http://www.JSON.org/", + "comment": "// /* */": " ", + " s p a c e d " :[1,2 , 3 + +, + +4 , 5 , 6 ,7 ],"compact": [1,2,3,4,5,6,7], + "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", + "quotes": "" \u0022 %22 0x22 034 "", + "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" +: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066, +1e1, +0.1e1, +1e-1, +1e00,2e+00,2e-00 +,"rosebud"] +''' + +class TestPass1(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumpsJSON(res) + self.assertEqual(res, json.loads(out)) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_pass2.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_pass2.py new file mode 100644 index 0000000000000000000000000000000000000000..a15db307d877d8796072778def9a66603d731df3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_pass2.py @@ -0,0 +1,14 @@ +from unittest import TestCase +import hjson as json + +# from http://json.org/JSON_checker/test/pass2.json +JSON = r''' +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] +''' + +class TestPass2(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumpsJSON(res) + self.assertEqual(res, json.loads(out)) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_pass3.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_pass3.py new file mode 100644 index 0000000000000000000000000000000000000000..cb528e2efae57ac8da5918f7be21bd1e1af6f7eb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_pass3.py @@ -0,0 +1,20 @@ +from unittest import TestCase + +import hjson as json + +# from http://json.org/JSON_checker/test/pass3.json +JSON = r''' +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." 
+ } +} +''' + +class TestPass3(TestCase): + def test_parse(self): + # test in/out equivalence and parsing + res = json.loads(JSON) + out = json.dumpsJSON(res) + self.assertEqual(res, json.loads(out)) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_recursion.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_recursion.py new file mode 100644 index 0000000000000000000000000000000000000000..56e1f71e3a4be2382e2b9f47312d62aeec42286c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_recursion.py @@ -0,0 +1,67 @@ +from unittest import TestCase + +import hjson as json + +class JSONTestObject: + pass + + +class RecursiveJSONEncoder(json.JSONEncoder): + recurse = False + def default(self, o): + if o is JSONTestObject: + if self.recurse: + return [JSONTestObject] + else: + return 'JSONTestObject' + return json.JSONEncoder.default(o) + + +class TestRecursion(TestCase): + def test_listrecursion(self): + x = [] + x.append(x) + try: + json.dumpsJSON(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on list recursion") + x = [] + y = [x] + x.append(y) + try: + json.dumpsJSON(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on alternating list recursion") + y = [] + x = [y, y] + # ensure that the marker is cleared + json.dumpsJSON(x) + + def test_dictrecursion(self): + x = {} + x["test"] = x + try: + json.dumpsJSON(x) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on dict recursion") + x = {} + y = {"a": x, "b": x} + # ensure that the marker is cleared + json.dumpsJSON(y) + + def test_defaultrecursion(self): + enc = RecursiveJSONEncoder() + self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"') + enc.recurse = True + try: + enc.encode(JSONTestObject) + except ValueError: + pass + else: + self.fail("didn't raise ValueError on default recursion") diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_scanstring.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_scanstring.py new file mode 100644 index 0000000000000000000000000000000000000000..ba6557cc7f3f25eefdb511c5dfee7725dffc8bec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_scanstring.py @@ -0,0 +1,168 @@ +import sys +from unittest import TestCase + +import hjson as json +import hjson.decoder +from hjson.compat import b, PY3 + +class TestScanString(TestCase): + # The bytes type is intentionally not used in most of these tests + # under Python 3 because the decoder immediately coerces to str before + # calling scanstring. In Python 2 we are testing the code paths + # for both unicode and str. + # + # The reason this is done is because Python 3 would require + # entirely different code paths for parsing bytes and str. 
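+    # Editor's note (illustrative sketch, not part of the original test file):
+    # the practical effect is that byte strings handed to hjson.loads are
+    # decoded to str before scanstring ever sees them. Expected values below
+    # are assumptions for illustration only:
+    #   hjson.loads(b'{"a": 1}') == {'a': 1}
+    #   hjson.decoder.scanstring('"abc"', 1, None, True) == ('abc', 5)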
+ # + def test_py_scanstring(self): + self._test_scanstring(hjson.decoder.scanstring) + + def _test_scanstring(self, scanstring): + if sys.maxunicode == 65535: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 6)) + else: + self.assertEqual( + scanstring(u'"z\U0001d120x"', 1, None, True), + (u'z\U0001d120x', 5)) + + self.assertEqual( + scanstring('"\\u007b"', 1, None, True), + (u'{', 8)) + + self.assertEqual( + scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True), + (u'A JSON payload should be an object or array, not a string.', 60)) + + self.assertEqual( + scanstring('["Unclosed array"', 2, None, True), + (u'Unclosed array', 17)) + + self.assertEqual( + scanstring('["extra comma",]', 2, None, True), + (u'extra comma', 14)) + + self.assertEqual( + scanstring('["double extra comma",,]', 2, None, True), + (u'double extra comma', 21)) + + self.assertEqual( + scanstring('["Comma after the close"],', 2, None, True), + (u'Comma after the close', 24)) + + self.assertEqual( + scanstring('["Extra close"]]', 2, None, True), + (u'Extra close', 14)) + + self.assertEqual( + scanstring('{"Extra comma": true,}', 2, None, True), + (u'Extra comma', 14)) + + self.assertEqual( + scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True), + (u'Extra value after close', 26)) + + self.assertEqual( + scanstring('{"Illegal expression": 1 + 2}', 2, None, True), + (u'Illegal expression', 21)) + + self.assertEqual( + scanstring('{"Illegal invocation": alert()}', 2, None, True), + (u'Illegal invocation', 21)) + + self.assertEqual( + scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True), + (u'Numbers cannot have leading zeroes', 37)) + + self.assertEqual( + scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True), + (u'Numbers cannot be hex', 24)) + + self.assertEqual( + scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True), + (u'Too deep', 30)) + + self.assertEqual( + scanstring('{"Missing colon" null}', 2, None, True), + (u'Missing colon', 16)) + + self.assertEqual( + scanstring('{"Double colon":: null}', 2, None, True), + (u'Double colon', 15)) + + self.assertEqual( + scanstring('{"Comma instead of colon", null}', 2, None, True), + (u'Comma instead of colon', 25)) + + self.assertEqual( + scanstring('["Colon instead of comma": false]', 2, None, True), + (u'Colon instead of comma', 25)) + + self.assertEqual( + scanstring('["Bad value", truth]', 2, None, True), + (u'Bad value', 12)) + + for c in map(chr, range(0x00, 0x1f)): + self.assertEqual( + scanstring(c + '"', 0, None, False), + (c, 2)) + self.assertRaises( + ValueError, + scanstring, c + '"', 0, None, True) + + def test_issue3623(self): + self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1, + "xxx") + self.assertRaises(UnicodeDecodeError, + json.encoder.encode_basestring_ascii, b("xx\xff")) + + def test_surrogates(self): + scanstring = json.decoder.scanstring + + def assertScan(given, expect, test_utf8=True): + givens = [given] + if not PY3 and test_utf8: + givens.append(given.encode('utf8')) + for given in givens: + (res, count) = scanstring(given, 1, None, True) + self.assertEqual(len(given), count) + self.assertEqual(res, expect) + + assertScan( + u'"z\\ud834\\u0079x"', + u'z\ud834yx') + assertScan( + u'"z\\ud834\\udd20x"', + u'z\U0001d120x') + assertScan( + u'"z\\ud834\\ud834\\udd20x"', + u'z\ud834\U0001d120x') + assertScan( + u'"z\\ud834x"', + u'z\ud834x') + assertScan( + u'"z\\udd20x"', + 
u'z\udd20x') + assertScan( + u'"z\ud834x"', + u'z\ud834x') + # It may look strange to join strings together, but Python is drunk. + # https://gist.github.com/etrepum/5538443 + assertScan( + u'"z\\ud834\udd20x12345"', + u''.join([u'z\ud834', u'\udd20x12345'])) + assertScan( + u'"z\ud834\\udd20x"', + u''.join([u'z\ud834', u'\udd20x'])) + # these have different behavior given UTF8 input, because the surrogate + # pair may be joined (in maxunicode > 65535 builds) + assertScan( + u''.join([u'"z\ud834', u'\udd20x"']), + u''.join([u'z\ud834', u'\udd20x']), + test_utf8=False) + + self.assertRaises(ValueError, + scanstring, u'"z\\ud83x"', 1, None, True) + self.assertRaises(ValueError, + scanstring, u'"z\\ud834\\udd2x"', 1, None, True) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_separators.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_separators.py new file mode 100644 index 0000000000000000000000000000000000000000..3be74efe7ac502e25193e15498279eba88fd4224 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_separators.py @@ -0,0 +1,42 @@ +import textwrap +from unittest import TestCase + +import hjson as json + + +class TestSeparators(TestCase): + def test_separators(self): + h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth', + {'nifty': 87}, {'field': 'yes', 'morefield': False} ] + + expect = textwrap.dedent("""\ + [ + [ + "blorpie" + ] , + [ + "whoops" + ] , + [] , + "d-shtaeou" , + "d-nthiouh" , + "i-vhbjkhnth" , + { + "nifty" : 87 + } , + { + "field" : "yes" , + "morefield" : false + } + ]""") + + + d1 = json.dumpsJSON(h) + d2 = json.dumpsJSON(h, indent=' ', sort_keys=True, separators=(' ,', ' : ')) + + h1 = json.loads(d1) + h2 = json.loads(d2) + + self.assertEqual(h1, h) + self.assertEqual(h2, h) + self.assertEqual(d2, expect) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_tool.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..475fc5aa94be8729da9d0f92f272d2753b6d92cb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_tool.py @@ -0,0 +1,98 @@ +from __future__ import with_statement +import os +import sys +import textwrap +import unittest +import subprocess +import tempfile +try: + # Python 3.x + from test.support import strip_python_stderr +except ImportError: + # Python 2.6+ + try: + from test.test_support import strip_python_stderr + except ImportError: + # Python 2.5 + import re + def strip_python_stderr(stderr): + return re.sub( + r"\[\d+ refs\]\r?\n?$".encode(), + "".encode(), + stderr).strip() + +class TestTool(unittest.TestCase): + data = """ + + [["blorpie"],[ "whoops" ] , [ + ],\t"d-shtaeou",\r"d-nthiouh", + "i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field" + :"yes"} ] + """ + + expect = textwrap.dedent("""\ + [ + [ + blorpie + ] + [ + whoops + ] + [] + d-shtaeou + d-nthiouh + i-vhbjkhnth + { + nifty: 87 + } + { + morefield: false + field: yes + } + ] + """) + + def runTool(self, args=None, data=None): + argv = [sys.executable, '-m', 'hjson.tool'] + if args: + argv.extend(args) + proc = subprocess.Popen(argv, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE) + out, err = proc.communicate(data) + self.assertEqual(strip_python_stderr(err), ''.encode()) + self.assertEqual(proc.returncode, 0) + return out + + def test_stdin_stdout(self): + self.assertEqual( + self.runTool(data=self.data.encode()), + self.expect.encode()) + + def test_infile_stdout(self): + 
with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + self.assertEqual( + self.runTool(args=[infile.name]), + self.expect.encode()) + + def x_test_infile_outfile(self): + """Not currently an option in tool""" + with tempfile.NamedTemporaryFile() as infile: + infile.write(self.data.encode()) + infile.flush() + # outfile will get overwritten by tool, so the delete + # may not work on some platforms. Do it manually. + outfile = tempfile.NamedTemporaryFile() + try: + self.assertEqual( + self.runTool(args=[infile.name, outfile.name]), + ''.encode()) + with open(outfile.name, 'rb') as f: + self.assertEqual(f.read(), self.expect.encode()) + finally: + outfile.close() + if os.path.exists(outfile.name): + os.unlink(outfile.name) diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_tuple.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_tuple.py new file mode 100644 index 0000000000000000000000000000000000000000..9b7289df60759a379d511e127a8cec6ec2b980ac --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_tuple.py @@ -0,0 +1,51 @@ +import unittest + +from hjson.compat import StringIO +import hjson as json + +class TestTuples(unittest.TestCase): + def test_tuple_array_dumps(self): + t = (1, 2, 3) + expect = json.dumpsJSON(list(t)) + # Default is True + self.assertEqual(expect, json.dumpsJSON(t)) + self.assertEqual(expect, json.dumpsJSON(t, tuple_as_array=True)) + self.assertRaises(TypeError, json.dumpsJSON, t, tuple_as_array=False) + # Ensure that the "default" does not get called + self.assertEqual(expect, json.dumpsJSON(t, default=repr)) + self.assertEqual(expect, json.dumpsJSON(t, tuple_as_array=True, + default=repr)) + # Ensure that the "default" gets called + self.assertEqual( + json.dumpsJSON(repr(t)), + json.dumpsJSON(t, tuple_as_array=False, default=repr)) + + def test_tuple_array_dump(self): + t = (1, 2, 3) + expect = json.dumpsJSON(list(t)) + # Default is True + sio = StringIO() + json.dumpJSON(t, sio) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dumpJSON(t, sio, tuple_as_array=True) + self.assertEqual(expect, sio.getvalue()) + self.assertRaises(TypeError, json.dumpJSON, t, StringIO(), + tuple_as_array=False) + # Ensure that the "default" does not get called + sio = StringIO() + json.dumpJSON(t, sio, default=repr) + self.assertEqual(expect, sio.getvalue()) + sio = StringIO() + json.dumpJSON(t, sio, tuple_as_array=True, default=repr) + self.assertEqual(expect, sio.getvalue()) + # Ensure that the "default" gets called + sio = StringIO() + json.dumpJSON(t, sio, tuple_as_array=False, default=repr) + self.assertEqual( + json.dumpsJSON(repr(t)), + sio.getvalue()) + +class TestNamedTuple(unittest.TestCase): + def test_namedtuple_dump(self): + pass diff --git a/parrot/lib/python3.10/site-packages/hjson/tests/test_unicode.py b/parrot/lib/python3.10/site-packages/hjson/tests/test_unicode.py new file mode 100644 index 0000000000000000000000000000000000000000..d634fe40671742aa16074ce3704c56c504125237 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/hjson/tests/test_unicode.py @@ -0,0 +1,153 @@ +import sys +import codecs +from unittest import TestCase + +import hjson as json +from hjson.compat import unichr, text_type, b, u, BytesIO + +class TestUnicode(TestCase): + def test_encoding1(self): + encoder = json.JSONEncoder(encoding='utf-8') + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = encoder.encode(u) + js = encoder.encode(s) + 
self.assertEqual(ju, js) + + def test_encoding2(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + s = u.encode('utf-8') + ju = json.dumpsJSON(u, encoding='utf-8') + js = json.dumpsJSON(s, encoding='utf-8') + self.assertEqual(ju, js) + + def test_encoding3(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumpsJSON(u) + self.assertEqual(j, '"\\u03b1\\u03a9"') + + def test_encoding4(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumpsJSON([u]) + self.assertEqual(j, '["\\u03b1\\u03a9"]') + + def test_encoding5(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumpsJSON(u, ensure_ascii=False) + self.assertEqual(j, u'"' + u + u'"') + + def test_encoding6(self): + u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}' + j = json.dumpsJSON([u], ensure_ascii=False) + self.assertEqual(j, u'["' + u + u'"]') + + def test_big_unicode_encode(self): + u = u'\U0001d120' + self.assertEqual(json.dumpsJSON(u), '"\\ud834\\udd20"') + self.assertEqual(json.dumpsJSON(u, ensure_ascii=False), u'"\U0001d120"') + + def test_big_unicode_decode(self): + u = u'z\U0001d120x' + self.assertEqual(json.loads('"' + u + '"'), u) + self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u) + + def test_unicode_decode(self): + for i in range(0, 0xd7ff): + u = unichr(i) + #s = '"\\u{0:04x}"'.format(i) + s = '"\\u%04x"' % (i,) + self.assertEqual(json.loads(s), u) + + def test_object_pairs_hook_with_unicode(self): + s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' + p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4), + (u"qrt", 5), (u"pad", 6), (u"hoy", 7)] + self.assertEqual(json.loads(s), eval(s)) + self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p) + od = json.loads(s, object_pairs_hook=json.OrderedDict) + self.assertEqual(od, json.OrderedDict(p)) + self.assertEqual(type(od), json.OrderedDict) + # the object_pairs_hook takes priority over the object_hook + self.assertEqual(json.loads(s, + object_pairs_hook=json.OrderedDict, + object_hook=lambda x: None), + json.OrderedDict(p)) + + + def test_default_encoding(self): + self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')), + {'a': u'\xe9'}) + + def test_unicode_preservation(self): + self.assertEqual(type(json.loads(u'""')), text_type) + self.assertEqual(type(json.loads(u'"a"')), text_type) + self.assertEqual(type(json.loads(u'["a"]')[0]), text_type) + + def test_ensure_ascii_false_returns_unicode(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + self.assertEqual(type(json.dumpsJSON([], ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumpsJSON(0, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumpsJSON({}, ensure_ascii=False)), text_type) + self.assertEqual(type(json.dumpsJSON("", ensure_ascii=False)), text_type) + + def test_ensure_ascii_false_bytestring_encoding(self): + # http://code.google.com/p/simplejson/issues/detail?id=48 + doc1 = {u'quux': b('Arr\xc3\xaat sur images')} + doc2 = {u'quux': u('Arr\xeat sur images')} + doc_ascii = '{"quux": "Arr\\u00eat sur images"}' + doc_unicode = u'{"quux": "Arr\xeat sur images"}' + self.assertEqual(json.dumpsJSON(doc1), doc_ascii) + self.assertEqual(json.dumpsJSON(doc2), doc_ascii) + self.assertEqual(json.dumpsJSON(doc1, ensure_ascii=False), doc_unicode) + self.assertEqual(json.dumpsJSON(doc2, ensure_ascii=False), doc_unicode) + + def test_ensure_ascii_linebreak_encoding(self): + # 
http://timelessrepo.com/json-isnt-a-javascript-subset + s1 = u'\u2029\u2028' + s2 = s1.encode('utf8') + expect = '"\\u2029\\u2028"' + self.assertEqual(json.dumpsJSON(s1), expect) + self.assertEqual(json.dumpsJSON(s2), expect) + self.assertEqual(json.dumpsJSON(s1, ensure_ascii=False), expect) + self.assertEqual(json.dumpsJSON(s2, ensure_ascii=False), expect) + + def test_invalid_escape_sequences(self): + # incomplete escape sequence + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u1') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u12') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u123') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u1234') + # invalid escape sequence + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u123x"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u12x4"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\u1x34"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ux234"') + if sys.maxunicode > 65535: + # invalid escape sequence for low surrogate + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u0"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u00"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u000"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u000x"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u00x0"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\u0x00"') + self.assertRaises(json.HjsonDecodeError, json.loads, '"\\ud800\\ux000"') + + def test_ensure_ascii_still_works(self): + # in the ascii range, ensure that everything is the same + for c in map(unichr, range(0, 127)): + self.assertEqual( + json.dumpsJSON(c, ensure_ascii=False), + json.dumpsJSON(c)) + snowman = u'\N{SNOWMAN}' + self.assertEqual( + json.dumpsJSON(c, ensure_ascii=False), + '"' + c + '"') + + def test_strip_bom(self): + content = u"\u3053\u3093\u306b\u3061\u308f" + json_doc = codecs.BOM_UTF8 + b(json.dumpsJSON(content)) + self.assertEqual(json.load(BytesIO(json_doc)), content) + for doc in json_doc, json_doc.decode('utf8'): + self.assertEqual(json.loads(doc), content) diff --git a/parrot/lib/python3.10/site-packages/markdown2.py b/parrot/lib/python3.10/site-packages/markdown2.py new file mode 100644 index 0000000000000000000000000000000000000000..57a972679254a002eb0a5ad2616c0cb06854f04a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/markdown2.py @@ -0,0 +1,3977 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Trent Mick. +# Copyright (c) 2007-2008 ActiveState Corp. +# License: MIT (http://www.opensource.org/licenses/mit-license.php) + +r"""A fast and complete Python implementation of Markdown. + +[from http://daringfireball.net/projects/markdown/] +> Markdown is a text-to-HTML filter; it translates an easy-to-read / +> easy-to-write structured text format into HTML. Markdown's text +> format is most similar to that of plain text email, and supports +> features such as headers, *emphasis*, code blocks, blockquotes, and +> links. +> +> Markdown's syntax is designed not as a generic markup language, but +> specifically to serve as a front-end to (X)HTML. You can use span-level +> HTML tags anywhere in a Markdown document, and you can use block level +> HTML tags (like
<div> and <table> as well). + +Module usage: + + >>> import markdown2 + >>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)` + u'<p><em>boo!</em></p>\n' + + >>> markdowner = Markdown() + >>> markdowner.convert("*boo!*") + u'<p><em>boo!</em></p>\n' + >>> markdowner.convert("**boom!**") + u'<p><strong>boom!</strong></p>
\n' + +This implementation of Markdown implements the full "core" syntax plus a +number of extras (e.g., code syntax coloring, footnotes) as described on +. +""" + +cmdln_desc = """A fast and complete Python implementation of Markdown, a +text-to-HTML conversion tool for web writers. + +Supported extra syntax options (see -x|--extras option below and +see for details): + +* admonitions: Enable parsing of RST admonitions. +* breaks: Control where hard breaks are inserted in the markdown. + Options include: + - on_newline: Replace single new line characters with
<br> when True + - on_backslash: Replace backslashes at the end of a line with <br>
+* break-on-newline: Alias for the on_newline option in the breaks extra. +* code-friendly: Disable _ and __ for em and strong. +* cuddled-lists: Allow lists to be cuddled to the preceding paragraph. +* fenced-code-blocks: Allows a code block to not have to be indented + by fencing it with '```' on a line before and after. Based on + with support for + syntax highlighting. +* footnotes: Support footnotes as in use on daringfireball.net and + implemented in other Markdown processors (tho not in Markdown.pl v1.0.1). +* header-ids: Adds "id" attributes to headers. The id value is a slug of + the header text. +* highlightjs-lang: Allows specifying the language which used for syntax + highlighting when using fenced-code-blocks and highlightjs. +* html-classes: Takes a dict mapping html tag names (lowercase) to a + string to use for a "class" tag attribute. Currently only supports "img", + "table", "thead", "pre", "code", "ul" and "ol" tags. Add an issue if you require + this for other tags. +* link-patterns: Auto-link given regex patterns in text (e.g. bug number + references, revision number references). +* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to + have markdown processing be done on its contents. Similar to + but with + some limitations. +* metadata: Extract metadata from a leading '---'-fenced block. + See for details. +* middle-word-em: Allows or disallows emphasis syntax in the middle of words, + defaulting to allow. Disabling this means that `this_text_here` will not be + converted to `thistexthere`. +* nofollow: Add `rel="nofollow"` to add `` tags with an href. See + . +* numbering: Support of generic counters. Non standard extension to + allow sequential numbering of figures, tables, equations, exhibits etc. +* pyshell: Treats unindented Python interactive shell sessions as + blocks. +* smarty-pants: Replaces ' and " with curly quotation marks or curly + apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes, + and ellipses. +* spoiler: A special kind of blockquote commonly hidden behind a + click on SO. Syntax per . +* strike: text inside of double tilde is ~~strikethrough~~ +* tag-friendly: Requires atx style headers to have a space between the # and + the header text. Useful for applications that require twitter style tags to + pass through the parser. +* tables: Tables using the same format as GFM + and + PHP-Markdown Extra . +* toc: The returned HTML string gets a new "toc_html" attribute which is + a Table of Contents for the document. (experimental) +* use-file-vars: Look for an Emacs-style markdown-extras file variable to turn + on Extras. +* wiki-tables: Google Code Wiki-style tables. See + . +* wavedrom: Support for generating Wavedrom digital timing diagrams +* xml: Passes one-liner processing instructions and namespaced XML tags. +""" + +# Dev Notes: +# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm +# not yet sure if there implications with this. Compare 'pydoc sre' +# and 'perldoc perlre'. 
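+# Editor's note (illustrative sketch, not part of the original module): the
+# extras listed above are opted into via the `extras` argument of `markdown()`
+# or the `Markdown` class. The sample text and the particular extras chosen
+# below are only an example of how the option is passed:
+#
+#   >>> import markdown2
+#   >>> html = markdown2.markdown(
+#   ...     "---\ntitle: Demo\n---\n\n# Hello\n",
+#   ...     extras=["metadata", "header-ids"])
+#   >>> html.metadata          # populated by the "metadata" extra
+#   {'title': 'Demo'}
+#   >>> "id=" in html          # "header-ids" adds id attributes to headers
+#   True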
+ +__version_info__ = (2, 5, 0) +__version__ = '.'.join(map(str, __version_info__)) +__author__ = "Trent Mick" + +import argparse +import codecs +import logging +import re +import sys +from collections import defaultdict, OrderedDict +from abc import ABC, abstractmethod +import functools +from hashlib import sha256 +from random import randint, random +from typing import Any, Callable, Collection, Dict, List, Literal, Optional, Tuple, Type, TypedDict, Union +from enum import IntEnum, auto + +if sys.version_info[1] < 9: + from typing import Iterable +else: + from collections.abc import Iterable + +# ---- type defs +_safe_mode = Literal['replace', 'escape'] +_extras_dict = Dict[str, Any] +_extras_param = Union[List[str], _extras_dict] +_link_patterns = Iterable[Tuple[re.Pattern, Union[str, Callable[[re.Match], str]]]] + +# ---- globals + +DEBUG = False +log = logging.getLogger("markdown") + +DEFAULT_TAB_WIDTH = 4 + + +SECRET_SALT = bytes(randint(0, 1000000)) +# MD5 function was previously used for this; the "md5" prefix was kept for +# backwards compatibility. +def _hash_text(s: str) -> str: + return 'md5-' + sha256(SECRET_SALT + s.encode("utf-8")).hexdigest()[32:] + +# Table of hash values for escaped characters: +g_escape_table = dict([(ch, _hash_text(ch)) + for ch in '\\`*_{}[]()>#+-.!']) + +# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: +# http://bumppo.net/projects/amputator/ +_AMPERSAND_RE = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)') + + +# ---- exceptions +class MarkdownError(Exception): + pass + + +# ---- public api + +def markdown_path( + path: str, + encoding: str = "utf-8", + html4tags: bool = False, + tab_width: int = DEFAULT_TAB_WIDTH, + safe_mode: Optional[_safe_mode] = None, + extras: Optional[_extras_param] = None, + link_patterns: Optional[_link_patterns] = None, + footnote_title: Optional[str] = None, + footnote_return_symbol: Optional[str] = None, + use_file_vars: bool = False +) -> 'UnicodeWithAttrs': + fp = codecs.open(path, 'r', encoding) + text = fp.read() + fp.close() + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + footnote_title=footnote_title, + footnote_return_symbol=footnote_return_symbol, + use_file_vars=use_file_vars).convert(text) + + +def markdown( + text: str, + html4tags: bool = False, + tab_width: int = DEFAULT_TAB_WIDTH, + safe_mode: Optional[_safe_mode] = None, + extras: Optional[_extras_param] = None, + link_patterns: Optional[_link_patterns] = None, + footnote_title: Optional[str] = None, + footnote_return_symbol: Optional[str] =None, + use_file_vars: bool = False, + cli: bool = False +) -> 'UnicodeWithAttrs': + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + footnote_title=footnote_title, + footnote_return_symbol=footnote_return_symbol, + use_file_vars=use_file_vars, cli=cli).convert(text) + + +class Stage(IntEnum): + PREPROCESS = auto() + HASH_HTML = auto() + LINK_DEFS = auto() + + BLOCK_GAMUT = auto() + HEADERS = auto() + LISTS = auto() + CODE_BLOCKS = auto() + BLOCK_QUOTES = auto() + PARAGRAPHS = auto() + + SPAN_GAMUT = auto() + CODE_SPANS = auto() + ESCAPE_SPECIAL = auto() + LINKS = auto() # and auto links + ITALIC_AND_BOLD = auto() + + POSTPROCESS = auto() + UNHASH_HTML = auto() + + +def mark_stage(stage: Stage): + ''' + Decorator that handles executing relevant `Extra`s before and after this `Stage` executes. 
+ ''' + def wrapper(func): + @functools.wraps(func) + def inner(md: 'Markdown', text, *args, **kwargs): + md.stage = stage + # set "order" prop so extras can tell if they're being invoked before/after the stage + md.order = stage - 0.5 + + if stage in Extra._exec_order: + for klass in Extra._exec_order[stage][0]: + if klass.name not in md.extra_classes: + continue + extra = md.extra_classes[klass.name] + if extra.test(text): + text = extra.run(text) + + md.order = stage + text = func(md, text, *args, **kwargs) + md.order = stage + 0.5 + + if stage in Extra._exec_order: + for klass in Extra._exec_order[stage][1]: + if klass.name not in md.extra_classes: + continue + extra = md.extra_classes[klass.name] + if extra.test(text): + text = extra.run(text) + + return text + + return inner + + return wrapper + + +class Markdown(object): + # The dict of "extras" to enable in processing -- a mapping of + # extra name to argument for the extra. Most extras do not have an + # argument, in which case the value is None. + # + # This can be set via (a) subclassing and (b) the constructor + # "extras" argument. + extras: _extras_dict + # dict of `Extra` names and associated class instances, populated during _setup_extras + extra_classes: Dict[str, 'Extra'] + + urls: Dict[str, str] + titles: Dict[str, str] + html_blocks: Dict[str, str] + html_spans: Dict[str, str] + html_removed_text: str = "{(#HTML#)}" # placeholder removed text that does not trigger bold + html_removed_text_compat: str = "[HTML_REMOVED]" # for compat with markdown.py + safe_mode: Optional[_safe_mode] + + _toc: List[Tuple[int, str, str]] + + # Used to track when we're inside an ordered or unordered list + # (see _ProcessListItems() for details): + list_level = 0 + + stage: Stage + '''Current "stage" of markdown conversion taking place''' + order: float + ''' + Same as `Stage` but will be +/- 0.5 of the value of `Stage`. + This allows extras to check if they are running before or after a particular stage + with `if md.order < md.stage`. + ''' + + _ws_only_line_re = re.compile(r"^[ \t]+$", re.M) + + def __init__( + self, + html4tags: bool = False, + tab_width: int = DEFAULT_TAB_WIDTH, + safe_mode: Optional[_safe_mode] = None, + extras: Optional[_extras_param] = None, + link_patterns: Optional[_link_patterns] = None, + footnote_title: Optional[str] = None, + footnote_return_symbol: Optional[str] = None, + use_file_vars: bool = False, + cli: bool = False + ): + if html4tags: + self.empty_element_suffix = ">" + else: + self.empty_element_suffix = " />" + self.tab_width = tab_width + self.tab = tab_width * " " + + # For compatibility with earlier markdown2.py and with + # markdown.py's safe_mode being a boolean, + # safe_mode == True -> "replace" + if safe_mode is True: + self.safe_mode = "replace" + else: + self.safe_mode = safe_mode + + # Massaging and building the "extras" info. + if getattr(self, 'extras', None) is None: + self.extras = {} + elif not isinstance(self.extras, dict): + # inheriting classes may set `self.extras` as List[str]. 
+ # we can't allow it through type hints but we can convert it + self.extras = dict([(e, None) for e in self.extras]) # type:ignore + + if extras: + if not isinstance(extras, dict): + extras = dict([(e, None) for e in extras]) + self.extras.update(extras) + assert isinstance(self.extras, dict) + + if "toc" in self.extras: + if "header-ids" not in self.extras: + self.extras["header-ids"] = None # "toc" implies "header-ids" + + if self.extras["toc"] is None: + self._toc_depth = 6 + else: + self._toc_depth = self.extras["toc"].get("depth", 6) + + if 'header-ids' in self.extras: + if not isinstance(self.extras['header-ids'], dict): + self.extras['header-ids'] = { + 'mixed': False, + 'prefix': self.extras['header-ids'], + 'reset-count': True + } + + if 'break-on-newline' in self.extras: + self.extras.setdefault('breaks', {}) + self.extras['breaks']['on_newline'] = True + + if 'link-patterns' in self.extras: + # allow link patterns via extras dict without kwarg explicitly set + link_patterns = link_patterns or self.extras['link-patterns'] + if link_patterns is None: + # if you have specified that the link-patterns extra SHOULD + # be used (via self.extras) but you haven't provided anything + # via the link_patterns argument then an error is raised + raise MarkdownError("If the 'link-patterns' extra is used, an argument for 'link_patterns' is required") + self.extras['link-patterns'] = link_patterns + + self._instance_extras = self.extras.copy() + self.link_patterns = link_patterns + self.footnote_title = footnote_title + self.footnote_return_symbol = footnote_return_symbol + self.use_file_vars = use_file_vars + self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M) + self.cli = cli + + self._escape_table = g_escape_table.copy() + self._code_table = {} + if "smarty-pants" in self.extras: + self._escape_table['"'] = _hash_text('"') + self._escape_table["'"] = _hash_text("'") + + def reset(self): + self.urls = {} + self.titles = {} + self.html_blocks = {} + self.html_spans = {} + self.list_level = 0 + self.extras = self._instance_extras.copy() + self._setup_extras() + self._toc = [] + + def _setup_extras(self): + if "footnotes" in self.extras: + # order of insertion matters for footnotes. Use ordered dict for Python < 3.7 + # https://docs.python.org/3/whatsnew/3.7.html#summary-release-highlights + self.footnotes = OrderedDict() + self.footnote_ids = [] + if "header-ids" in self.extras: + if not hasattr(self, '_count_from_header_id') or self.extras['header-ids'].get('reset-count', False): + self._count_from_header_id = defaultdict(int) + if "metadata" in self.extras: + self.metadata: Dict[str, Any] = {} + + self.extra_classes = {} + for name, klass in Extra._registry.items(): + if name not in self.extras: + continue + self.extra_classes[name] = klass(self, (self.extras.get(name, {}))) + + # Per "rel" + # should only be used in tags with an "href" attribute. + + # Opens the linked document in a new window or tab + # should only used in tags with an "href" attribute. + # same with _a_nofollow + _a_nofollow_or_blank_links = re.compile(r""" + <(a) + ( + [^>]* + href= # href is required + ['"]? # HTML5 attribute values do not have to be quoted + [^#'"] # We don't want to match href values that start with # (like footnotes) + ) + """, + re.IGNORECASE | re.VERBOSE + ) + + def convert(self, text: str) -> 'UnicodeWithAttrs': + """Convert the given text.""" + # Main function. The order in which other subs are called here is + # essential. 
Link and image substitutions need to happen before + # _EscapeSpecialChars(), so that any *'s or _'s in the + # and tags get encoded. + + # Clear the global hashes. If we don't clear these, you get conflicts + # from other articles when generating a page which contains more than + # one article (e.g. an index page that shows the N most recent + # articles): + self.reset() + + if not isinstance(text, str): + # TODO: perhaps shouldn't presume UTF-8 for string input? + text = str(text, 'utf-8') + + if self.use_file_vars: + # Look for emacs-style file variable hints. + text = self._emacs_oneliner_vars_pat.sub(self._emacs_vars_oneliner_sub, text) + emacs_vars = self._get_emacs_vars(text) + if "markdown-extras" in emacs_vars: + splitter = re.compile("[ ,]+") + for e in splitter.split(emacs_vars["markdown-extras"]): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + self.extras[ename] = earg + + self._setup_extras() + + # Standardize line endings: + text = text.replace("\r\n", "\n") + text = text.replace("\r", "\n") + + # Make sure $text ends with a couple of newlines: + text += "\n\n" + + # Convert all tabs to spaces. + text = self._detab(text) + + # Strip any lines consisting only of spaces and tabs. + # This makes subsequent regexen easier to write, because we can + # match consecutive blank lines with /\n+/ instead of something + # contorted like /[ \t]*\n+/ . + text = self._ws_only_line_re.sub("", text) + + # strip metadata from head and extract + if "metadata" in self.extras: + text = self._extract_metadata(text) + + text = self.preprocess(text) + + if self.safe_mode: + text = self._hash_html_spans(text) + + # Turn block-level HTML blocks into hash entries + text = self._hash_html_blocks(text, raw=True) + + # Strip link definitions, store in hashes. 
+ if "footnotes" in self.extras: + # Must do footnotes first because an unlucky footnote defn + # looks like a link defn: + # [^4]: this "looks like a link defn" + text = self._strip_footnote_definitions(text) + text = self._strip_link_definitions(text) + + text = self._run_block_gamut(text) + + if "footnotes" in self.extras: + text = self._add_footnotes(text) + + text = self.postprocess(text) + + text = self._unescape_special_chars(text) + + if self.safe_mode: + text = self._unhash_html_spans(text) + # return the removed text warning to its markdown.py compatible form + text = text.replace(self.html_removed_text, self.html_removed_text_compat) + + do_target_blank_links = "target-blank-links" in self.extras + do_nofollow_links = "nofollow" in self.extras + + if do_target_blank_links and do_nofollow_links: + text = self._a_nofollow_or_blank_links.sub(r'<\1 rel="nofollow noopener" target="_blank"\2', text) + elif do_target_blank_links: + text = self._a_nofollow_or_blank_links.sub(r'<\1 rel="noopener" target="_blank"\2', text) + elif do_nofollow_links: + text = self._a_nofollow_or_blank_links.sub(r'<\1 rel="nofollow"\2', text) + + if "toc" in self.extras and self._toc: + if self.extras['header-ids'].get('mixed'): + # TOC will only be out of order if mixed headers is enabled + def toc_sort(entry): + '''Sort the TOC by order of appearance in text''' + match = re.search( + # header tag, any attrs, the ID, any attrs, the text, close tag + r'^<(h%d).*?id=(["\'])%s\2.*>%s$' % (entry[0], entry[1], re.escape(entry[2])), + text, re.M + ) + return match.start() if match else 0 + + self._toc.sort(key=toc_sort) + self._toc_html = calculate_toc_html(self._toc) + + # Prepend toc html to output + if self.cli or (self.extras['toc'] is not None and self.extras['toc'].get('prepend', False)): + text = '{}\n{}'.format(self._toc_html, text) + + text += "\n" + + # Attach attrs to output + rv = UnicodeWithAttrs(text) + + if "toc" in self.extras and self._toc: + rv.toc_html = self._toc_html + + if "metadata" in self.extras: + rv.metadata = self.metadata + return rv + + @mark_stage(Stage.POSTPROCESS) + def postprocess(self, text: str) -> str: + """A hook for subclasses to do some postprocessing of the html, if + desired. This is called before unescaping of special chars and + unhashing of raw HTML spans. + """ + return text + + @mark_stage(Stage.PREPROCESS) + def preprocess(self, text: str) -> str: + """A hook for subclasses to do some preprocessing of the Markdown, if + desired. This is called after basic formatting of the text, but prior + to any extras, safe mode, etc. processing. + """ + return text + + # Is metadata if the content starts with optional '---'-fenced `key: value` + # pairs. E.g. (indented for presentation): + # --- + # foo: bar + # another-var: blah blah + # --- + # # header + # or: + # foo: bar + # another-var: blah blah + # + # # header + _meta_data_pattern = re.compile(r''' + ^{0}( # optional opening fence + (?: + {1}:(?:\n+[ \t]+.*)+ # indented lists + )|(?: + (?:{1}:\s+>(?:\n\s+.*)+?) # multiline long descriptions + (?=\n{1}:\s*.*\n|\s*\Z) # match up until the start of the next key:value definition or the end of the input text + )|(?: + {1}:(?! >).*\n? 
# simple key:value pair, leading spaces allowed + ) + ){0} # optional closing fence + '''.format(r'(?:---[\ \t]*\n)?', r'[\S \t]*\w[\S \t]*\s*'), re.MULTILINE | re.VERBOSE + ) + + _key_val_list_pat = re.compile( + r"^-(?:[ \t]*([^\n]*)(?:[ \t]*[:-][ \t]*(\S+))?)(?:\n((?:[ \t]+[^\n]+\n?)+))?", + re.MULTILINE, + ) + _key_val_dict_pat = re.compile( + r"^([^:\n]+)[ \t]*:[ \t]*([^\n]*)(?:((?:\n[ \t]+[^\n]+)+))?", re.MULTILINE + ) # grp0: key, grp1: value, grp2: multiline value + _meta_data_fence_pattern = re.compile(r'^---[\ \t]*\n', re.MULTILINE) + _meta_data_newline = re.compile("^\n", re.MULTILINE) + + def _extract_metadata(self, text: str) -> str: + if text.startswith("---"): + fence_splits = re.split(self._meta_data_fence_pattern, text, maxsplit=2) + metadata_content = fence_splits[1] + tail = fence_splits[2] + else: + metadata_split = re.split(self._meta_data_newline, text, maxsplit=1) + metadata_content = metadata_split[0] + tail = metadata_split[1] + + # _meta_data_pattern only has one capturing group, so we can assume + # the returned type to be list[str] + match: List[str] = re.findall(self._meta_data_pattern, metadata_content) + if not match: + return text + + def parse_structured_value(value: str) -> Union[List[Any], Dict[str, Any]]: + vs = value.lstrip() + vs = value.replace(v[: len(value) - len(vs)], "\n")[1:] + + # List + if vs.startswith("-"): + r: List[Any] = [] + # the regex used has multiple capturing groups, so + # returned type from findall will be List[List[str]] + match: List[str] + for match in re.findall(self._key_val_list_pat, vs): + if match[0] and not match[1] and not match[2]: + r.append(match[0].strip()) + elif match[0] == ">" and not match[1] and match[2]: + r.append(match[2].strip()) + elif match[0] and match[1]: + r.append({match[0].strip(): match[1].strip()}) + elif not match[0] and not match[1] and match[2]: + r.append(parse_structured_value(match[2])) + else: + # Broken case + pass + + return r + + # Dict + else: + return { + match[0].strip(): ( + match[1].strip() + if match[1] + else parse_structured_value(match[2]) + ) + for match in re.findall(self._key_val_dict_pat, vs) + } + + for item in match: + + k, v = item.split(":", 1) + + # Multiline value + if v[:3] == " >\n": + self.metadata[k.strip()] = _dedent(v[3:]).strip() + + # Empty value + elif v == "\n": + self.metadata[k.strip()] = "" + + # Structured value + elif v[0] == "\n": + self.metadata[k.strip()] = parse_structured_value(v) + + # Simple value + else: + self.metadata[k.strip()] = v.strip() + + return tail + + _emacs_oneliner_vars_pat = re.compile(r"((?:)?)", re.UNICODE) + # This regular expression is intended to match blocks like this: + # PREFIX Local Variables: SUFFIX + # PREFIX mode: Tcl SUFFIX + # PREFIX End: SUFFIX + # Some notes: + # - "[ \t]" is used instead of "\s" to specifically exclude newlines + # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does + # not like anything other than Unix-style line terminators. + _emacs_local_vars_pat = re.compile(r"""^ + (?P(?:[^\r\n|\n|\r])*?) 
+ [\ \t]*Local\ Variables:[\ \t]* + (?P.*?)(?:\r\n|\n|\r) + (?P.*?\1End:) + """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE) + + def _emacs_vars_oneliner_sub(self, match: re.Match) -> str: + if match.group(1).strip() == '-*-' and match.group(4).strip() == '-*-': + lead_ws = re.findall(r'^\s*', match.group(1))[0] + tail_ws = re.findall(r'\s*$', match.group(4))[0] + return '%s%s' % (lead_ws, '-*-', match.group(2).strip(), '-*-', tail_ws) + + start, end = match.span() + return match.string[start: end] + + def _get_emacs_vars(self, text: str) -> Dict[str, str]: + """Return a dictionary of emacs-style local variables. + + Parsing is done loosely according to this spec (and according to + some in-practice deviations from this): + http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables + """ + emacs_vars = {} + SIZE = pow(2, 13) # 8kB + + # Search near the start for a '-*-'-style one-liner of variables. + head = text[:SIZE] + if "-*-" in head: + match = self._emacs_oneliner_vars_pat.search(head) + if match: + emacs_vars_str = match.group(2) + assert '\n' not in emacs_vars_str + emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';') + if s.strip()] + if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]: + # While not in the spec, this form is allowed by emacs: + # -*- Tcl -*- + # where the implied "variable" is "mode". This form + # is only allowed if there are no other variables. + emacs_vars["mode"] = emacs_var_strs[0].strip() + else: + for emacs_var_str in emacs_var_strs: + try: + variable, value = emacs_var_str.strip().split(':', 1) + except ValueError: + log.debug("emacs variables error: malformed -*- " + "line: %r", emacs_var_str) + continue + # Lowercase the variable name because Emacs allows "Mode" + # or "mode" or "MoDe", etc. + emacs_vars[variable.lower()] = value.strip() + + tail = text[-SIZE:] + if "Local Variables" in tail: + match = self._emacs_local_vars_pat.search(tail) + if match: + prefix = match.group("prefix") + suffix = match.group("suffix") + lines = match.group("content").splitlines(False) + # print "prefix=%r, suffix=%r, content=%r, lines: %s"\ + # % (prefix, suffix, match.group("content"), lines) + + # Validate the Local Variables block: proper prefix and suffix + # usage. + for i, line in enumerate(lines): + if not line.startswith(prefix): + log.debug("emacs variables error: line '%s' " + "does not use proper prefix '%s'" + % (line, prefix)) + return {} + # Don't validate suffix on last line. Emacs doesn't care, + # neither should we. + if i != len(lines)-1 and not line.endswith(suffix): + log.debug("emacs variables error: line '%s' " + "does not use proper suffix '%s'" + % (line, suffix)) + return {} + + # Parse out one emacs var per line. + continued_for = None + for line in lines[:-1]: # no var on the last line ("PREFIX End:") + if prefix: + line = line[len(prefix):] # strip prefix + if suffix: + line = line[:-len(suffix)] # strip suffix + line = line.strip() + if continued_for: + variable = continued_for + if line.endswith('\\'): + line = line[:-1].rstrip() + else: + continued_for = None + emacs_vars[variable] += ' ' + line + else: + try: + variable, value = line.split(':', 1) + except ValueError: + log.debug("local variables error: missing colon " + "in local variables entry: '%s'" % line) + continue + # Do NOT lowercase the variable name, because Emacs only + # allows "mode" (and not "Mode", "MoDe", etc.) in this block. 
+ value = value.strip() + if value.endswith('\\'): + value = value[:-1].rstrip() + continued_for = variable + else: + continued_for = None + emacs_vars[variable] = value + + # Unquote values. + for var, val in list(emacs_vars.items()): + if len(val) > 1 and (val.startswith('"') and val.endswith('"') + or val.startswith('"') and val.endswith('"')): + emacs_vars[var] = val[1:-1] + + return emacs_vars + + def _detab_line(self, line: str) -> str: + r"""Recusively convert tabs to spaces in a single line. + + Called from _detab().""" + if '\t' not in line: + return line + chunk1, chunk2 = line.split('\t', 1) + chunk1 += (' ' * (self.tab_width - len(chunk1) % self.tab_width)) + output = chunk1 + chunk2 + return self._detab_line(output) + + def _detab(self, text: str) -> str: + r"""Iterate text line by line and convert tabs to spaces. + + >>> m = Markdown() + >>> m._detab("\tfoo") + ' foo' + >>> m._detab(" \tfoo") + ' foo' + >>> m._detab("\t foo") + ' foo' + >>> m._detab(" foo") + ' foo' + >>> m._detab(" foo\n\tbar\tblam") + ' foo\n bar blam' + """ + if '\t' not in text: + return text + output = [] + for line in text.splitlines(): + output.append(self._detab_line(line)) + return '\n'.join(output) + + # I broke out the html5 tags here and add them to _block_tags_a and + # _block_tags_b. This way html5 tags are easy to keep track of. + _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption' + + _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del|style|html|head|body' + _block_tags_a += _html5tags + + _strict_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? # any number of lines, minimally matching + # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_a, + re.X | re.M) + + _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math' + _block_tags_b += _html5tags + + _span_tags = ( + 'a|abbr|acronym|b|bdo|big|br|button|cite|code|dfn|em|i|img|input|kbd|label|map|object|output|q' + '|samp|script|select|small|span|strong|sub|sup|textarea|time|tt|var' + ) + + _liberal_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? 
# any number of lines, minimally matching + .* # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_b, + re.X | re.M) + + _html_markdown_attr_re = re.compile( + r'''\s+markdown=("1"|'1')''') + def _hash_html_block_sub( + self, + match: Union[re.Match, str], + raw: bool = False + ) -> str: + if isinstance(match, str): + html = match + tag = None + else: + html = match.group(1) + try: + tag = match.group(2) + except IndexError: + tag = None + + if not tag: + m = re.match(r'.*?<(\S).*?\s*>', html) + # tag shouldn't be none but make the assertion for type checker + assert m is not None + tag = m.group(1) + + if raw and self.safe_mode: + html = self._sanitize_html(html) + elif 'markdown-in-html' in self.extras and 'markdown=' in html: + first_line = html.split('\n', 1)[0] + m = self._html_markdown_attr_re.search(first_line) + if m: + lines = html.split('\n') + # if MD is on same line as opening tag then split across two lines + lines = list(filter(None, (re.split(r'(.*?<%s.*markdown=.*?>)' % tag, lines[0])))) + lines[1:] + # if MD on same line as closing tag, split across two lines + lines = lines[:-1] + list(filter(None, re.split(r'(\s*?.*?$)' % tag, lines[-1]))) + # extract key sections of the match + first_line = lines[0] + middle = '\n'.join(lines[1:-1]) + last_line = lines[-1] + # remove `markdown="1"` attr from tag + first_line = first_line[:m.start()] + first_line[m.end():] + # hash the HTML segments to protect them + f_key = _hash_text(first_line) + self.html_blocks[f_key] = first_line + l_key = _hash_text(last_line) + self.html_blocks[l_key] = last_line + return ''.join(["\n\n", f_key, + "\n\n", middle, "\n\n", + l_key, "\n\n"]) + elif self.extras.get('header-ids', {}).get('mixed') and self._h_tag_re.match(html): + html = self._h_tag_re.sub(self._h_tag_sub, html) + key = _hash_text(html) + self.html_blocks[key] = html + return "\n\n" + key + "\n\n" + + @mark_stage(Stage.HASH_HTML) + def _hash_html_blocks(self, text: str, raw: bool = False) -> str: + """Hashify HTML blocks + + We only want to do this for block-level HTML tags, such as headers, + lists, and tables. That's because we still want to wrap

s around + "paragraphs" that are wrapped in non-block-level tags, such as anchors, + phrase emphasis, and spans. The list of tags we're looking for is + hard-coded. + + @param raw {boolean} indicates if these are raw HTML blocks in + the original source. It makes a difference in "safe" mode. + """ + if '<' not in text: + return text + + # Pass `raw` value into our calls to self._hash_html_block_sub. + hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw) + + # First, look for nested blocks, e.g.: + #
<div>
+ #       <div>
+ #       tags for inner block must be indented.
+ #       </div>
+ #   </div>
+ # + # The outermost tags must start at the left margin for this to match, and + # the inner nested divs must be indented. + # We need to do this before the next, more liberal match, because the next + # match will start at the first `
<div>` and stop at the first `</div>
`. + text = self._strict_tag_block_sub(text, self._block_tags_a, hash_html_block_sub) + + # Now match more liberally, simply from `\n` to `\n` + text = self._liberal_tag_block_re.sub(hash_html_block_sub, text) + + # now do the same for spans that are acting like blocks + # eg: an anchor split over multiple lines for readability + text = self._strict_tag_block_sub( + text, self._span_tags, + # inline elements can't contain block level elements, so only span gamut is required + lambda t: hash_html_block_sub(self._run_span_gamut(t)) + ) + + # Special case just for
<hr />. It was easier to make a special
+         # case than to make the other regex more complicated.
+         if "<hr" in text:
+             _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
+             text = _hr_tag_re.sub(hash_html_block_sub, text)
+
+         # Special case for standalone HTML comments:
+         if "<!--" in text:
+             start = 0
+             while True:
+                 # Delimiters for next comment block.
+                 try:
+                     start_idx = text.index("<!--", start)
+                 except ValueError:
+                     break
+                 try:
+                     end_idx = text.index("-->", start_idx) + 3
+                 except ValueError:
+                     break
+
+                 # Start position for next comment block search.
+                 start = end_idx
+
+                 # Validate whitespace before comment.
+                 if start_idx:
+                     # - Up to `tab_width - 1` spaces before start_idx.
+                     for i in range(self.tab_width - 1):
+                         if text[start_idx - 1] != ' ':
+                             break
+                         start_idx -= 1
+                         if start_idx == 0:
+                             break
+                     # - Must be preceded by 2 newlines or hit the start of
+                     #   the document.
+                     if start_idx == 0:
+                         pass
+                     elif start_idx == 1 and text[0] == '\n':
+                         start_idx = 0  # to match minute detail of Markdown.pl regex
+                     elif text[start_idx-2:start_idx] == '\n\n':
+                         pass
+                     else:
+                         break
+
+                 # Validate whitespace after comment.
+                 # - Any number of spaces and tabs.
+                 while end_idx < len(text):
+                     if text[end_idx] not in ' \t':
+                         break
+                     end_idx += 1
+                 # - Must be followed by 2 newlines or hit end of text.
+                 if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
+                     continue
+
+                 # Escape and hash (must match `_hash_html_block_sub`).
+                 html = text[start_idx:end_idx]
+                 if raw and self.safe_mode:
+                     html = self._sanitize_html(html)
+                 key = _hash_text(html)
+                 self.html_blocks[key] = html
+                 text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
+
+         if "xml" in self.extras:
+             # Treat XML processing instructions and namespaced one-liner
+             # tags as if they were block HTML tags. E.g., if standalone
+             # (i.e. are their own paragraph), the following do not get
+             # wrapped in a <p>
tag: + # + # + # + _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width) + text = _xml_oneliner_re.sub(hash_html_block_sub, text) + + return text + + def _strict_tag_block_sub( + self, + text: str, + html_tags_re: str, + callback: Callable[[str], str], + allow_indent: bool = False + ) -> str: + ''' + Finds and substitutes HTML blocks within blocks of text + + Args: + text: the text to search + html_tags_re: a regex pattern of HTML block tags to match against. + For example, `Markdown._block_tags_a` + callback: callback function that receives the found HTML text block and returns a new str + allow_indent: allow matching HTML blocks that are not completely outdented + ''' + tag_count = 0 + current_tag = html_tags_re + block = '' + result = '' + + for chunk in text.splitlines(True): + is_markup = re.match( + r'^(\s{0,%s})(?:(?=))?(?)' % ('' if allow_indent else '0', current_tag), chunk + ) + block += chunk + + if is_markup: + if chunk.startswith('%s bool: + # super basic check if number of open tags == number of closing tags + return len(re.findall('<%s(?:.*?)>' % tag_name, text)) == len(re.findall('' % tag_name, text)) + + @mark_stage(Stage.LINK_DEFS) + def _strip_link_definitions(self, text: str) -> str: + # Strips link definitions from text, stores the URLs and titles in + # hash references. + less_than_tab = self.tab_width - 1 + + # Link defs are in the form: + # [id]: url "optional title" + _link_def_re = re.compile(r""" + ^[ ]{0,%d}\[(.+)\]: # id = \1 + [ \t]* + \n? # maybe *one* newline + [ \t]* + ? # url = \2 + [ \t]* + (?: + \n? # maybe one newline + [ \t]* + (?<=\s) # lookbehind for whitespace + ['"(] + ([^\n]*) # title = \3 + ['")] + [ \t]* + )? # title is optional + (?:\n+|\Z) + """ % less_than_tab, re.X | re.M | re.U) + return _link_def_re.sub(self._extract_link_def_sub, text) + + def _extract_link_def_sub(self, match: re.Match) -> str: + id, url, title = match.groups() + key = id.lower() # Link IDs are case-insensitive + self.urls[key] = self._encode_amps_and_angles(url) + if title: + self.titles[key] = title + return "" + + def _extract_footnote_def_sub(self, match: re.Match) -> str: + id, text = match.groups() + text = _dedent(text, skip_first_line=not text.startswith('\n')).strip() + normed_id = re.sub(r'\W', '-', id) + # Ensure footnote text ends with a couple newlines (for some + # block gamut matches). + self.footnotes[normed_id] = text + "\n\n" + return "" + + def _strip_footnote_definitions(self, text: str) -> str: + """A footnote definition looks like this: + + [^note-id]: Text of the note. + + May include one or more indented paragraphs. + + Where, + - The 'note-id' can be pretty much anything, though typically it + is the number of the footnote. + - The first paragraph may start on the next line, like so: + + [^note-id]: + Text of the note. + """ + less_than_tab = self.tab_width - 1 + footnote_def_re = re.compile(r''' + ^[ ]{0,%d}\[\^(.+)\]: # id = \1 + [ \t]* + ( # footnote text = \2 + # First line need not start with the spaces. + (?:\s*.*\n+) + (?: + (?:[ ]{%d} | \t) # Subsequent lines must be indented. + .*\n+ + )* + ) + # Lookahead for non-space at line-start, or end of doc. 
+ (?:(?=^[ ]{0,%d}\S)|\Z) + ''' % (less_than_tab, self.tab_width, self.tab_width), + re.X | re.M) + return footnote_def_re.sub(self._extract_footnote_def_sub, text) + + _hr_re = re.compile(r'^[ ]{0,3}([-_*])[ ]{0,2}(\1[ ]{0,2}){2,}$', re.M) + + @mark_stage(Stage.BLOCK_GAMUT) + def _run_block_gamut(self, text: str) -> str: + # These are all the transformations that form block-level + # tags like paragraphs, headers, and list items. + + text = self._do_headers(text) + + # Do Horizontal Rules: + # On the number of spaces in horizontal rules: The spec is fuzzy: "If + # you wish, you may use spaces between the hyphens or asterisks." + # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the + # hr chars to one or two. We'll reproduce that limit here. + hr = "\n tags around block-level tags. + text = self._hash_html_blocks(text) + + text = self._form_paragraphs(text) + + return text + + @mark_stage(Stage.SPAN_GAMUT) + def _run_span_gamut(self, text: str) -> str: + # These are all the transformations that occur *within* block-level + # tags like paragraphs, headers, and list items. + + text = self._do_code_spans(text) + + text = self._escape_special_chars(text) + + # Process anchor and image tags. + text = self._do_links(text) + + # Make links out of things like `` + # Must come after _do_links(), because you can use < and > + # delimiters in inline links like [this](). + text = self._do_auto_links(text) + + text = self._encode_amps_and_angles(text) + + text = self._do_italics_and_bold(text) + + # Do hard breaks + text = re.sub(r" {2,}\n(?!\<(?:\/?(ul|ol|li))\>)", " + | + # auto-link (e.g., ) + <[\w~:/?#\[\]@!$&'\(\)*+,;%=\.\\-]+> + | + # comment + | + <\?.*?\?> # processing instruction + ) + ) + """, re.X) + + @mark_stage(Stage.ESCAPE_SPECIAL) + def _escape_special_chars(self, text: str) -> str: + # Python markdown note: the HTML tokenization here differs from + # that in Markdown.pl, hence the behaviour for subtle cases can + # differ (I believe the tokenizer here does a better job because + # it isn't susceptible to unmatched '<' and '>' in HTML tags). + # Note, however, that '>' is not allowed in an auto-link URL + # here. + lead_escape_re = re.compile(r'^((?:\\\\)*(?!\\))') + escaped = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + # check token is preceded by 0 or more PAIRS of escapes, because escape pairs + # escape themselves and don't affect the token + if is_html_markup and lead_escape_re.match(token): + # Within tags/HTML-comments/auto-links, encode * and _ + # so they don't conflict with their use in Markdown for + # italics and strong. We're replacing each such + # character with its corresponding MD5 checksum value; + # this is likely overkill, but it should prevent us from + # colliding with the escape values by accident. + escape_seq, token = lead_escape_re.split(token)[1:] or ('', token) + escaped.append( + escape_seq.replace('\\\\', self._escape_table['\\']) + + token.replace('*', self._escape_table['*']) + .replace('_', self._escape_table['_']) + ) + else: + escaped.append(self._encode_backslash_escapes(token.replace('\\<', '<'))) + is_html_markup = not is_html_markup + return ''.join(escaped) + + @mark_stage(Stage.HASH_HTML) + def _hash_html_spans(self, text: str) -> str: + # Used for safe_mode. 
+ + def _is_auto_link(s): + if ':' in s and self._auto_link_re.match(s): + return True + elif '@' in s and self._auto_email_link_re.match(s): + return True + return False + + def _is_code_span(index, token): + try: + if token == '': + peek_tokens = split_tokens[index: index + 3] + elif token == '': + peek_tokens = split_tokens[index - 2: index + 1] + else: + return False + except IndexError: + return False + + return re.match(r'md5-[A-Fa-f0-9]{32}', ''.join(peek_tokens)) + + def _is_comment(token): + if self.safe_mode == 'replace': + # don't bother processing each section of comment in replace mode. Just do the whole thing + return + return re.match(r'()', token) + + def _hash(token): + key = _hash_text(token) + self.html_spans[key] = token + return key + + tokens = [] + split_tokens = self._sorta_html_tokenize_re.split(text) + is_html_markup = False + for index, token in enumerate(split_tokens): + if is_html_markup and not _is_auto_link(token) and not _is_code_span(index, token): + is_comment = _is_comment(token) + if is_comment: + tokens.append(_hash(self._sanitize_html(is_comment.group(1)))) + # sanitise but leave comment body intact for further markdown processing + tokens.append(self._sanitize_html(is_comment.group(2))) + tokens.append(_hash(self._sanitize_html(is_comment.group(3)))) + else: + tokens.append(_hash(self._sanitize_html(token))) + else: + tokens.append(self._encode_incomplete_tags(token)) + is_html_markup = not is_html_markup + return ''.join(tokens) + + def _unhash_html_spans(self, text: str) -> str: + for key, sanitized in list(self.html_spans.items()): + text = text.replace(key, sanitized) + return text + + def _sanitize_html(self, s: str) -> str: + if self.safe_mode == "replace": + return self.html_removed_text + elif self.safe_mode == "escape": + replacements = [ + ('&', '&'), + ('<', '<'), + ('>', '>'), + ] + for before, after in replacements: + s = s.replace(before, after) + return s + else: + raise MarkdownError("invalid value for 'safe_mode': %r (must be " + "'escape' or 'replace')" % self.safe_mode) + + _inline_link_title = re.compile(r''' + ( # \1 + [ \t]+ + (['"]) # quote char = \2 + (?P.*?) + \2 + )? # title is optional + \)$ + ''', re.X | re.S) + _tail_of_reference_link_re = re.compile(r''' + # Match tail of: [text][id] + [ ]? # one optional space + (?:\n[ ]*)? # one optional newline followed by spaces + \[ + (?P<id>.*?) + \] + ''', re.X | re.S) + + _whitespace = re.compile(r'\s*') + + _strip_anglebrackets = re.compile(r'<(.*)>.*') + + def _find_non_whitespace(self, text: str, start: int) -> int: + """Returns the index of the first non-whitespace character in text + after (and including) start + """ + match = self._whitespace.match(text, start) + return match.end() if match else len(text) + + def _find_balanced(self, text: str, start: int, open_c: str, close_c: str) -> int: + """Returns the index where the open_c and close_c characters balance + out - the same number of open_c and close_c are encountered - or the + end of string if it's reached before the balance point is found. 
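+
+ For example (an illustrative call, starting just past the opening
+ parenthesis at index 0):
+
+ >>> Markdown()._find_balanced("(a(b)c)d", 1, "(", ")")
+ 7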
+ """ + i = start + l = len(text) + count = 1 + while count > 0 and i < l: + if text[i] == open_c: + count += 1 + elif text[i] == close_c: + count -= 1 + i += 1 + return i + + def _extract_url_and_title(self, text: str, start: int) -> Union[Tuple[str, str, int], Tuple[None, None, None]]: + """Extracts the url and (optional) title from the tail of a link""" + # text[start] equals the opening parenthesis + idx = self._find_non_whitespace(text, start+1) + if idx == len(text): + return None, None, None + end_idx = idx + has_anglebrackets = text[idx] == "<" + if has_anglebrackets: + end_idx = self._find_balanced(text, end_idx+1, "<", ">") + end_idx = self._find_balanced(text, end_idx, "(", ")") + match = self._inline_link_title.search(text, idx, end_idx) + if not match: + return None, None, None + url, title = text[idx:match.start()], match.group("title") + if has_anglebrackets: + url = self._strip_anglebrackets.sub(r'\1', url) + return url, title, end_idx + + # https://developer.mozilla.org/en-US/docs/web/http/basics_of_http/data_urls + # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types + _data_url_re = re.compile(r''' + data: + # in format type/subtype;parameter=optional + (?P<mime>\w+/[\w+\.-]+(?:;\w+=[\w+\.-]+)?)? + # optional base64 token + (?P<token>;base64)? + ,(?P<data>.*) + ''', re.X) + + def _protect_url(self, url: str) -> str: + ''' + Function that passes a URL through `_html_escape_url` to remove any nasty characters, + and then hashes the now "safe" URL to prevent other safety mechanisms from tampering + with it (eg: escaping "&" in URL parameters) + ''' + data_url = self._data_url_re.match(url) + charset = None + if data_url is not None: + mime = data_url.group('mime') or '' + if mime.startswith('image/') and data_url.group('token') == ';base64': + charset='base64' + url = _html_escape_url(url, safe_mode=self.safe_mode, charset=charset) + key = _hash_text(url) + self._escape_table[url] = key + return key + + _safe_protocols = r'(?:https?|ftp):\/\/|(?:mailto|tel):' + + @property + def _safe_href(self): + ''' + _safe_href is adapted from pagedown's Markdown.Sanitizer.js + From: https://github.com/StackExchange/pagedown/blob/master/LICENSE.txt + Original Showdown code copyright (c) 2007 John Fraser + Modifications and bugfixes (c) 2009 Dana Robinson + Modifications and bugfixes (c) 2009-2014 Stack Exchange Inc. + ''' + safe = r'-\w' + # omitted ['"<>] for XSS reasons + less_safe = r'#/\.!#$%&\(\)\+,/:;=\?@\[\]^`\{\}\|~' + # dot seperated hostname, optional port number, not followed by protocol seperator + domain = r'(?:[%s]+(?:\.[%s]+)*)(?:(?<!tel):\d+/?)?(?![^:/]*:/*)' % (safe, safe) + fragment = r'[%s]*' % (safe + less_safe) + + return re.compile(r'^(?:(%s)?(%s)(%s)|(#|\.{,2}/)(%s))$' % (self._safe_protocols, domain, fragment, fragment), re.I) + + @mark_stage(Stage.LINKS) + def _do_links(self, text: str) -> str: + """Turn Markdown link shortcuts into XHTML <a> and <img> tags. + + This is a combination of Markdown.pl's _DoAnchors() and + _DoImages(). They are done together because that simplified the + approach. It was necessary to use a different approach than + Markdown.pl because of the lack of atomic matching support in + Python's regex engine used in $g_nested_brackets. + """ + MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24 + + # `anchor_allowed_pos` is used to support img links inside + # anchors, but not anchors inside anchors. An anchor's start + # pos must be `>= anchor_allowed_pos`. 
+ anchor_allowed_pos = 0 + + curr_pos = 0 + while True: # Handle the next link. + # The next '[' is the start of: + # - an inline anchor: [text](url "title") + # - a reference anchor: [text][id] + # - an inline img: ![text](url "title") + # - a reference img: ![text][id] + # - a footnote ref: [^id] + # (Only if 'footnotes' extra enabled) + # - a footnote defn: [^id]: ... + # (Only if 'footnotes' extra enabled) These have already + # been stripped in _strip_footnote_definitions() so no + # need to watch for them. + # - a link definition: [id]: url "title" + # These have already been stripped in + # _strip_link_definitions() so no need to watch for them. + # - not markup: [...anything else... + try: + start_idx = text.index('[', curr_pos) + except ValueError: + break + text_length = len(text) + + # Find the matching closing ']'. + # Markdown.pl allows *matching* brackets in link text so we + # will here too. Markdown.pl *doesn't* currently allow + # matching brackets in img alt text -- we'll differ in that + # regard. + bracket_depth = 0 + for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, + text_length)): + ch = text[p] + if ch == ']': + bracket_depth -= 1 + if bracket_depth < 0: + break + elif ch == '[': + bracket_depth += 1 + else: + # Closing bracket not found within sentinel length. + # This isn't markup. + curr_pos = start_idx + 1 + continue + link_text = text[start_idx+1:p] + + # Fix for issue 341 - Injecting XSS into link text + if self.safe_mode: + link_text = self._hash_html_spans(link_text) + link_text = self._unhash_html_spans(link_text) + + # Possibly a footnote ref? + if "footnotes" in self.extras and link_text.startswith("^"): + normed_id = re.sub(r'\W', '-', link_text[1:]) + if normed_id in self.footnotes: + self.footnote_ids.append(normed_id) + result = '<sup class="footnote-ref" id="fnref-%s">' \ + '<a href="#fn-%s">%s</a></sup>' \ + % (normed_id, normed_id, len(self.footnote_ids)) + text = text[:start_idx] + result + text[p+1:] + else: + # This id isn't defined, leave the markup alone. + curr_pos = p+1 + continue + + # Now determine what this is by the remainder. + p += 1 + + # Inline anchor or img? + if text[p:p + 1] == '(': # attempt at perf improvement + url, title, url_end_idx = self._extract_url_and_title(text, p) + if url is not None: + # Handle an inline anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + + # We've got to encode these to avoid conflicting + # with italics/bold. 
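+ # e.g. a URL like http://example.com/a_b*c would otherwise sprout
+ # <em>/<strong> markup later; the MD5 placeholders from _escape_table
+ # are swapped back out by _unescape_special_chars() at the end of the run.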
+ url = url.replace('*', self._escape_table['*']) \ + .replace('_', self._escape_table['_']) + if title: + title_str = ' title="%s"' % ( + _xml_escape_attr(title) + .replace('*', self._escape_table['*']) + .replace('_', self._escape_table['_'])) + else: + title_str = '' + if is_img: + img_class_str = self._html_class_str_from_tag("img") + result = '<img src="%s" alt="%s"%s%s%s' \ + % (self._protect_url(url), + _xml_escape_attr(link_text), + title_str, + img_class_str, + self.empty_element_suffix) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + curr_pos = start_idx + len(result) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[url_end_idx:] + elif start_idx >= anchor_allowed_pos: + safe_link = self._safe_href.match(url) + if self.safe_mode and not safe_link: + result_head = '<a href="#"%s>' % (title_str) + else: + result_head = '<a href="%s"%s>' % (self._protect_url(url), title_str) + result = '%s%s</a>' % (result_head, link_text) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[url_end_idx:] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + continue + + # Reference anchor or img? + else: + match = self._tail_of_reference_link_re.match(text, p) + if match: + # Handle a reference-style anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + link_id = match.group("id").lower() + if not link_id: + link_id = link_text.lower() # for links like [this][] + if link_id in self.urls: + url = self.urls[link_id] + # We've got to encode these to avoid conflicting + # with italics/bold. + url = url.replace('*', self._escape_table['*']) \ + .replace('_', self._escape_table['_']) + title = self.titles.get(link_id) + if title: + title = _xml_escape_attr(title) \ + .replace('*', self._escape_table['*']) \ + .replace('_', self._escape_table['_']) + title_str = ' title="%s"' % title + else: + title_str = '' + if is_img: + img_class_str = self._html_class_str_from_tag("img") + result = '<img src="%s" alt="%s"%s%s%s' \ + % (self._protect_url(url), + _xml_escape_attr(link_text), + title_str, + img_class_str, + self.empty_element_suffix) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + if self.safe_mode and not self._safe_href.match(url): + result_head = '<a href="#"%s>' % (title_str) + else: + result_head = '<a href="%s"%s>' % (self._protect_url(url), title_str) + result = '%s%s</a>' % (result_head, link_text) + if "smarty-pants" in self.extras: + result = result.replace('"', self._escape_table['"']) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + else: + # This id isn't defined, leave the markup alone. + # set current pos to end of link title and continue from there + curr_pos = p + continue + + # Otherwise, it isn't markup. 
+ curr_pos = start_idx + 1 + + return text + + def header_id_from_text(self, + text: str, + prefix: str, + n: Optional[int] = None + ) -> str: + """Generate a header id attribute value from the given header + HTML content. + + This is only called if the "header-ids" extra is enabled. + Subclasses may override this for different header ids. + + @param text {str} The text of the header tag + @param prefix {str} The requested prefix for header ids. This is the + value of the "header-ids" extra key, if any. Otherwise, None. + @param n {int} (unused) The <hN> tag number, i.e. `1` for an <h1> tag. + @returns {str} The value for the header tag's "id" attribute. Return + None to not have an id attribute and to exclude this header from + the TOC (if the "toc" extra is specified). + """ + header_id = _slugify(text) + if prefix and isinstance(prefix, str): + header_id = prefix + '-' + header_id + + self._count_from_header_id[header_id] += 1 + if 0 == len(header_id) or self._count_from_header_id[header_id] > 1: + header_id += '-%s' % self._count_from_header_id[header_id] + + return header_id + + def _header_id_exists(self, text: str) -> bool: + header_id = _slugify(text) + prefix = self.extras['header-ids'].get('prefix') + if prefix and isinstance(prefix, str): + header_id = prefix + '-' + header_id + return header_id in self._count_from_header_id or header_id in map(lambda x: x[1], self._toc) + + def _toc_add_entry(self, level: int, id: str, name: str) -> None: + if level > self._toc_depth: + return + if self._toc is None: + self._toc = [] + self._toc.append((level, id, self._unescape_special_chars(name))) + + _h_re_base = r''' + (^(.+)[ \t]{0,99}\n(=+|-+)[ \t]*\n+) + | + (^(\#{1,6}) # \1 = string of #'s + [ \t]%s + (.+?) # \2 = Header text + [ \t]{0,99} + (?<!\\) # ensure not an escaped trailing '#' + \#* # optional closing #'s (not counted) + \n+ + ) + ''' + + _h_re = re.compile(_h_re_base % '*', re.X | re.M) + _h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M) + + def _h_sub(self, match: re.Match) -> str: + '''Handles processing markdown headers''' + if match.group(1) is not None and match.group(3) == "-": + return match.group(1) + elif match.group(1) is not None: + # Setext header + n = {"=": 1, "-": 2}[match.group(3)[0]] + header_group = match.group(2) + else: + # atx header + n = len(match.group(5)) + header_group = match.group(6) + + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + header_id_attr = "" + if "header-ids" in self.extras: + header_id = self.header_id_from_text(header_group, + self.extras["header-ids"].get('prefix'), n) + if header_id: + header_id_attr = ' id="%s"' % header_id + html = self._run_span_gamut(header_group) + if "toc" in self.extras and header_id: + self._toc_add_entry(n, header_id, html) + return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n) + + _h_tag_re = re.compile(r''' + ^<h([1-6])(.*)> # \1 tag num, \2 attrs + (.*) # \3 text + </h\1> + ''', re.X | re.M) + + def _h_tag_sub(self, match: re.Match) -> str: + '''Different to `_h_sub` in that this function handles existing HTML headers''' + text = match.string[match.start(): match.end()] + h_level = int(match.group(1)) + # extract id= attr from tag, trying to account for regex "misses" + id_attr = (re.match(r'.*?id=(\S+)?.*', match.group(2) or '') or '') + if id_attr: + # if id attr exists, extract that + id_attr = id_attr.group(1) or '' + id_attr = id_attr.strip('\'" ') + h_text = match.group(3) + + # check if header was already processed 
(ie: was a markdown header rather than HTML) + if id_attr and self._header_id_exists(id_attr): + return text + + # generate new header id if none existed + header_id = id_attr or self.header_id_from_text(h_text, self.extras['header-ids'].get('prefix'), h_level) + if "toc" in self.extras: + self._toc_add_entry(h_level, header_id, h_text) + if header_id and not id_attr: + # '<h[digit]' + new ID + '...' + return text[:3] + ' id="%s"' % header_id + text[3:] + return text + + @mark_stage(Stage.HEADERS) + def _do_headers(self, text: str) -> str: + # Setext-style headers: + # Header 1 + # ======== + # + # Header 2 + # -------- + + # atx-style headers: + # # Header 1 + # ## Header 2 + # ## Header 2 with closing hashes ## + # ... + # ###### Header 6 + + if 'tag-friendly' in self.extras: + return self._h_re_tag_friendly.sub(self._h_sub, text) + return self._h_re.sub(self._h_sub, text) + + _marker_ul_chars = '*+-' + _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars + _marker_ul = '(?:[%s])' % _marker_ul_chars + _marker_ol = r'(?:\d+\.)' + + def _list_sub(self, match: re.Match) -> str: + lst = match.group(1) + lst_type = match.group(4) in self._marker_ul_chars and "ul" or "ol" + + if lst_type == 'ol' and match.group(4) != '1.': + # if list doesn't start at 1 then set the ol start attribute + lst_opts = ' start="%s"' % match.group(4)[:-1] + else: + lst_opts = '' + + lst_opts = lst_opts + self._html_class_str_from_tag(lst_type) + + result = self._process_list_items(lst) + if self.list_level: + return "<%s%s>\n%s</%s>\n" % (lst_type, lst_opts, result, lst_type) + else: + return "<%s%s>\n%s</%s>\n\n" % (lst_type, lst_opts, result, lst_type) + + @mark_stage(Stage.LISTS) + def _do_lists(self, text: str) -> str: + # Form HTML ordered (numbered) and unordered (bulleted) lists. + + # Iterate over each *non-overlapping* list match. + pos = 0 + while True: + # Find the *first* hit for either list style (ul or ol). We + # match ul and ol separately to avoid adjacent lists of different + # types running into each other (see issue #16). + hits = [] + for marker_pat in (self._marker_ul, self._marker_ol): + less_than_tab = self.tab_width - 1 + other_marker_pat = self._marker_ul if marker_pat == self._marker_ol else self._marker_ol + whole_list = r''' + ( # \1 = whole list + ( # \2 + ([ ]{0,%d}) # \3 = the indentation level of the list item marker + (%s) # \4 = first list item marker + [ \t]+ + (?!\ *\4\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case. + ) + (?:.+?) + ( # \5 + \Z + | + \n{2,} + (?=\S) + (?! # Negative lookahead for another list item marker + [ \t]* + %s[ \t]+ + ) + | + \n+ + (?= + \3 # lookahead for a different style of list item marker + %s[ \t]+ + ) + ) + ) + ''' % (less_than_tab, marker_pat, marker_pat, other_marker_pat) + if self.list_level: # sub-list + list_re = re.compile("^"+whole_list, re.X | re.M | re.S) + else: + list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list, + re.X | re.M | re.S) + match = list_re.search(text, pos) + if match: + hits.append((match.start(), match)) + if not hits: + break + hits.sort() + match = hits[0][1] + start, end = match.span() + middle = self._list_sub(match) + text = text[:start] + middle + text[end:] + pos = start + len(middle) # start pos for next attempted match + + return text + + _list_item_re = re.compile(r''' + (\n)? # leading line = \1 + (^[ \t]*) # leading whitespace = \2 + (?P<marker>%s) [ \t]+ # list marker = \3 + ((?:.+?) 
# list item text = \4 + (\n{1,2})) # eols = \5 + (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+)) + ''' % (_marker_any, _marker_any), + re.M | re.X | re.S) + + _task_list_item_re = re.compile(r''' + (\[[\ xX]\])[ \t]+ # tasklist marker = \1 + (.*) # list item text = \2 + ''', re.M | re.X | re.S) + + _task_list_warpper_str = r'<input type="checkbox" class="task-list-item-checkbox" %sdisabled> %s' + + def _task_list_item_sub(self, match: re.Match) -> str: + marker = match.group(1) + item_text = match.group(2) + if marker in ['[x]','[X]']: + return self._task_list_warpper_str % ('checked ', item_text) + elif marker == '[ ]': + return self._task_list_warpper_str % ('', item_text) + # returning None has same effect as returning empty str, but only + # one makes the type checker happy + return '' + + _last_li_endswith_two_eols = False + def _list_item_sub(self, match: re.Match) -> str: + item = match.group(4) + leading_line = match.group(1) + if leading_line or "\n\n" in item or self._last_li_endswith_two_eols: + item = self._uniform_outdent(item, min_outdent=' ', max_outdent=self.tab)[1] + item = self._run_block_gamut(item) + else: + # Recursion for sub-lists: + item = self._do_lists(self._uniform_outdent(item, min_outdent=' ')[1]) + if item.endswith('\n'): + item = item[:-1] + item = self._run_span_gamut(item) + self._last_li_endswith_two_eols = (len(match.group(5)) == 2) + + if "task_list" in self.extras: + item = self._task_list_item_re.sub(self._task_list_item_sub, item) + + return "<li>%s</li>\n" % item + + def _process_list_items(self, list_str: str) -> str: + # Process the contents of a single ordered or unordered list, + # splitting it into individual list items. + + # The $g_list_level global keeps track of when we're inside a list. + # Each time we enter a list, we increment it; when we leave a list, + # we decrement. If it's zero, we're not in a list anymore. + # + # We do this because when we're not inside a list, we want to treat + # something like this: + # + # I recommend upgrading to version + # 8. Oops, now this line is treated + # as a sub-list. + # + # As a single paragraph, despite the fact that the second line starts + # with a digit-period-space sequence. + # + # Whereas when we're inside a list (or sub-list), that line will be + # treated as the start of a sub-list. What a kludge, huh? This is + # an aspect of Markdown's syntax that's hard to parse perfectly + # without resorting to mind-reading. Perhaps the solution is to + # change the syntax rules such that sub-lists must start with a + # starting cardinal number; e.g. "1." or "a.". + self.list_level += 1 + self._last_li_endswith_two_eols = False + list_str = list_str.rstrip('\n') + '\n' + list_str = self._list_item_re.sub(self._list_item_sub, list_str) + self.list_level -= 1 + return list_str + + def _get_pygments_lexer(self, lexer_name: str): + ''' + Returns: + `pygments.Lexer` or None if a lexer matching `lexer_name` is + not found + ''' + try: + from pygments import lexers, util + except ImportError: + return None + try: + return lexers.get_lexer_by_name(lexer_name) + except util.ClassNotFound: + return None + + def _color_with_pygments( + self, + codeblock: str, + lexer, + **formatter_opts + ) -> str: + ''' + TODO: this function is only referenced by the `FencedCodeBlocks` + extra. 
May be worth moving over there + + Args: + codeblock: the codeblock to highlight + lexer (pygments.Lexer): lexer to use + formatter_opts: pygments HtmlFormatter options + ''' + import pygments + import pygments.formatters + + class HtmlCodeFormatter(pygments.formatters.HtmlFormatter): + def _wrap_code(self, inner): + """A function for use in a Pygments Formatter which + wraps in <code> tags. + """ + yield 0, "<code>" + for tup in inner: + yield tup + yield 0, "</code>" + + def _add_newline(self, inner): + # Add newlines around the inner contents so that _strict_tag_block_re matches the outer div. + yield 0, "\n" + yield from inner + yield 0, "\n" + + def wrap(self, source, outfile=None): + """Return the source with a code, pre, and div.""" + if outfile is None: + # pygments >= 2.12 + return self._add_newline(self._wrap_pre(self._wrap_code(source))) + else: + # pygments < 2.12 + return self._wrap_div(self._add_newline(self._wrap_pre(self._wrap_code(source)))) + + formatter_opts.setdefault("cssclass", "codehilite") + formatter = HtmlCodeFormatter(**formatter_opts) + return pygments.highlight(codeblock, lexer, formatter) + + def _code_block_sub(self, match: re.Match) -> str: + codeblock = match.group(1) + codeblock = self._outdent(codeblock) + codeblock = self._detab(codeblock) + codeblock = codeblock.lstrip('\n') # trim leading newlines + codeblock = codeblock.rstrip() # trim trailing whitespace + + pre_class_str = self._html_class_str_from_tag("pre") + code_class_str = self._html_class_str_from_tag("code") + + codeblock = self._encode_code(codeblock) + + return "\n<pre%s><code%s>%s\n</code></pre>\n" % ( + pre_class_str, code_class_str, codeblock) + + def _html_class_str_from_tag(self, tag: str) -> str: + """Get the appropriate ' class="..."' string (note the leading + space), if any, for the given tag. + """ + if "html-classes" not in self.extras: + return "" + try: + html_classes_from_tag = self.extras["html-classes"] + except TypeError: + return "" + else: + if isinstance(html_classes_from_tag, dict): + if tag in html_classes_from_tag: + return ' class="%s"' % html_classes_from_tag[tag] + return "" + + @mark_stage(Stage.CODE_BLOCKS) + def _do_code_blocks(self, text: str) -> str: + """Process Markdown `<pre><code>` blocks.""" + code_block_re = re.compile(r''' + (?:\n\n|\A\n?) + ( # $1 = the code block -- one or more lines, starting with a space/tab + (?: + (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces + .*\n+ + )+ + ) + ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc + # Lookahead to make sure this block isn't already in a code block. + # Needed when syntax highlighting is being used. + (?!([^<]|<(/?)span)*\</code\>) + ''' % (self.tab_width, self.tab_width), + re.M | re.X) + return code_block_re.sub(self._code_block_sub, text) + + # Rules for a code span: + # - backslash escapes are not interpreted in a code span + # - to include one or or a run of more backticks the delimiters must + # be a longer run of backticks + # - cannot start or end a code span with a backtick; pad with a + # space and that space will be removed in the emitted HTML + # See `test/tm-cases/escapes.text` for a number of edge-case + # examples. + _code_span_re = re.compile(r''' + (?<!\\) + (`+) # \1 = Opening run of ` + (?!`) # See Note A test/tm-cases/escapes.text + (.+?) 
# \2 = The code block + (?<!`) + \1 # Matching closer + (?!`) + ''', re.X | re.S) + + def _code_span_sub(self, match: re.Match) -> str: + c = match.group(2).strip(" \t") + c = self._encode_code(c) + return "<code%s>%s</code>" % (self._html_class_str_from_tag("code"), c) + + @mark_stage(Stage.CODE_SPANS) + def _do_code_spans(self, text: str) -> str: + # * Backtick quotes are used for <code></code> spans. + # + # * You can use multiple backticks as the delimiters if you want to + # include literal backticks in the code span. So, this input: + # + # Just type ``foo `bar` baz`` at the prompt. + # + # Will translate to: + # + # <p>Just type <code>foo `bar` baz</code> at the prompt.</p> + # + # There's no arbitrary limit to the number of backticks you + # can use as delimters. If you need three consecutive backticks + # in your code, use four for delimiters, etc. + # + # * You can use spaces to get literal backticks at the edges: + # + # ... type `` `bar` `` ... + # + # Turns to: + # + # ... type <code>`bar`</code> ... + return self._code_span_re.sub(self._code_span_sub, text) + + def _encode_code(self, text: str) -> str: + """Encode/escape certain characters inside Markdown code runs. + The point is that in code, these characters are literals, + and lose their special Markdown meanings. + """ + replacements = [ + # Encode all ampersands; HTML entities are not + # entities within a Markdown code span. + ('&', '&'), + # Do the angle bracket song and dance: + ('<', '<'), + ('>', '>'), + ] + for before, after in replacements: + text = text.replace(before, after) + hashed = _hash_text(text) + self._code_table[text] = hashed + return hashed + + _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]?)(?<=\S)\1", re.S) + _em_re = re.compile(r"(\*|_)(?=\S)(.*?\S)\1", re.S) + + @mark_stage(Stage.ITALIC_AND_BOLD) + def _do_italics_and_bold(self, text: str) -> str: + # <strong> must go first: + text = self._strong_re.sub(r"<strong>\2</strong>", text) + text = self._em_re.sub(r"<em>\2</em>", text) + return text + + _block_quote_base = r''' + ( # Wrap whole match in \1 + ( + ^[ \t]*>%s[ \t]? 
# '>' at the start of a line + .+\n # rest of the first line + (.+\n)* # subsequent consecutive lines + )+ + ) + ''' + _block_quote_re = re.compile(_block_quote_base % '', re.M | re.X) + _block_quote_re_spoiler = re.compile(_block_quote_base % '[ \t]*?!?', re.M | re.X) + _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M) + _bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M) + _bq_all_lines_spoilers = re.compile(r'\A(?:^[ \t]*>[ \t]*?!.*[\n\r]*)+\Z', re.M) + _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S) + def _dedent_two_spaces_sub(self, match: re.Match) -> str: + return re.sub(r'(?m)^ ', '', match.group(1)) + + def _block_quote_sub(self, match: re.Match) -> str: + bq = match.group(1) + is_spoiler = 'spoiler' in self.extras and self._bq_all_lines_spoilers.match(bq) + # trim one level of quoting + if is_spoiler: + bq = self._bq_one_level_re_spoiler.sub('', bq) + else: + bq = self._bq_one_level_re.sub('', bq) + # trim whitespace-only lines + bq = self._ws_only_line_re.sub('', bq) + bq = self._run_block_gamut(bq) # recurse + + bq = re.sub('(?m)^', ' ', bq) + # These leading spaces screw with <pre> content, so we need to fix that: + bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq) + + if is_spoiler: + return '<blockquote class="spoiler">\n%s\n</blockquote>\n\n' % bq + else: + return '<blockquote>\n%s\n</blockquote>\n\n' % bq + + @mark_stage(Stage.BLOCK_QUOTES) + def _do_block_quotes(self, text: str) -> str: + if '>' not in text: + return text + if 'spoiler' in self.extras: + return self._block_quote_re_spoiler.sub(self._block_quote_sub, text) + else: + return self._block_quote_re.sub(self._block_quote_sub, text) + + @mark_stage(Stage.PARAGRAPHS) + def _form_paragraphs(self, text: str) -> str: + # Strip leading and trailing lines: + text = text.strip('\n') + + # Wrap <p> tags. + grafs = [] + for i, graf in enumerate(re.split(r"\n{2,}", text)): + if graf in self.html_blocks: + # Unhashify HTML blocks + grafs.append(self.html_blocks[graf]) + else: + cuddled_list = None + if "cuddled-lists" in self.extras: + # Need to put back trailing '\n' for `_list_item_re` + # match at the end of the paragraph. + li = self._list_item_re.search(graf + '\n') + # Two of the same list marker in this paragraph: a likely + # candidate for a list cuddled to preceding paragraph + # text (issue 33). Note the `[-1]` is a quick way to + # consider numeric bullets (e.g. "1." and "2.") to be + # equal. + if (li and len(li.group(2)) <= 3 + and ( + (li.group("next_marker") and li.group("marker")[-1] == li.group("next_marker")[-1]) + or + li.group("next_marker") is None + ) + ): + start = li.start() + cuddled_list = self._do_lists(graf[start:]).rstrip("\n") + if re.match(r'^<(?:ul|ol).*?>', cuddled_list): + graf = graf[:start] + else: + # Not quite a cuddled list. (See not_quite_a_list_cuddled_lists test case) + # Store as a simple paragraph. + graf = cuddled_list + cuddled_list = None + + # Wrap <p> tags. + graf = self._run_span_gamut(graf) + grafs.append("<p%s>" % self._html_class_str_from_tag('p') + graf.lstrip(" \t") + "</p>") + + if cuddled_list: + grafs.append(cuddled_list) + + return "\n\n".join(grafs) + + def _add_footnotes(self, text: str) -> str: + if self.footnotes: + footer = [ + '<div class="footnotes">', + '<hr' + self.empty_element_suffix, + '<ol>', + ] + + if not self.footnote_title: + self.footnote_title = "Jump back to footnote %d in the text." 
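+ # Note: the title is interpolated into the backlink below, so a custom
+ # footnote_title should keep a single %d placeholder for the footnote
+ # number; without one, the TypeError fallback below reverts to the
+ # default wording.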
+ if not self.footnote_return_symbol: + self.footnote_return_symbol = "↩" + + # self.footnotes is generated in _strip_footnote_definitions, which runs re.sub on the whole + # text. This means that the dict keys are inserted in order of appearance. Use the dict to + # sort footnote ids by that same order + self.footnote_ids.sort(key=lambda a: list(self.footnotes.keys()).index(a)) + for i, id in enumerate(self.footnote_ids): + if i != 0: + footer.append('') + footer.append('<li id="fn-%s">' % id) + footer.append(self._run_block_gamut(self.footnotes[id])) + try: + backlink = ('<a href="#fnref-%s" ' + + 'class="footnoteBackLink" ' + + 'title="' + self.footnote_title + '">' + + self.footnote_return_symbol + + '</a>') % (id, i+1) + except TypeError: + log.debug("Footnote error. `footnote_title` " + "must include parameter. Using defaults.") + backlink = ('<a href="#fnref-%s" ' + 'class="footnoteBackLink" ' + 'title="Jump back to footnote %d in the text.">' + '↩</a>' % (id, i+1)) + + if footer[-1].endswith("</p>"): + footer[-1] = footer[-1][:-len("</p>")] \ + + ' ' + backlink + "</p>" + else: + footer.append("\n<p>%s</p>" % backlink) + footer.append('</li>') + footer.append('</ol>') + footer.append('</div>') + return text + '\n\n' + '\n'.join(footer) + else: + return text + + _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I) + _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I) + + def _encode_amps_and_angles(self, text: str) -> str: + # Smart processing for ampersands and angle brackets that need + # to be encoded. + text = _AMPERSAND_RE.sub('&', text) + + # Encode naked <'s + text = self._naked_lt_re.sub('<', text) + + # Encode naked >'s + # Note: Other markdown implementations (e.g. Markdown.pl, PHP + # Markdown) don't do this. + text = self._naked_gt_re.sub('>', text) + return text + + _incomplete_tags_re = re.compile(r"<(!--|/?\w+?(?!\w)\s*?.+?(?:[\s/]+?|$))") + + def _encode_incomplete_tags(self, text: str) -> str: + if self.safe_mode not in ("replace", "escape"): + return text + + if text.endswith(">"): + return text # this is not an incomplete tag, this is a link in the form <http://x.y.z> + + def incomplete_tags_sub(match): + return match.group().replace('<', '<') + + return self._incomplete_tags_re.sub(incomplete_tags_sub, text) + + def _encode_backslash_escapes(self, text: str) -> str: + for ch, escape in list(self._escape_table.items()): + text = text.replace("\\"+ch, escape) + return text + + _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I) + def _auto_link_sub(self, match: re.Match) -> str: + g1 = match.group(1) + return '<a href="%s">%s</a>' % (self._protect_url(g1), g1) + + _auto_email_link_re = re.compile(r""" + < + (?:mailto:)? + ( + [-.\w]+ + \@ + [-\w]+(\.[-\w]+)*\.[a-z]+ + ) + > + """, re.I | re.X | re.U) + def _auto_email_link_sub(self, match: re.Match) -> str: + return self._encode_email_address( + self._unescape_special_chars(match.group(1))) + + def _do_auto_links(self, text: str) -> str: + text = self._auto_link_re.sub(self._auto_link_sub, text) + text = self._auto_email_link_re.sub(self._auto_email_link_sub, text) + return text + + def _encode_email_address(self, addr: str) -> str: + # Input: an email address, e.g. "foo@example.com" + # + # Output: the email address as a mailto link, with each character + # of the address encoded as either a decimal or hex entity, in + # the hopes of foiling most address harvesting spam bots. 
E.g.: + # + # <a href="mailto:foo@e + # xample.com">foo + # @example.com</a> + # + # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk + # mailing list: <http://tinyurl.com/yu7ue> + chars = [_xml_encode_email_char_at_random(ch) + for ch in "mailto:" + addr] + # Strip the mailto: from the visible part. + addr = '<a href="%s">%s</a>' \ + % (''.join(chars), ''.join(chars[7:])) + return addr + + def _unescape_special_chars(self, text: str) -> str: + # Swap back in all the special characters we've hidden. + hashmap = tuple(self._escape_table.items()) + tuple(self._code_table.items()) + # html_blocks table is in format {hash: item} compared to usual {item: hash} + hashmap += tuple(tuple(reversed(i)) for i in self.html_blocks.items()) + while True: + orig_text = text + for ch, hash in hashmap: + text = text.replace(hash, ch) + if text == orig_text: + break + return text + + def _outdent(self, text: str) -> str: + # Remove one level of line-leading tabs or spaces + return self._outdent_re.sub('', text) + + @staticmethod + def _uniform_outdent( + text: str, + min_outdent: Optional[str] = None, + max_outdent: Optional[str] = None + ) -> Tuple[str, str]: + ''' + Removes the smallest common leading indentation from each (non empty) + line of `text` and returns said indent along with the outdented text. + + Args: + min_outdent: make sure the smallest common whitespace is at least this size + max_outdent: the maximum amount a line can be outdented by + ''' + + # find the leading whitespace for every line + whitespace: List[Union[str, None]] = [ + re.findall(r'^[ \t]*', line)[0] if line else None + for line in text.splitlines() + ] + whitespace_not_empty = [i for i in whitespace if i is not None] + + # if no whitespace detected (ie: no lines in code block, issue #505) + if not whitespace_not_empty: + return '', text + + # get minimum common whitespace + outdent = min(whitespace_not_empty) + # adjust min common ws to be within bounds + if min_outdent is not None: + outdent = min([i for i in whitespace_not_empty if i >= min_outdent] or [min_outdent]) + if max_outdent is not None: + outdent = min(outdent, max_outdent) + + outdented = [] + for line_ws, line in zip(whitespace, text.splitlines(True)): + if line.startswith(outdent): + # if line starts with smallest common ws, dedent it + outdented.append(line.replace(outdent, '', 1)) + elif line_ws is not None and line_ws < outdent: + # if less indented than min common whitespace then outdent as much as possible + outdented.append(line.replace(line_ws, '', 1)) + else: + outdented.append(line) + + return outdent, ''.join(outdented) + + @staticmethod + def _uniform_indent( + text: str, + indent: str, + include_empty_lines: bool = False, + indent_empty_lines: bool = False + ) -> str: + ''' + Uniformly indent a block of text by a fixed amount + + Args: + text: the text to indent + indent: a string containing the indent to apply + include_empty_lines: don't remove whitespace only lines + indent_empty_lines: indent whitespace only lines with the rest of the text + ''' + blocks = [] + for line in text.splitlines(True): + if line.strip() or indent_empty_lines: + blocks.append(indent + line) + elif include_empty_lines: + blocks.append(line) + else: + blocks.append('') + return ''.join(blocks) + + @staticmethod + def _match_overlaps_substr(text, match: re.Match, substr: str) -> bool: + ''' + Checks if a regex match overlaps with a substring in the given text. 
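+
+ For example (illustrative): in the text "foo bar baz", a match spanning
+ indices 5..9 overlaps the "bar" occurrence at 4..7, because the match's
+ start falls inside that span.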
+ ''' + for instance in re.finditer(re.escape(substr), text): + start, end = instance.span() + if start <= match.start() <= end: + return True + if start <= match.end() <= end: + return True + return False + + +class MarkdownWithExtras(Markdown): + """A markdowner class that enables most extras: + + - footnotes + - fenced-code-blocks (only highlights code if 'pygments' Python module on path) + + These are not included: + - pyshell (specific to Python-related documenting) + - code-friendly (because it *disables* part of the syntax) + - link-patterns (because you need to specify some actual + link-patterns anyway) + """ + extras = ["footnotes", "fenced-code-blocks"] # type: ignore + + +# ---------------------------------------------------------- +# Extras +# ---------------------------------------------------------- + +# Base classes +# ---------------------------------------------------------- + +class Extra(ABC): + _registry: Dict[str, Type['Extra']] = {} + _exec_order: Dict[Stage, Tuple[List[Type['Extra']], List[Type['Extra']]]] = {} + + name: str + ''' + An identifiable name that users can use to invoke the extra + in the Markdown class + ''' + order: Tuple[Collection[Union[Stage, Type['Extra']]], Collection[Union[Stage, Type['Extra']]]] + ''' + Tuple of two iterables containing the stages/extras this extra will run before and + after, respectively + ''' + + def __init__(self, md: Markdown, options: Optional[dict]): + ''' + Args: + md: An instance of `Markdown` + options: a dict of settings to alter the extra's behaviour + ''' + self.md = md + self.options = options if options is not None else {} + + @classmethod + def deregister(cls): + ''' + Removes the class from the extras registry and unsets its execution order. + ''' + if cls.name in cls._registry: + del cls._registry[cls.name] + + for exec_order in Extra._exec_order.values(): + # find everywhere this extra is mentioned and remove it + for section in exec_order: + while cls in section: + section.remove(cls) + + @classmethod + def register(cls): + ''' + Registers the class for use with `Markdown` and calculates its execution order based on + the `order` class attribute. + ''' + cls._registry[cls.name] = cls + + for index, item in enumerate((*cls.order[0], *cls.order[1])): + before = index < len(cls.order[0]) + if not isinstance(item, Stage) and issubclass(item, Extra): + # eg: FencedCodeBlocks + for exec_orders in Extra._exec_order.values(): + # insert this extra everywhere the other one is mentioned + for section in exec_orders: + if item in section: + to_index = section.index(item) + if not before: + to_index += 1 + section.insert(to_index, cls) + else: + # eg: Stage.PREPROCESS + Extra._exec_order.setdefault(item, ([], [])) + if cls in Extra._exec_order[item][0 if before else 1]: + # extra is already runnig after this stage. Don't duplicate that effort + continue + if before: + Extra._exec_order[item][0].insert(0, cls) + else: + Extra._exec_order[item][1].append(cls) + + @abstractmethod + def run(self, text: str) -> str: + ''' + Run the extra against the given text. + + Returns: + The new text after being modified by the extra + ''' + ... + + def test(self, text: str) -> bool: + ''' + Check a section of markdown to see if this extra should be run upon it. + The default implementation will always return True but it's recommended to override + this behaviour to improve performance. + ''' + return True + + +class ItalicAndBoldProcessor(Extra): + ''' + An ABC that provides hooks for dealing with italics and bold syntax. 
+ This class is set to trigger both before AND after the italics and bold stage. + This allows any child classes to intercept instances of bold or italic syntax and + change the output or hash it to prevent it from being processed. + + After the I&B stage any hashes in the `hash_tables` instance variable are replaced. + ''' + name = 'italic-and-bold-processor' + order = (Stage.ITALIC_AND_BOLD,), (Stage.ITALIC_AND_BOLD,) + + strong_re = Markdown._strong_re + em_re = Markdown._em_re + + def __init__(self, md: Markdown, options: dict): + super().__init__(md, options) + self.hash_table = {} + + def run(self, text): + if self.md.order < Stage.ITALIC_AND_BOLD: + text = self.strong_re.sub(self.sub, text) + text = self.em_re.sub(self.sub, text) + else: + # put any hashed values back + for key, substr in self.hash_table.items(): + text = text.replace(key, substr) + return text + + @abstractmethod + def sub(self, match: re.Match) -> str: + # do nothing. Let `Markdown._do_italics_and_bold` do its thing later + return match.string[match.start(): match.end()] + + def sub_hash(self, match: re.Match) -> str: + substr = match.string[match.start(): match.end()] + key = _hash_text(substr) + self.hash_table[key] = substr + return key + + def test(self, text): + if self.md.order < Stage.ITALIC_AND_BOLD: + return '*' in text or '_' in text + return self.hash_table and re.search(r'md5-[0-9a-z]{32}', text) + +# User facing extras +# ---------------------------------------------------------- + + +class Admonitions(Extra): + ''' + Enable parsing of RST admonitions + ''' + + name = 'admonitions' + order = (Stage.BLOCK_GAMUT, Stage.LINK_DEFS), () + + admonitions = r'admonition|attention|caution|danger|error|hint|important|note|tip|warning' + + admonitions_re = re.compile(r''' + ^(\ *)\.\.\ (%s)::\ * # $1 leading indent, $2 the admonition + (.*)? # $3 admonition title + ((?:\s*\n\1\ {3,}.*)+?) 
# $4 admonition body (required) + (?=\s*(?:\Z|\n{4,}|\n\1?\ {0,2}\S)) # until EOF, 3 blank lines or something less indented + ''' % admonitions, + re.IGNORECASE | re.MULTILINE | re.VERBOSE + ) + + def test(self, text): + return self.admonitions_re.search(text) is not None + + def sub(self, match: re.Match) -> str: + lead_indent, admonition_name, title, body = match.groups() + + admonition_type = '<strong>%s</strong>' % admonition_name + + # figure out the class names to assign the block + if admonition_name.lower() == 'admonition': + admonition_class = 'admonition' + else: + admonition_class = 'admonition %s' % admonition_name.lower() + + # titles are generally optional + if title: + title = '<em>%s</em>' % title + + # process the admonition body like regular markdown + body = self.md._run_block_gamut("\n%s\n" % self.md._uniform_outdent(body)[1]) + + # indent the body before placing inside the aside block + admonition = self.md._uniform_indent( + '%s\n%s\n\n%s\n' % (admonition_type, title, body), + self.md.tab, False + ) + # wrap it in an aside + admonition = '<aside class="%s">\n%s</aside>' % (admonition_class, admonition) + # now indent the whole admonition back to where it started + return self.md._uniform_indent(admonition, lead_indent, False) + + def run(self, text): + return self.admonitions_re.sub(self.sub, text) + + +class _BreaksExtraOpts(TypedDict, total=False): + '''Options for the `Breaks` extra''' + on_backslash: bool + '''Replace backslashes at the end of a line with <br>''' + on_newline: bool + '''Replace single new line characters with <br> when True''' + + +class Breaks(Extra): + name = 'breaks' + order = (), (Stage.ITALIC_AND_BOLD,) + options: _BreaksExtraOpts + + def run(self, text): + on_backslash = self.options.get('on_backslash', False) + on_newline = self.options.get('on_newline', False) + + if on_backslash and on_newline: + pattern = r' *\\?' + elif on_backslash: + pattern = r'(?: *\\| {2,})' + elif on_newline: + pattern = r' *' + else: + pattern = r' {2,}' + + break_tag = "<br%s\n" % self.md.empty_element_suffix + text = re.sub(pattern + r"\n(?!\<(?:\/?(ul|ol|li))\>)", break_tag, text) + + return text + + +class CodeFriendly(ItalicAndBoldProcessor): + ''' + Disable _ and __ for em and strong. + ''' + name = 'code-friendly' + + def sub(self, match: re.Match) -> str: + syntax = match.group(1) + if '_' not in syntax: + return super().sub(match) + text = match.string[match.start(): match.end()] + key = _hash_text(text) + self.hash_table[key] = text + return key + + +class FencedCodeBlocks(Extra): + ''' + Allows a code block to not have to be indented + by fencing it with '```' on a line before and after. Based on + <http://github.github.com/github-flavored-markdown/> with support for + syntax highlighting. + ''' + + name = 'fenced-code-blocks' + order = (Stage.LINK_DEFS, Stage.BLOCK_GAMUT), (Stage.PREPROCESS,) + + fenced_code_block_re = re.compile(r''' + (?:\n+|\A\n?|(?<=\n)) + (^[ \t]*`{3,})\s{0,99}?([\w+-]+)?\s{0,99}?\n # $1 = opening fence (captured for back-referencing), $2 = optional lang + (.*?) 
# $3 = code block content + \1[ \t]*\n # closing fence + ''', re.M | re.X | re.S) + + def test(self, text): + if '```' not in text: + return False + if self.md.stage == Stage.PREPROCESS and not self.md.safe_mode: + return True + if self.md.stage == Stage.LINK_DEFS and self.md.safe_mode: + return True + return self.md.stage == Stage.BLOCK_GAMUT + + def _code_block_with_lexer_sub( + self, + codeblock: str, + leading_indent: str, + lexer + ) -> str: + ''' + Args: + codeblock: the codeblock to format + leading_indent: the indentation to prefix the block with + lexer (pygments.Lexer): the lexer to use + ''' + formatter_opts = self.md.extras['fenced-code-blocks'] or {} + + def unhash_code(codeblock): + for key, sanitized in list(self.md.html_spans.items()): + codeblock = codeblock.replace(key, sanitized) + replacements = [ + ("&", "&"), + ("<", "<"), + (">", ">") + ] + for old, new in replacements: + codeblock = codeblock.replace(old, new) + return codeblock + # remove leading indent from code block + _, codeblock = self.md._uniform_outdent(codeblock, max_outdent=leading_indent) + + codeblock = unhash_code(codeblock) + colored = self.md._color_with_pygments(codeblock, lexer, + **formatter_opts) + + # add back the indent to all lines + return "\n%s\n" % self.md._uniform_indent(colored, leading_indent, True) + + def tags(self, lexer_name: str) -> Tuple[str, str]: + ''' + Returns the tags that the encoded code block will be wrapped in, based + upon the lexer name. + + This function can be overridden by subclasses to piggy-back off of the + fenced code blocks syntax (see `Mermaid` extra). + + Returns: + The opening and closing tags, as strings within a tuple + ''' + pre_class = self.md._html_class_str_from_tag('pre') + if "highlightjs-lang" in self.md.extras and lexer_name: + code_class = ' class="%s language-%s"' % (lexer_name, lexer_name) + else: + code_class = self.md._html_class_str_from_tag('code') + return ('<pre%s><code%s>' % (pre_class, code_class), '</code></pre>') + + def sub(self, match: re.Match) -> str: + lexer_name = match.group(2) + codeblock = match.group(3) + codeblock = codeblock[:-1] # drop one trailing newline + + # Use pygments only if not using the highlightjs-lang extra + if lexer_name and "highlightjs-lang" not in self.md.extras: + lexer = self.md._get_pygments_lexer(lexer_name) + if lexer: + leading_indent = ' '*(len(match.group(1)) - len(match.group(1).lstrip())) + return self._code_block_with_lexer_sub(codeblock, leading_indent, lexer) + + # Fenced code blocks need to be outdented before encoding, and then reapplied + leading_indent = ' ' * (len(match.group(1)) - len(match.group(1).lstrip())) + if codeblock: + # only run the codeblock through the outdenter if not empty + leading_indent, codeblock = self.md._uniform_outdent(codeblock, max_outdent=leading_indent) + + codeblock = self.md._encode_code(codeblock) + + tags = self.tags(lexer_name) + + return "\n%s%s%s\n%s%s\n" % (leading_indent, tags[0], codeblock, leading_indent, tags[1]) + + def run(self, text): + return self.fenced_code_block_re.sub(self.sub, text) + + +class Latex(Extra): + ''' + Convert $ and $$ to <math> and </math> tags for inline and block math. 
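+
+ For example, `$x^2$` becomes an inline MathML element and
+ `$$\int_0^1 x\,dx$$` a display block. Requires the `latex2mathml`
+ package; an ImportError is raised if it is not installed.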
+ ''' + name = 'latex' + order = (Stage.CODE_BLOCKS, FencedCodeBlocks), () + + _single_dollar_re = re.compile(r'(?<!\$)\$(?!\$)(.*?)\$') + _double_dollar_re = re.compile(r'\$\$(.*?)\$\$', re.DOTALL) + + # Ways to escape + _pre_code_block_re = re.compile(r"<pre>(.*?)</pre>", re.DOTALL) # Wraped in <pre> + _triple_re = re.compile(r'```(.*?)```', re.DOTALL) # Wrapped in a code block ``` + _single_re = re.compile(r'(?<!`)(`)(.*?)(?<!`)\1(?!`)') # Wrapped in a single ` + + converter = None + code_blocks = {} + + def _convert_single_match(self, match): + return self.converter.convert(match.group(1)) + + def _convert_double_match(self, match): + return self.converter.convert(match.group(1).replace(r"\n", ''), display="block") + + def code_placeholder(self, match): + placeholder = f"<!--CODE_BLOCK_{len(self.code_blocks)}-->" + self.code_blocks[placeholder] = match.group(0) + return placeholder + + def run(self, text): + try: + import latex2mathml.converter + self.converter = latex2mathml.converter + except ImportError: + raise ImportError('The "latex" extra requires the "latex2mathml" package to be installed.') + + # Escape by replacing with a code block + text = self._pre_code_block_re.sub(self.code_placeholder, text) + text = self._single_re.sub(self.code_placeholder, text) + text = self._triple_re.sub(self.code_placeholder, text) + + text = self._single_dollar_re.sub(self._convert_single_match, text) + text = self._double_dollar_re.sub(self._convert_double_match, text) + + # Convert placeholder tag back to original code + for placeholder, code_block in self.code_blocks.items(): + text = text.replace(placeholder, code_block) + + return text + + +class LinkPatterns(Extra): + ''' + Auto-link given regex patterns in text (e.g. bug number + references, revision number references). + ''' + name = 'link-patterns' + order = (Stage.LINKS,), () + options: _link_patterns + + _basic_link_re = re.compile(r'!?\[.*?\]\(.*?\)') + + def run(self, text): + link_from_hash = {} + for regex, repl in self.options: + replacements = [] + for match in regex.finditer(text): + if any(self.md._match_overlaps_substr(text, match, h) for h in link_from_hash): + continue + + if callable(repl): + href = repl(match) + else: + href = match.expand(repl) + replacements.append((match.span(), href)) + for (start, end), href in reversed(replacements): + + # Do not match against links inside brackets. + if text[start - 1:start] == '[' and text[end:end + 1] == ']': + continue + + # Do not match against links in the standard markdown syntax. + if text[start - 2:start] == '](' or text[end:end + 2] == '")': + continue + + # Do not match against links which are escaped. 
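# Usage sketch for the 'latex' extra above; it requires the optional
# latex2mathml package, otherwise Latex.run raises ImportError.
import markdown2

html = markdown2.markdown(
    "Euler's identity: $e^{i\\pi} + 1 = 0$",
    extras=["latex"],
)
# Inline $...$ spans (and $$...$$ blocks) are converted to MathML, while
# anything already inside backticks or <pre> is left untouched.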
+ if text[start - 3:start] == '"""' and text[end:end + 3] == '"""': + text = text[:start - 3] + text[start:end] + text[end + 3:] + continue + + # search the text for anything that looks like a link + is_inside_link = False + for link_re in (self.md._auto_link_re, self._basic_link_re): + for match in link_re.finditer(text): + if any((r[0] <= start and end <= r[1]) for r in match.regs): + # if the link pattern start and end pos is within the bounds of + # something that looks like a link, then don't process it + is_inside_link = True + break + else: + continue + break + + if is_inside_link: + continue + + escaped_href = ( + href.replace('"', '"') # b/c of attr quote + # To avoid markdown <em> and <strong>: + .replace('*', self.md._escape_table['*']) + .replace('_', self.md._escape_table['_'])) + link = '<a href="%s">%s</a>' % (escaped_href, text[start:end]) + hash = _hash_text(link) + link_from_hash[hash] = link + text = text[:start] + hash + text[end:] + for hash, link in list(link_from_hash.items()): + text = text.replace(hash, link) + return text + + def test(self, text): + return True + + +class MarkdownInHTML(Extra): + ''' + Allow the use of `markdown="1"` in a block HTML tag to + have markdown processing be done on its contents. Similar to + <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with + some limitations. + ''' + name = 'markdown-in-html' + order = (), (Stage.HASH_HTML,) + + def run(self, text): + def callback(block): + indent, block = self.md._uniform_outdent(block) + block = self.md._hash_html_block_sub(block) + block = self.md._uniform_indent(block, indent, include_empty_lines=True, indent_empty_lines=False) + return block + + return self.md._strict_tag_block_sub(text, self.md._block_tags_a, callback, True) + + def test(self, text): + return True + + +class Mermaid(FencedCodeBlocks): + name = 'mermaid' + order = (FencedCodeBlocks,), () + + def tags(self, lexer_name): + if lexer_name == 'mermaid': + return ('<pre class="mermaid-pre"><div class="mermaid">', '</div></pre>') + return super().tags(lexer_name) + + +class MiddleWordEm(ItalicAndBoldProcessor): + ''' + Allows or disallows emphasis syntax in the middle of words, + defaulting to allow. Disabling this means that `this_text_here` will not be + converted to `this<em>text</em>here`. + ''' + name = 'middle-word-em' + order = (CodeFriendly,), (Stage.ITALIC_AND_BOLD,) + + def __init__(self, md: Markdown, options: Union[dict, bool]): + ''' + Args: + md: the markdown instance + options: can be bool for backwards compatibility but will be converted to a dict + in the constructor. All options are: + - allowed (bool): whether to allow emphasis in the middle of a word. + If `options` is a bool it will be placed under this key. 
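# Usage sketch for the LinkPatterns extra above. The pattern and URL are
# illustrative; patterns are (compiled regex, replacement) pairs handed to
# the `link_patterns` argument.
import re
import markdown2

patterns = [(re.compile(r"bug (\d+)", re.I), r"https://example.com/bugs/\1")]
html = markdown2.markdown(
    "Fixed in bug 123.",
    extras=["link-patterns"],
    link_patterns=patterns,
)
# "bug 123" becomes <a href="https://example.com/bugs/123">bug 123</a>,
# unless it already sits inside an existing link.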
+ ''' + if isinstance(options, bool): + options = {'allowed': options} + options.setdefault('allowed', True) + super().__init__(md, options) + + self.liberal_em_re = self.em_re + if not options['allowed']: + self.em_re = re.compile(r'(?<=\b)%s(?=\b)' % self.liberal_em_re.pattern, self.liberal_em_re.flags) + + def run(self, text): + # run strong and whatnot first + # this also will process all strict ems + text = super().run(text) + if self.md.order < self.md.stage: + # hash all non-valid ems + text = self.liberal_em_re.sub(self.sub_hash, text) + return text + + def sub(self, match: re.Match) -> str: + syntax = match.group(1) + if len(syntax) != 1: + # strong syntax + return super().sub(match) + return '<em>%s</em>' % match.group(2) + + +class Numbering(Extra): + ''' + Support of generic counters. Non standard extension to + allow sequential numbering of figures, tables, equations, exhibits etc. + ''' + + name = 'numbering' + order = (Stage.LINK_DEFS,), () + + def run(self, text): + # First pass to define all the references + regex_defns = re.compile(r''' + \[\#(\w+) # the counter. Open square plus hash plus a word \1 + ([^@]*) # Some optional characters, that aren't an @. \2 + @(\w+) # the id. Should this be normed? \3 + ([^\]]*)\] # The rest of the text up to the terminating ] \4 + ''', re.VERBOSE) + regex_subs = re.compile(r"\[@(\w+)\s*\]") # [@ref_id] + counters = {} + references = {} + replacements = [] + definition_html = '<figcaption class="{}" id="counter-ref-{}">{}{}{}</figcaption>' + reference_html = '<a class="{}" href="#counter-ref-{}">{}</a>' + for match in regex_defns.finditer(text): + # We must have four match groups otherwise this isn't a numbering reference + if len(match.groups()) != 4: + continue + counter = match.group(1) + text_before = match.group(2).strip() + ref_id = match.group(3) + text_after = match.group(4) + number = counters.get(counter, 1) + references[ref_id] = (number, counter) + replacements.append((match.start(0), + definition_html.format(counter, + ref_id, + text_before, + number, + text_after), + match.end(0))) + counters[counter] = number + 1 + for repl in reversed(replacements): + text = text[:repl[0]] + repl[1] + text[repl[2]:] + + # Second pass to replace the references with the right + # value of the counter + # Fwiw, it's vaguely annoying to have to turn the iterator into + # a list and then reverse it but I can't think of a better thing to do. + for match in reversed(list(regex_subs.finditer(text))): + number, counter = references.get(match.group(1), (None, None)) + if number is not None: + repl = reference_html.format(counter, + match.group(1), + number) + else: + repl = reference_html.format(match.group(1), + 'countererror', + '?' + match.group(1) + '?') + if "smarty-pants" in self.md.extras: + repl = repl.replace('"', self.md._escape_table['"']) + + text = text[:match.start()] + repl + text[match.end():] + return text + + +class PyShell(Extra): + ''' + Treats unindented Python interactive shell sessions as <code> + blocks. 
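# Usage sketch for the Numbering extra above. The syntax shown here is
# inferred from regex_defns/regex_subs: [#counter text @ref_id] defines a
# numbered item and [@ref_id] references it.
import markdown2

doc = "[#figure Data flow @flow]\n\nSee figure [@flow]."
html = markdown2.markdown(doc, extras=["numbering"])
# The definition becomes a <figcaption id="counter-ref-flow"> carrying the
# counter value, and the reference an <a href="#counter-ref-flow">1</a>.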
+ ''' + + name = 'pyshell' + order = (), (Stage.LISTS,) + + def test(self, text): + return ">>>" in text + + def sub(self, match: re.Match) -> str: + if "fenced-code-blocks" in self.md.extras: + dedented = _dedent(match.group(0)) + return self.md.extra_classes['fenced-code-blocks'].run("```pycon\n" + dedented + "```\n") + + lines = match.group(0).splitlines(0) + _dedentlines(lines) + indent = ' ' * self.md.tab_width + s = ('\n' # separate from possible cuddled paragraph + + indent + ('\n'+indent).join(lines) + + '\n') + return s + + def run(self, text): + less_than_tab = self.md.tab_width - 1 + _pyshell_block_re = re.compile(r""" + ^([ ]{0,%d})>>>[ ].*\n # first line + ^(\1[^\S\n]*\S.*\n)* # any number of subsequent lines with at least one character + (?=^\1?\n|\Z) # ends with a blank line or end of document + """ % less_than_tab, re.M | re.X) + + return _pyshell_block_re.sub(self.sub, text) + + +class SmartyPants(Extra): + ''' + Replaces ' and " with curly quotation marks or curly + apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes, + and ellipses. + ''' + name = 'smarty-pants' + order = (), (Stage.SPAN_GAMUT,) + + _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)") + _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)') + _closing_single_quote_re = re.compile(r"(?<=\S)'") + _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))') + # "smarty-pants" extra: Very liberal in interpreting a single prime as an + # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and + # "twixt" can be written without an initial apostrophe. This is fine because + # using scare quotes (single quotation marks) is rare. + _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))") + _contractions = ["tis", "twas", "twer", "neath", "o", "n", + "round", "bout", "twixt", "nuff", "fraid", "sup"] + + + def contractions(self, text: str) -> str: + text = self._apostrophe_year_re.sub(r"’\1", text) + for c in self._contractions: + text = text.replace("'%s" % c, "’%s" % c) + text = text.replace("'%s" % c.capitalize(), + "’%s" % c.capitalize()) + return text + + def run(self, text): + """Fancifies 'single quotes', "double quotes", and apostrophes. + Converts --, ---, and ... into en dashes, em dashes, and ellipses. + + Inspiration is: <http://daringfireball.net/projects/smartypants/> + See "test/tm-cases/smarty_pants.text" for a full discussion of the + support here and + <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a + discussion of some diversion from the original SmartyPants. + """ + if "'" in text: # guard for perf + text = self.contractions(text) + text = self._opening_single_quote_re.sub("‘", text) + text = self._closing_single_quote_re.sub("’", text) + + if '"' in text: # guard for perf + text = self._opening_double_quote_re.sub("“", text) + text = self._closing_double_quote_re.sub("”", text) + + text = text.replace("---", "—") + text = text.replace("--", "–") + text = text.replace("...", "…") + text = text.replace(" . . . ", "…") + text = text.replace(". . .", "…") + + # TODO: Temporary hack to fix https://github.com/trentm/python-markdown2/issues/150 + if "footnotes" in self.md.extras and "footnote-ref" in text: + # Quotes in the footnote back ref get converted to "smart" quotes + # Change them back here to ensure they work. 
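# Usage sketch for the PyShell extra above: an unindented interactive
# session is turned into a code block, and is routed through the
# fenced-code-blocks extra as a ```pycon fence when that extra is enabled.
import markdown2

session = "Try it:\n\n>>> 1 + 1\n2\n\n"
html = markdown2.markdown(session, extras=["pyshell"])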
+ text = text.replace('class="footnote-ref”', 'class="footnote-ref"') + + return text + + def test(self, text): + return "'" in text or '"' in text + + +class Strike(Extra): + ''' + Text inside of double tilde is ~~strikethrough~~ + ''' + name = 'strike' + order = (Stage.ITALIC_AND_BOLD,), () + + _strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S) + + def run(self, text): + return self._strike_re.sub(r"<s>\1</s>", text) + + def test(self, text): + return '~~' in text + + +class Tables(Extra): + ''' + Tables using the same format as GFM + <https://help.github.com/articles/github-flavored-markdown#tables> and + PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>. + ''' + name = 'tables' + order = (), (Stage.LISTS,) + + def run(self, text): + """Copying PHP-Markdown and GFM table syntax. Some regex borrowed from + https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538 + """ + less_than_tab = self.md.tab_width - 1 + table_re = re.compile(r''' + (?:(?<=\n)|\A\n?) # leading blank line + + ^[ ]{0,%d} # allowed whitespace + (.*[|].*)[ ]*\n # $1: header row (at least one pipe) + + ^[ ]{0,%d} # allowed whitespace + ( # $2: underline row + # underline row with leading bar + (?: \|\ *:?-+:?\ * )+ \|? \s?[ ]*\n + | + # or, underline row without leading bar + (?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \s?[ ]*\n + ) + + ( # $3: data rows + (?: + ^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces + .*\|.*[ ]*\n + )+ + ) + ''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X) + return table_re.sub(self.sub, text) + + def sub(self, match: re.Match) -> str: + trim_space_re = '^[ \t\n]+|[ \t\n]+$' + trim_bar_re = r'^\||\|$' + split_bar_re = r'^\||(?<![\`\\])\|' + escape_bar_re = r'\\\|' + + head, underline, body = match.groups() + + # Determine aligns for columns. 
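# Usage sketch for the SmartyPants and Strike extras above.
import markdown2

html = markdown2.markdown(
    'She said "wow" -- then ~~left~~ stayed...',
    extras=["smarty-pants", "strike"],
)
# Straight quotes become curly quotes, "--" an en dash, "..." an ellipsis,
# and ~~left~~ is rendered as <s>left</s>.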
+ cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", underline)))] + align_from_col_idx = {} + for col_idx, col in enumerate(cols): + if col[0] == ':' and col[-1] == ':': + align_from_col_idx[col_idx] = ' style="text-align:center;"' + elif col[0] == ':': + align_from_col_idx[col_idx] = ' style="text-align:left;"' + elif col[-1] == ':': + align_from_col_idx[col_idx] = ' style="text-align:right;"' + + # thead + hlines = ['<table%s>' % self.md._html_class_str_from_tag('table'), '<thead%s>' % self.md._html_class_str_from_tag('thead'), '<tr>'] + cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", head)))] + for col_idx, col in enumerate(cols): + hlines.append(' <th%s>%s</th>' % ( + align_from_col_idx.get(col_idx, ''), + self.md._run_span_gamut(col) + )) + hlines.append('</tr>') + hlines.append('</thead>') + + # tbody + hlines.append('<tbody>') + for line in body.strip('\n').split('\n'): + hlines.append('<tr>') + cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", line)))] + for col_idx, col in enumerate(cols): + hlines.append(' <td%s>%s</td>' % ( + align_from_col_idx.get(col_idx, ''), + self.md._run_span_gamut(col) + )) + hlines.append('</tr>') + hlines.append('</tbody>') + hlines.append('</table>') + + return '\n'.join(hlines) + '\n' + + +class TelegramSpoiler(Extra): + name = 'tg-spoiler' + order = (), (Stage.ITALIC_AND_BOLD,) + + _tg_spoiler_re = re.compile(r"\|\|\s?(.+?)\s?\|\|", re.S) + + def run(self, text): + return self._tg_spoiler_re.sub(r"<tg-spoiler>\1</tg-spoiler>", text) + + def test(self, text): + return '||' in text + + +class Underline(Extra): + ''' + Text inside of double dash is --underlined--. + ''' + name = 'underline' + order = (Stage.ITALIC_AND_BOLD,), () + + _underline_re = re.compile(r"(?<!<!)--(?!>)(?=\S)(.+?)(?<=\S)(?<!<!)--(?!>)", re.S) + + def run(self, text): + return self._underline_re.sub(r"<u>\1</u>", text) + + def test(self, text): + return '--' in text + + +class _WavedromExtraOpts(TypedDict, total=False): + '''Options for the `Wavedrom` extra''' + prefer_embed_svg: bool + ''' + Use the `wavedrom` library to convert diagrams to SVGs and embed them directly. + This will only work if the `wavedrom` library has been installed. 
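# Usage sketch for the Tables extra above (GFM-style pipe tables).
import markdown2

table_doc = (
    "| Name | Qty |\n"
    "|:-----|----:|\n"
    "| foo  |   1 |\n"
)
html = markdown2.markdown(table_doc, extras=["tables"])
# Yields <table>/<thead>/<tbody> markup; ":---" and "---:" in the underline
# row map to text-align:left and text-align:right respectively.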
+ + Defaults to `True` + ''' + + +class Wavedrom(Extra): + ''' + Support for generating Wavedrom digital timing diagrams + ''' + name = 'wavedrom' + order = (Stage.CODE_BLOCKS, FencedCodeBlocks), () + options: _WavedromExtraOpts + + def test(self, text): + match = FencedCodeBlocks.fenced_code_block_re.search(text) + return match is None or match.group(2) == 'wavedrom' + + def sub(self, match: re.Match) -> str: + # dedent the block for processing + lead_indent, waves = self.md._uniform_outdent(match.group(3)) + # default tags to wrap the wavedrom block in + open_tag, close_tag = '<script type="WaveDrom">\n', '</script>' + + # check if the user would prefer to have the SVG embedded directly + embed_svg = self.options.get('prefer_embed_svg', True) + + if embed_svg: + try: + import wavedrom + waves = wavedrom.render(waves).tostring() + open_tag, close_tag = '<div>', '\n</div>' + except ImportError: + pass + + # hash SVG to prevent <> chars being messed with + self.md._escape_table[waves] = _hash_text(waves) + + return self.md._uniform_indent( + '\n%s%s%s\n' % (open_tag, self.md._escape_table[waves], close_tag), + lead_indent, include_empty_lines=True + ) + + def run(self, text): + return FencedCodeBlocks.fenced_code_block_re.sub(self.sub, text) + + +class WikiTables(Extra): + ''' + Google Code Wiki-style tables. See + <http://code.google.com/p/support/wiki/WikiSyntax#Tables>. + ''' + name = 'wiki-tables' + order = (Tables,), () + + def run(self, text): + less_than_tab = self.md.tab_width - 1 + wiki_table_re = re.compile(r''' + (?:(?<=\n\n)|\A\n?) # leading blank line + ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line + (^\1\|\|.+?\|\|\n)* # any number of subsequent lines + ''' % less_than_tab, re.M | re.X) + return wiki_table_re.sub(self.sub, text) + + def sub(self, match: re.Match) -> str: + ttext = match.group(0).strip() + rows = [] + for line in ttext.splitlines(0): + line = line.strip()[2:-2].strip() + row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)] + rows.append(row) + + hlines = [] + + def add_hline(line, indents=0): + hlines.append((self.md.tab * indents) + line) + + def format_cell(text): + return self.md._run_span_gamut(re.sub(r"^\s*~", "", cell).strip(" ")) + + add_hline('<table%s>' % self.md._html_class_str_from_tag('table')) + # Check if first cell of first row is a header cell. If so, assume the whole row is a header row. + if rows and rows[0] and re.match(r"^\s*~", rows[0][0]): + add_hline('<thead%s>' % self.md._html_class_str_from_tag('thead'), 1) + add_hline('<tr>', 2) + for cell in rows[0]: + add_hline("<th>{}</th>".format(format_cell(cell)), 3) + add_hline('</tr>', 2) + add_hline('</thead>', 1) + # Only one header row allowed. + rows = rows[1:] + # If no more rows, don't create a tbody. 
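# Usage sketch for the Wavedrom extra above. If the optional `wavedrom`
# package is installed the diagram is embedded as SVG; otherwise it is
# wrapped in <script type="WaveDrom"> for client-side rendering.
import markdown2

wave_doc = (
    "```wavedrom\n"
    '{ "signal": [{ "name": "clk", "wave": "p...." }] }\n'
    "```\n"
)
html = markdown2.markdown(wave_doc, extras=["fenced-code-blocks", "wavedrom"])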
+ if rows: + add_hline('<tbody>', 1) + for row in rows: + add_hline('<tr>', 2) + for cell in row: + add_hline('<td>{}</td>'.format(format_cell(cell)), 3) + add_hline('</tr>', 2) + add_hline('</tbody>', 1) + add_hline('</table>') + return '\n'.join(hlines) + '\n' + + def test(self, text): + return '||' in text + + +# Register extras +Admonitions.register() +Breaks.register() +CodeFriendly.register() +FencedCodeBlocks.register() +Latex.register() +LinkPatterns.register() +MarkdownInHTML.register() +MiddleWordEm.register() +Mermaid.register() +Numbering.register() +PyShell.register() +SmartyPants.register() +Strike.register() +Tables.register() +TelegramSpoiler.register() +Underline.register() +Wavedrom.register() +WikiTables.register() + + +# ---------------------------------------------------------- + + +# ---- internal support functions + + +def calculate_toc_html(toc: Union[List[Tuple[int, str, str]], None]) -> Optional[str]: + """Return the HTML for the current TOC. + + This expects the `_toc` attribute to have been set on this instance. + """ + if toc is None: + return None + + def indent(): + return ' ' * (len(h_stack) - 1) + lines = [] + h_stack = [0] # stack of header-level numbers + for level, id, name in toc: + if level > h_stack[-1]: + lines.append("%s<ul>" % indent()) + h_stack.append(level) + elif level == h_stack[-1]: + lines[-1] += "</li>" + else: + while level < h_stack[-1]: + h_stack.pop() + if not lines[-1].endswith("</li>"): + lines[-1] += "</li>" + lines.append("%s</ul></li>" % indent()) + lines.append('%s<li><a href="#%s">%s</a>' % ( + indent(), id, name)) + while len(h_stack) > 1: + h_stack.pop() + if not lines[-1].endswith("</li>"): + lines[-1] += "</li>" + lines.append("%s</ul>" % indent()) + return '\n'.join(lines) + '\n' + + +class UnicodeWithAttrs(str): + """A subclass of unicode used for the return value of conversion to + possibly attach some attributes. E.g. the "toc_html" attribute when + the "toc" extra is used. + """ + metadata: Optional[Dict[str, str]] = None + toc_html: Optional[str] = None + +## {{{ http://code.activestate.com/recipes/577257/ (r1) +_slugify_strip_re = re.compile(r'[^\w\s-]') +_slugify_hyphenate_re = re.compile(r'[-\s]+') +def _slugify(value: str) -> str: + """ + Normalizes string, converts to lowercase, removes non-alpha characters, + and converts spaces to hyphens. + + From Django's "django/template/defaultfilters.py". 
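# Usage sketch showing calculate_toc_html / UnicodeWithAttrs in action:
# with the 'toc' extra the returned string exposes a toc_html attribute.
import markdown2

html = markdown2.markdown("# Title\n\n## Section\n", extras=["toc"])
print(html.toc_html)  # nested <ul>/<li> list built by calculate_toc_html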
+ """ + import unicodedata + value = unicodedata.normalize('NFKD', value).encode('utf-8', 'ignore').decode() + value = _slugify_strip_re.sub('', value).strip().lower() + return _slugify_hyphenate_re.sub('-', value) +## end of http://code.activestate.com/recipes/577257/ }}} + + +# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 +def _curry(function: Callable, *args, **kwargs) -> Callable: + def result(*rest, **kwrest): + combined = kwargs.copy() + combined.update(kwrest) + return function(*args + rest, **combined) + return result + + +# Recipe: regex_from_encoded_pattern (1.0) +def _regex_from_encoded_pattern(s: str) -> re.Pattern: + """'foo' -> re.compile(re.escape('foo')) + '/foo/' -> re.compile('foo') + '/foo/i' -> re.compile('foo', re.I) + """ + if s.startswith('/') and s.rfind('/') != 0: + # Parse it: /PATTERN/FLAGS + idx = s.rfind('/') + _, flags_str = s[1:idx], s[idx+1:] + flag_from_char = { + "i": re.IGNORECASE, + "l": re.LOCALE, + "s": re.DOTALL, + "m": re.MULTILINE, + "u": re.UNICODE, + } + flags = 0 + for char in flags_str: + try: + flags |= flag_from_char[char] + except KeyError: + raise ValueError("unsupported regex flag: '%s' in '%s' " + "(must be one of '%s')" + % (char, s, ''.join(list(flag_from_char.keys())))) + return re.compile(s[1:idx], flags) + else: # not an encoded regex + return re.compile(re.escape(s)) + + +# Recipe: dedent (0.1.2) +def _dedentlines(lines: List[str], tabsize: int = 8, skip_first_line: bool = False) -> List[str]: + """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines + + "lines" is a list of lines to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + Same as dedent() except operates on a sequence of lines. Note: the + lines list is modified **in-place**. 
+ """ + DEBUG = False + if DEBUG: + print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\ + % (tabsize, skip_first_line)) + margin = None + for i, line in enumerate(lines): + if i == 0 and skip_first_line: + continue + indent = 0 + for ch in line: + if ch == ' ': + indent += 1 + elif ch == '\t': + indent += tabsize - (indent % tabsize) + elif ch in '\r\n': + continue # skip all-whitespace lines + else: + break + else: + continue # skip all-whitespace lines + if DEBUG: + print("dedent: indent=%d: %r" % (indent, line)) + if margin is None: + margin = indent + else: + margin = min(margin, indent) + if DEBUG: + print("dedent: margin=%r" % margin) + + if margin is not None and margin > 0: + for i, line in enumerate(lines): + if i == 0 and skip_first_line: + continue + removed = 0 + for j, ch in enumerate(line): + if ch == ' ': + removed += 1 + elif ch == '\t': + removed += tabsize - (removed % tabsize) + elif ch in '\r\n': + if DEBUG: + print("dedent: %r: EOL -> strip up to EOL" % line) + lines[i] = lines[i][j:] + break + else: + raise ValueError("unexpected non-whitespace char %r in " + "line %r while removing %d-space margin" + % (ch, line, margin)) + if DEBUG: + print("dedent: %r: %r -> removed %d/%d"\ + % (line, ch, removed, margin)) + if removed == margin: + lines[i] = lines[i][j+1:] + break + elif removed > margin: + lines[i] = ' '*(removed-margin) + lines[i][j+1:] + break + else: + if removed: + lines[i] = lines[i][removed:] + return lines + + +def _dedent(text: str, tabsize: int = 8, skip_first_line: bool = False) -> str: + """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text + + "text" is the text to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + textwrap.dedent(s), but don't expand tabs to spaces + """ + lines = text.splitlines(True) + _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line) + return ''.join(lines) + + +class _memoized(object): + """Decorator that caches a function's return value each time it is called. + If called later with the same arguments, the cached value is returned, and + not re-evaluated. + + http://wiki.python.org/moin/PythonDecoratorLibrary + """ + def __init__(self, func): + self.func = func + self.cache = {} + + def __call__(self, *args): + try: + return self.cache[args] + except KeyError: + self.cache[args] = value = self.func(*args) + return value + except TypeError: + # uncachable -- for instance, passing a list as an argument. + # Better to not cache than to blow up entirely. + return self.func(*args) + + def __repr__(self): + """Return the function's docstring.""" + return self.func.__doc__ + + +def _xml_oneliner_re_from_tab_width(tab_width: int) -> re.Pattern: + """Standalone XML processing instruction regex.""" + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? 
# the beginning of the doc + ) + ( # save in $1 + [ ]{0,%d} + (?: + <\?\w+\b\s+.*?\?> # XML processing instruction + | + <\w+:\w+\b\s+.*?/> # namespaced single tag + ) + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width) + + +def _hr_tag_re_from_tab_width(tab_width: int) -> re.Pattern: + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? # the beginning of the doc + ) + ( # save in \1 + [ ]{0,%d} + <(hr) # start tag = \2 + \b # word break + ([^<>])*? # + /?> # the matching end tag + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width) + + +def _xml_escape_attr(attr: str, skip_single_quote: bool = True) -> str: + """Escape the given string for use in an HTML/XML tag attribute. + + By default this doesn't bother with escaping `'` to `'`, presuming that + the tag attribute is surrounded by double quotes. + """ + escaped = _AMPERSAND_RE.sub('&', attr) + + escaped = (attr + .replace('"', '"') + .replace('<', '<') + .replace('>', '>')) + if not skip_single_quote: + escaped = escaped.replace("'", "'") + return escaped + + +def _xml_encode_email_char_at_random(ch: str) -> str: + r = random() + # Roughly 10% raw, 45% hex, 45% dec. + # '@' *must* be encoded. I [John Gruber] insist. + # Issue 26: '_' must be encoded. + if r > 0.9 and ch not in "@_": + return ch + elif r < 0.45: + # The [1:] is to drop leading '0': 0x63 -> x63 + return '&#%s;' % hex(ord(ch))[1:] + else: + return '&#%s;' % ord(ch) + + +def _html_escape_url( + attr: str, + safe_mode: Union[_safe_mode, bool, None] = False, + charset: Optional[str] = None +): + """ + Replace special characters that are potentially malicious in url string. + + Args: + charset: don't escape characters from this charset. Currently the only + exception is for '+' when charset=='base64' + """ + escaped = (attr + .replace('"', '"') + .replace('<', '<') + .replace('>', '>')) + if safe_mode: + if charset != 'base64': + escaped = escaped.replace('+', ' ') + escaped = escaped.replace("'", "'") + return escaped + + +# ---- mainline + +class _NoReflowFormatter(argparse.RawDescriptionHelpFormatter): + """An argparse formatter that does NOT reflow the description.""" + def format_description(self, description): + return description or "" + + +def _test(): + import doctest + doctest.testmod() + + +def main(argv=None): + if argv is None: + argv = sys.argv + if not logging.root.handlers: + logging.basicConfig() + + parser = argparse.ArgumentParser( + prog="markdown2", description=cmdln_desc, usage='%(prog)s [PATHS...]', + formatter_class=_NoReflowFormatter + ) + parser.add_argument('--version', action='version', + version='%(prog)s {version}'.format(version=__version__)) + parser.add_argument('paths', nargs='*', + help=( + 'optional list of files to convert.' 
+ 'If none are given, stdin will be used' + )) + parser.add_argument("-v", "--verbose", dest="log_level", + action="store_const", const=logging.DEBUG, + help="more verbose output") + parser.add_argument("--encoding", + help="specify encoding of text content") + parser.add_argument("--html4tags", action="store_true", default=False, + help="use HTML 4 style for empty element tags") + parser.add_argument("-s", "--safe", metavar="MODE", dest="safe_mode", + help="sanitize literal HTML: 'escape' escapes " + "HTML meta chars, 'replace' replaces with an " + "[HTML_REMOVED] note") + parser.add_argument("-x", "--extras", action="append", + help="Turn on specific extra features (not part of " + "the core Markdown spec). See above.") + parser.add_argument("--use-file-vars", + help="Look for and use Emacs-style 'markdown-extras' " + "file var to turn on extras. See " + "<https://github.com/trentm/python-markdown2/wiki/Extras>") + parser.add_argument("--link-patterns-file", + help="path to a link pattern file") + parser.add_argument("--self-test", action="store_true", + help="run internal self-tests (some doctests)") + parser.add_argument("--compare", action="store_true", + help="run against Markdown.pl as well (for testing)") + parser.set_defaults(log_level=logging.INFO, compare=False, + encoding="utf-8", safe_mode=None, use_file_vars=False) + opts = parser.parse_args() + paths = opts.paths + log.setLevel(opts.log_level) + + if opts.self_test: + return _test() + + if opts.extras: + extras = {} + for s in opts.extras: + splitter = re.compile("[,;: ]+") + for e in splitter.split(s): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + extras[ename] = earg + else: + extras = None + + if opts.link_patterns_file: + link_patterns = [] + f = open(opts.link_patterns_file) + try: + for i, line in enumerate(f.readlines()): + if not line.strip(): + continue + if line.lstrip().startswith("#"): + continue + try: + pat, href = line.rstrip().rsplit(None, 1) + except ValueError: + raise MarkdownError("%s:%d: invalid link pattern line: %r" + % (opts.link_patterns_file, i+1, line)) + link_patterns.append( + (_regex_from_encoded_pattern(pat), href)) + finally: + f.close() + else: + link_patterns = None + + from os.path import abspath, dirname, exists, join + markdown_pl = join(dirname(dirname(abspath(__file__))), "test", + "Markdown.pl") + if not paths: + paths = ['-'] + for path in paths: + if path == '-': + text = sys.stdin.read() + else: + fp = codecs.open(path, 'r', opts.encoding) + text = fp.read() + fp.close() + if opts.compare: + from subprocess import PIPE, Popen + print("==== Markdown.pl ====") + p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True) + p.stdin.write(text.encode('utf-8')) + p.stdin.close() + perl_html = p.stdout.read().decode('utf-8') + sys.stdout.write(perl_html) + print("==== markdown2.py ====") + html = markdown(text, + html4tags=opts.html4tags, + safe_mode=opts.safe_mode, + extras=extras, link_patterns=link_patterns, + use_file_vars=opts.use_file_vars, + cli=True) + sys.stdout.write(html) + if extras and "toc" in extras: + log.debug("toc_html: " + + str(html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))) + if opts.compare: + test_dir = join(dirname(dirname(abspath(__file__))), "test") + if exists(join(test_dir, "test_markdown2.py")): + sys.path.insert(0, test_dir) + from test_markdown2 import norm_html_from_html + norm_html = norm_html_from_html(html) + 
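# Command-line sketch for main() above (a `markdown2` console script is
# typically installed as well); flag names follow the argparse setup:
#
#   python markdown2.py --extras fenced-code-blocks,tables README.md
#   python markdown2.py -s escape < untrusted.md
#
# --extras accepts comma-, semicolon-, colon- or space-separated names,
# optionally as name=value pairs, matching the splitter used above.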
norm_perl_html = norm_html_from_html(perl_html) + else: + norm_html = html + norm_perl_html = perl_html + print("==== match? %r ====" % (norm_perl_html == norm_html)) + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. + + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important NoticeRead before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). 
+ +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. 
If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. 
Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. 
Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. 
The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. 
CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. 
Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, 
libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. 
+ http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. + + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. 
Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. 
Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. 
Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. 
The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . 
+ +----------------- diff --git a/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..7bdd0969540e15ef13d7062911ef1f804e902f44 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD @@ -0,0 +1,18 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/cusparse.h,sha256=yhV9iTcEW9XEyhaJmX4iddh_cMb8sfNAy6qva5ae4qw,287290 +nvidia/cusparse/include/cusparse_v2.h,sha256=jkH2A9hYc-TEF0vuQ_SurbhPNEHkYGUIRuxKXhFAqnw,2587 +nvidia/cusparse/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/lib/libcusparse.so.12,sha256=UARmovVZ3mIqcbuSDT0pI-aRNSRXR6J0LuE-3_C6YIU,264876688 +nvidia_cusparse_cu12-12.1.0.106.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusparse_cu12-12.1.0.106.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cusparse_cu12-12.1.0.106.dist-info/METADATA,sha256=XpBtE4L1lFCx7gDu7Klx9dijNWQW26PS3fcOGjNIsXg,1550 +nvidia_cusparse_cu12-12.1.0.106.dist-info/RECORD,, +nvidia_cusparse_cu12-12.1.0.106.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106 +nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.1.0.106.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/parrot/lib/python3.10/site-packages/pylab.py b/parrot/lib/python3.10/site-packages/pylab.py new file mode 100644 index 0000000000000000000000000000000000000000..64ece55b1cb2c312d8a9c2ac1fd264fa11727270 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pylab.py @@ -0,0 +1,3 @@ +from matplotlib.pylab import * # noqa: F401, F403 +import matplotlib.pylab +__doc__ = matplotlib.pylab.__doc__ diff --git a/parrot/lib/python3.10/site-packages/pynvml.py b/parrot/lib/python3.10/site-packages/pynvml.py new file mode 100644 index 
0000000000000000000000000000000000000000..1aaea2259aeedb04ee9e654ee8daa132057f3847 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pynvml.py @@ -0,0 +1,6135 @@ +##### +# Copyright (c) 2011-2023, NVIDIA Corporation. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the NVIDIA Corporation nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. +##### + +## +# Python bindings for the NVML library +## +from ctypes import * +from ctypes.util import find_library +from functools import wraps +import sys +import os +import threading +import string + +## C Type mappings ## +## Enums +_nvmlEnableState_t = c_uint +NVML_FEATURE_DISABLED = 0 +NVML_FEATURE_ENABLED = 1 + +_nvmlBrandType_t = c_uint +NVML_BRAND_UNKNOWN = 0 +NVML_BRAND_QUADRO = 1 +NVML_BRAND_TESLA = 2 +NVML_BRAND_NVS = 3 +NVML_BRAND_GRID = 4 # Deprecated from API reporting. Keeping definition for backward compatibility. +NVML_BRAND_GEFORCE = 5 +NVML_BRAND_TITAN = 6 +NVML_BRAND_NVIDIA_VAPPS = 7 # NVIDIA Virtual Applications +NVML_BRAND_NVIDIA_VPC = 8 # NVIDIA Virtual PC +NVML_BRAND_NVIDIA_VCS = 9 # NVIDIA Virtual Compute Server +NVML_BRAND_NVIDIA_VWS = 10 # NVIDIA RTX Virtual Workstation +NVML_BRAND_NVIDIA_CLOUD_GAMING = 11 # NVIDIA Cloud Gaming +NVML_BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING # Deprecated from API reporting. Keeping definition for backward compatibility. 
+NVML_BRAND_QUADRO_RTX = 12 +NVML_BRAND_NVIDIA_RTX = 13 +NVML_BRAND_NVIDIA = 14 +NVML_BRAND_GEFORCE_RTX = 15 # Unused +NVML_BRAND_TITAN_RTX = 16 # Unused +NVML_BRAND_COUNT = 17 + +_nvmlTemperatureThresholds_t = c_uint +NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0 +NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1 +NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2 +NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3 +NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = 4 +NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = 5 +NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = 6 +NVML_TEMPERATURE_THRESHOLD_GPS_CURR = 7 +NVML_TEMPERATURE_THRESHOLD_COUNT = 8 + +_nvmlTemperatureSensors_t = c_uint +NVML_TEMPERATURE_GPU = 0 +NVML_TEMPERATURE_COUNT = 1 + + +_nvmlComputeMode_t = c_uint +NVML_COMPUTEMODE_DEFAULT = 0 +NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1 ## Support Removed +NVML_COMPUTEMODE_PROHIBITED = 2 +NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3 +NVML_COMPUTEMODE_COUNT = 4 + +_nvmlMemoryLocation_t = c_uint +NVML_MEMORY_LOCATION_L1_CACHE = 0 +NVML_MEMORY_LOCATION_L2_CACHE = 1 +NVML_MEMORY_LOCATION_DEVICE_MEMORY = 2 +NVML_MEMORY_LOCATION_DRAM = 2 +NVML_MEMORY_LOCATION_REGISTER_FILE = 3 +NVML_MEMORY_LOCATION_TEXTURE_MEMORY = 4 +NVML_MEMORY_LOCATION_TEXTURE_SHM = 5 +NVML_MEMORY_LOCATION_CBU = 6 +NVML_MEMORY_LOCATION_SRAM = 7 +NVML_MEMORY_LOCATION_COUNT = 8 + +NVML_NVLINK_MAX_LINKS = 18 + +# For backwards compatibility, maintain the incorrectly-named "LANES" define +NVML_NVLINK_MAX_LANES = NVML_NVLINK_MAX_LINKS + +_nvmlNvLinkErrorCounter_t = c_uint +NVML_NVLINK_ERROR_DL_REPLAY = 0 +NVML_NVLINK_ERROR_DL_RECOVERY = 1 +NVML_NVLINK_ERROR_DL_CRC_FLIT = 2 +NVML_NVLINK_ERROR_DL_CRC_DATA = 3 +NVML_NVLINK_ERROR_DL_ECC_DATA = 4 +NVML_NVLINK_ERROR_COUNT = 5 + +_nvmlNvLinkEccLaneErrorCounter_t = c_uint +NVML_NVLINK_ERROR_DL_ECC_LANE0 = 0 +NVML_NVLINK_ERROR_DL_ECC_LANE1 = 1 +NVML_NVLINK_ERROR_DL_ECC_LANE2 = 2 +NVML_NVLINK_ERROR_DL_ECC_LANE3 = 3 +NVML_NVLINK_ERROR_DL_ECC_COUNT = 5 + +_nvmlNvLinkCapability_t = c_uint +NVML_NVLINK_CAP_P2P_SUPPORTED = 0 +NVML_NVLINK_CAP_SYSMEM_ACCESS = 1 +NVML_NVLINK_CAP_P2P_ATOMICS = 2 +NVML_NVLINK_CAP_SYSMEM_ATOMICS= 3 +NVML_NVLINK_CAP_SLI_BRIDGE = 4 +NVML_NVLINK_CAP_VALID = 5 +NVML_NVLINK_CAP_COUNT = 6 + +_nvmlNvLinkUtilizationCountPktTypes_t = c_uint +NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1 +NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2 +NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4 +NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8 +NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10 +NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20 +NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40 +NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80 +NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF + +_nvmlNvLinkUtilizationCountUnits_t = c_uint +NVML_NVLINK_COUNTER_UNIT_CYCLES = 0 +NVML_NVLINK_COUNTER_UNIT_PACKETS = 1 +NVML_NVLINK_COUNTER_UNIT_BYTES = 2 +NVML_NVLINK_COUNTER_UNIT_RESERVED = 3 +NVML_NVLINK_COUNTER_UNIT_COUNT = 4 + +_nvmlNvLinkDeviceType_t = c_uint +NVML_NVLINK_DEVICE_TYPE_GPU = 0x00 +NVML_NVLINK_DEVICE_TYPE_IBMNPU = 0x01 +NVML_NVLINK_DEVICE_TYPE_SWITCH = 0x02 +NVML_NVLINK_DEVICE_TYPE_UNKNOWN = 0xFF + +# These are deprecated, instead use _nvmlMemoryErrorType_t +_nvmlEccBitType_t = c_uint +NVML_SINGLE_BIT_ECC = 0 +NVML_DOUBLE_BIT_ECC = 1 +NVML_ECC_ERROR_TYPE_COUNT = 2 + +_nvmlEccCounterType_t = c_uint +NVML_VOLATILE_ECC = 0 +NVML_AGGREGATE_ECC = 1 +NVML_ECC_COUNTER_TYPE_COUNT = 2 + +_nvmlMemoryErrorType_t = c_uint +NVML_MEMORY_ERROR_TYPE_CORRECTED = 0 +NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1 +NVML_MEMORY_ERROR_TYPE_COUNT = 2 + +_nvmlClockType_t = c_uint +NVML_CLOCK_GRAPHICS = 0 +NVML_CLOCK_SM = 1 
+NVML_CLOCK_MEM = 2 +NVML_CLOCK_VIDEO = 3 +NVML_CLOCK_COUNT = 4 + +_nvmlClockId_t = c_uint +NVML_CLOCK_ID_CURRENT = 0 +NVML_CLOCK_ID_APP_CLOCK_TARGET = 1 +NVML_CLOCK_ID_APP_CLOCK_DEFAULT = 2 +NVML_CLOCK_ID_CUSTOMER_BOOST_MAX = 3 +NVML_CLOCK_ID_COUNT = 4 + +_nvmlDriverModel_t = c_uint +NVML_DRIVER_WDDM = 0 +NVML_DRIVER_WDM = 1 +NVML_DRIVER_MCDM = 2 + +NVML_MAX_GPU_PERF_PSTATES = 16 + +_nvmlPstates_t = c_uint +NVML_PSTATE_0 = 0 +NVML_PSTATE_1 = 1 +NVML_PSTATE_2 = 2 +NVML_PSTATE_3 = 3 +NVML_PSTATE_4 = 4 +NVML_PSTATE_5 = 5 +NVML_PSTATE_6 = 6 +NVML_PSTATE_7 = 7 +NVML_PSTATE_8 = 8 +NVML_PSTATE_9 = 9 +NVML_PSTATE_10 = 10 +NVML_PSTATE_11 = 11 +NVML_PSTATE_12 = 12 +NVML_PSTATE_13 = 13 +NVML_PSTATE_14 = 14 +NVML_PSTATE_15 = 15 +NVML_PSTATE_UNKNOWN = 32 + +_nvmlInforomObject_t = c_uint +NVML_INFOROM_OEM = 0 +NVML_INFOROM_ECC = 1 +NVML_INFOROM_POWER = 2 +NVML_INFOROM_DEN = 3 +NVML_INFOROM_COUNT = 4 + +_nvmlReturn_t = c_uint +NVML_SUCCESS = 0 +NVML_ERROR_UNINITIALIZED = 1 +NVML_ERROR_INVALID_ARGUMENT = 2 +NVML_ERROR_NOT_SUPPORTED = 3 +NVML_ERROR_NO_PERMISSION = 4 +NVML_ERROR_ALREADY_INITIALIZED = 5 +NVML_ERROR_NOT_FOUND = 6 +NVML_ERROR_INSUFFICIENT_SIZE = 7 +NVML_ERROR_INSUFFICIENT_POWER = 8 +NVML_ERROR_DRIVER_NOT_LOADED = 9 +NVML_ERROR_TIMEOUT = 10 +NVML_ERROR_IRQ_ISSUE = 11 +NVML_ERROR_LIBRARY_NOT_FOUND = 12 +NVML_ERROR_FUNCTION_NOT_FOUND = 13 +NVML_ERROR_CORRUPTED_INFOROM = 14 +NVML_ERROR_GPU_IS_LOST = 15 +NVML_ERROR_RESET_REQUIRED = 16 +NVML_ERROR_OPERATING_SYSTEM = 17 +NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18 +NVML_ERROR_IN_USE = 19 +NVML_ERROR_MEMORY = 20 +NVML_ERROR_NO_DATA = 21 +NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22 +NVML_ERROR_INSUFFICIENT_RESOURCES = 23 +NVML_ERROR_FREQ_NOT_SUPPORTED = 24 +NVML_ERROR_ARGUMENT_VERSION_MISMATCH = 25 +NVML_ERROR_DEPRECATED = 26 +NVML_ERROR_NOT_READY = 27 +NVML_ERROR_GPU_NOT_FOUND = 28 +NVML_ERROR_INVALID_STATE = 29 +NVML_ERROR_UNKNOWN = 999 + +_nvmlFanState_t = c_uint +NVML_FAN_NORMAL = 0 +NVML_FAN_FAILED = 1 + +_nvmlFanControlPolicy_t = c_uint +NVML_FAN_POLICY_TEMPERATURE_CONTINOUS_SW = 0 +NVML_FAN_POLICY_MANUAL = 1 + +_nvmlLedColor_t = c_uint +NVML_LED_COLOR_GREEN = 0 +NVML_LED_COLOR_AMBER = 1 + +_nvmlGpuOperationMode_t = c_uint +NVML_GOM_ALL_ON = 0 +NVML_GOM_COMPUTE = 1 +NVML_GOM_LOW_DP = 2 + +_nvmlPageRetirementCause_t = c_uint +NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS = 0 +NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR = 1 +NVML_PAGE_RETIREMENT_CAUSE_COUNT = 2 + +_nvmlRestrictedAPI_t = c_uint +NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS = 0 +NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS = 1 +NVML_RESTRICTED_API_COUNT = 2 + +_nvmlBridgeChipType_t = c_uint +NVML_BRIDGE_CHIP_PLX = 0 +NVML_BRIDGE_CHIP_BRO4 = 1 +NVML_MAX_PHYSICAL_BRIDGE = 128 + +_nvmlValueType_t = c_uint +NVML_VALUE_TYPE_DOUBLE = 0 +NVML_VALUE_TYPE_UNSIGNED_INT = 1 +NVML_VALUE_TYPE_UNSIGNED_LONG = 2 +NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3 +NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4 +NVML_VALUE_TYPE_SIGNED_INT = 5 +NVML_VALUE_TYPE_UNSIGNED_SHORT = 6 +NVML_VALUE_TYPE_COUNT = 7 + +_nvmlNvlinkVersion_t = c_uint +NVML_NVLINK_VERSION_INVALID = 0 +NVML_NVLINK_VERSION_1_0 = 1 +NVML_NVLINK_VERSION_2_0 = 2 +NVML_NVLINK_VERSION_2_2 = 3 +NVML_NVLINK_VERSION_3_0 = 4 +NVML_NVLINK_VERSION_3_1 = 5 +NVML_NVLINK_VERSION_4_0 = 6 +NVML_NVLINK_VERSION_5_0 = 7 + +_nvmlPerfPolicyType_t = c_uint +NVML_PERF_POLICY_POWER = 0 +NVML_PERF_POLICY_THERMAL = 1 +NVML_PERF_POLICY_SYNC_BOOST = 2 +NVML_PERF_POLICY_BOARD_LIMIT = 3 +NVML_PERF_POLICY_LOW_UTILIZATION = 4 +NVML_PERF_POLICY_RELIABILITY = 5 
+NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10 +NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11 +NVML_PERF_POLICY_COUNT = 12 + +_nvmlEncoderQueryType_t = c_uint +NVML_ENCODER_QUERY_H264 = 0 +NVML_ENCODER_QUERY_HEVC = 1 +NVML_ENCODER_QUERY_AV1 = 2 +NVML_ENCODER_QUERY_UNKNOWN = 255 + +_nvmlFBCSessionType_t = c_uint +NVML_FBC_SESSION_TYPE_UNKNOWN = 0 +NVML_FBC_SESSION_TYPE_TOSYS = 1 +NVML_FBC_SESSION_TYPE_CUDA = 2 +NVML_FBC_SESSION_TYPE_VID = 3 +NVML_FBC_SESSION_TYPE_HWENC = 4 + +_nvmlDetachGpuState_t = c_uint +NVML_DETACH_GPU_KEEP = 0 +NVML_DETACH_GPU_REMOVE = 1 + +_nvmlPcieLinkState_t = c_uint +NVML_PCIE_LINK_KEEP = 0 +NVML_PCIE_LINK_SHUT_DOWN = 1 + +_nvmlSamplingType_t = c_uint +NVML_TOTAL_POWER_SAMPLES = 0 +NVML_GPU_UTILIZATION_SAMPLES = 1 +NVML_MEMORY_UTILIZATION_SAMPLES = 2 +NVML_ENC_UTILIZATION_SAMPLES = 3 +NVML_DEC_UTILIZATION_SAMPLES = 4 +NVML_PROCESSOR_CLK_SAMPLES = 5 +NVML_MEMORY_CLK_SAMPLES = 6 +NVML_MODULE_POWER_SAMPLES = 7 +NVML_JPG_UTILIZATION_SAMPLES = 8 +NVML_OFA_UTILIZATION_SAMPLES = 9 +NVML_SAMPLINGTYPE_COUNT = 10 + +_nvmlPcieUtilCounter_t = c_uint +NVML_PCIE_UTIL_TX_BYTES = 0 +NVML_PCIE_UTIL_RX_BYTES = 1 +NVML_PCIE_UTIL_COUNT = 2 + +_nvmlGpuTopologyLevel_t = c_uint +NVML_TOPOLOGY_INTERNAL = 0 +NVML_TOPOLOGY_SINGLE = 10 +NVML_TOPOLOGY_MULTIPLE = 20 +NVML_TOPOLOGY_HOSTBRIDGE = 30 +NVML_TOPOLOGY_NODE = 40 +NVML_TOPOLOGY_CPU = NVML_TOPOLOGY_NODE +NVML_TOPOLOGY_SYSTEM = 50 + +_nvmlGpuP2PCapsIndex_t = c_uint +NVML_P2P_CAPS_INDEX_READ = 0, +NVML_P2P_CAPS_INDEX_WRITE = 1 +NVML_P2P_CAPS_INDEX_NVLINK =2 +NVML_P2P_CAPS_INDEX_ATOMICS = 3 +# +# NVML_P2P_CAPS_INDEX_PROP is deprecated. +# Use NVML_P2P_CAPS_INDEX_PCI instead. +# +NVML_P2P_CAPS_INDEX_PROP = 4 +NVML_P2P_CAPS_INDEX_PCI = 4 +NVML_P2P_CAPS_INDEX_UNKNOWN = 5 + +_nvmlGpuP2PStatus_t = c_uint +NVML_P2P_STATUS_OK = 0 +NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED = 1 +NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED +NVML_P2P_STATUS_GPU_NOT_SUPPORTED = 2 +NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED =3 +NVML_P2P_STATUS_DISABLED_BY_REGKEY =4 +NVML_P2P_STATUS_NOT_SUPPORTED =5 +NVML_P2P_STATUS_UNKNOWN =6 + +_nvmlDeviceArchitecture_t = c_uint +NVML_DEVICE_ARCH_KEPLER = 2 +NVML_DEVICE_ARCH_MAXWELL = 3 +NVML_DEVICE_ARCH_PASCAL = 4 +NVML_DEVICE_ARCH_VOLTA = 5 +NVML_DEVICE_ARCH_TURING = 6 +NVML_DEVICE_ARCH_AMPERE = 7 +NVML_DEVICE_ARCH_ADA = 8 +NVML_DEVICE_ARCH_HOPPER = 9 +NVML_DEVICE_ARCH_BLACKWELL = 10 +NVML_DEVICE_ARCH_T23X = 11 +NVML_DEVICE_ARCH_UNKNOWN = 0xffffffff + +# PCI bus Types +_nvmlBusType_t = c_uint +NVML_BUS_TYPE_UNKNOWN = 0 +NVML_BUS_TYPE_PCI = 1 +NVML_BUS_TYPE_PCIE = 2 +NVML_BUS_TYPE_FPCI = 3 +NVML_BUS_TYPE_AGP = 4 + +_nvmlPowerSource_t = c_uint +NVML_POWER_SOURCE_AC = 0x00000000 +NVML_POWER_SOURCE_BATTERY = 0x00000001 +NVML_POWER_SOURCE_UNDERSIZED = 0x00000002 + +_nvmlAdaptiveClockInfoStatus_t = c_uint +NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED = 0x00000000 +NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED = 0x00000001 + +_nvmlClockLimitId_t = c_uint +NVML_CLOCK_LIMIT_ID_RANGE_START = 0xffffff00 +NVML_CLOCK_LIMIT_ID_TDP = 0xffffff01 +NVML_CLOCK_LIMIT_ID_UNLIMITED = 0xffffff02 + +_nvmlPcieLinkMaxSpeed_t = c_uint +NVML_PCIE_LINK_MAX_SPEED_INVALID = 0x00000000 +NVML_PCIE_LINK_MAX_SPEED_2500MBPS = 0x00000001 +NVML_PCIE_LINK_MAX_SPEED_5000MBPS = 0x00000002 +NVML_PCIE_LINK_MAX_SPEED_8000MBPS = 0x00000003 +NVML_PCIE_LINK_MAX_SPEED_16000MBPS = 0x00000004 +NVML_PCIE_LINK_MAX_SPEED_32000MBPS = 0x00000005 +NVML_PCIE_LINK_MAX_SPEED_64000MBPS = 0x00000006 + +_nvmlPcieAtomicsCapability_t = c_uint +NVML_PCIE_ATOMICS_CAP_FETCHADD32 = 0x01 
+NVML_PCIE_ATOMICS_CAP_FETCHADD64 = 0x02 +NVML_PCIE_ATOMICS_CAP_SWAP32 = 0x04 +NVML_PCIE_ATOMICS_CAP_SWAP64 = 0x08 +NVML_PCIE_ATOMICS_CAP_CAS32 = 0x10 +NVML_PCIE_ATOMICS_CAP_CAS64 = 0x20 +NVML_PCIE_ATOMICS_CAP_CAS128 = 0x40 +NVML_PCIE_ATOMICS_OPS_MAX = 7 + +_nvmlAffinityScope_t = c_uint +NVML_AFFINITY_SCOPE_NODE = 0 +NVML_AFFINITY_SCOPE_SOCKET = 1 + +_nvmlDeviceGpuRecoveryAction_t = c_uint +NVML_GPU_RECOVERY_ACTION_NONE = 0 +NVML_GPU_RECOVERY_ACTION_GPU_RESET = 1 +NVML_GPU_RECOVERY_ACTION_NODE_REBOOT = 2 +NVML_GPU_RECOVERY_ACTION_DRAIN_P2P = 3 +NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET = 4 + +# C preprocessor defined values +nvmlFlagDefault = 0 +nvmlFlagForce = 1 +NVML_INIT_FLAG_NO_GPUS = 1 +NVML_INIT_FLAG_NO_ATTACH = 2 + +NVML_MAX_GPC_COUNT = 32 + +# buffer size +NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE = 16 +NVML_DEVICE_UUID_BUFFER_SIZE = 80 +NVML_DEVICE_UUID_V2_BUFFER_SIZE = 96 +NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE = 80 +NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE = 80 +NVML_DEVICE_NAME_BUFFER_SIZE = 64 +NVML_DEVICE_NAME_V2_BUFFER_SIZE = 96 +NVML_DEVICE_SERIAL_BUFFER_SIZE = 30 +NVML_DEVICE_PART_NUMBER_BUFFER_SIZE = 80 +NVML_DEVICE_GPU_PART_NUMBER_BUFFER_SIZE = 80 +NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE = 32 +NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE = 32 +NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE = 16 +NVML_GRID_LICENSE_BUFFER_SIZE = 128 +NVML_VGPU_NAME_BUFFER_SIZE = 64 +NVML_GRID_LICENSE_FEATURE_MAX_COUNT = 3 +NVML_VGPU_METADATA_OPAQUE_DATA_SIZE = sizeof(c_uint) + 256 +NVML_VGPU_PGPU_METADATA_OPAQUE_DATA_SIZE = 256 +NVML_DEVICE_GPU_FRU_PART_NUMBER_BUFFER_SIZE = 0x14 # NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH +NVML_PERF_MODES_BUFFER_SIZE = 2048 + +# Format strings +NVML_DEVICE_PCI_BUS_ID_LEGACY_FMT = "%04X:%02X:%02X.0" +NVML_DEVICE_PCI_BUS_ID_FMT = "%08X:%02X:%02X.0" + +NVML_VALUE_NOT_AVAILABLE_ulonglong = c_ulonglong(-1) +NVML_VALUE_NOT_AVAILABLE_uint = c_uint(-1) + +''' + Field Identifiers. + + All Identifiers pertain to a device. Each ID is only used once and is guaranteed never to change. +''' +NVML_FI_DEV_ECC_CURRENT = 1 # Current ECC mode. 1=Active. 0=Inactive +NVML_FI_DEV_ECC_PENDING = 2 # Pending ECC mode. 1=Active. 
0=Inactive + +#ECC Count Totals +NVML_FI_DEV_ECC_SBE_VOL_TOTAL = 3 # Total single bit volatile ECC errors +NVML_FI_DEV_ECC_DBE_VOL_TOTAL = 4 # Total double bit volatile ECC errors +NVML_FI_DEV_ECC_SBE_AGG_TOTAL = 5 # Total single bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_DBE_AGG_TOTAL = 6 # Total double bit aggregate (persistent) ECC errors +#Individual ECC locations +NVML_FI_DEV_ECC_SBE_VOL_L1 = 7 # L1 cache single bit volatile ECC errors +NVML_FI_DEV_ECC_DBE_VOL_L1 = 8 # L1 cache double bit volatile ECC errors +NVML_FI_DEV_ECC_SBE_VOL_L2 = 9 # L2 cache single bit volatile ECC errors +NVML_FI_DEV_ECC_DBE_VOL_L2 = 10 # L2 cache double bit volatile ECC errors +NVML_FI_DEV_ECC_SBE_VOL_DEV = 11 # Device memory single bit volatile ECC errors +NVML_FI_DEV_ECC_DBE_VOL_DEV = 12 # Device memory double bit volatile ECC errors +NVML_FI_DEV_ECC_SBE_VOL_REG = 13 # Register file single bit volatile ECC errors +NVML_FI_DEV_ECC_DBE_VOL_REG = 14 # Register file double bit volatile ECC errors +NVML_FI_DEV_ECC_SBE_VOL_TEX = 15 # Texture memory single bit volatile ECC errors +NVML_FI_DEV_ECC_DBE_VOL_TEX = 16 # Texture memory double bit volatile ECC errors +NVML_FI_DEV_ECC_DBE_VOL_CBU = 17 # CBU double bit volatile ECC errors +NVML_FI_DEV_ECC_SBE_AGG_L1 = 18 # L1 cache single bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_DBE_AGG_L1 = 19 # L1 cache double bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_SBE_AGG_L2 = 20 # L2 cache single bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_DBE_AGG_L2 = 21 # L2 cache double bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_SBE_AGG_DEV = 22 # Device memory single bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_DBE_AGG_DEV = 23 # Device memory double bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_SBE_AGG_REG = 24 # Register File single bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_DBE_AGG_REG = 25 # Register File double bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_SBE_AGG_TEX = 26 # Texture memory single bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_DBE_AGG_TEX = 27 # Texture memory double bit aggregate (persistent) ECC errors +NVML_FI_DEV_ECC_DBE_AGG_CBU = 28 # CBU double bit aggregate ECC errors + +# Page Retirement +NVML_FI_DEV_RETIRED_SBE = 29 # Number of retired pages because of single bit errors +NVML_FI_DEV_RETIRED_DBE = 30 # Number of retired pages because of double bit errors +NVML_FI_DEV_RETIRED_PENDING = 31 # If any pages are pending retirement. 1=yes. 0=no. 
+ +# NvLink Flit Error Counters +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 = 32 # NVLink flow control CRC Error Counter for Lane 0 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 = 33 # NVLink flow control CRC Error Counter for Lane 1 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 = 34 # NVLink flow control CRC Error Counter for Lane 2 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 = 35 # NVLink flow control CRC Error Counter for Lane 3 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 = 36 # NVLink flow control CRC Error Counter for Lane 4 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 = 37 # NVLink flow control CRC Error Counter for Lane 5 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL = 38 # NVLink flow control CRC Error Counter total for all Lanes + +# NvLink CRC Data Error Counters +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 = 39 # NVLink data CRC Error Counter for Lane 0 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 = 40 # NVLink data CRC Error Counter for Lane 1 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 = 41 # NVLink data CRC Error Counter for Lane 2 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 = 42 # NVLink data CRC Error Counter for Lane 3 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 = 43 # NVLink data CRC Error Counter for Lane 4 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 = 44 # NVLink data CRC Error Counter for Lane 5 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL = 45 # NvLink data CRC Error Counter total for all Lanes + +# NvLink Replay Error Counters +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 = 46 # NVLink Replay Error Counter for Lane 0 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 = 47 # NVLink Replay Error Counter for Lane 1 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 = 48 # NVLink Replay Error Counter for Lane 2 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 = 49 # NVLink Replay Error Counter for Lane 3 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 = 50 # NVLink Replay Error Counter for Lane 4 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 = 51 # NVLink Replay Error Counter for Lane 5 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL = 52 # NVLink Replay Error Counter total for all Lanes + +# NvLink Recovery Error Counters +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 = 53 # NVLink Recovery Error Counter for Lane 0 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 = 54 # NVLink Recovery Error Counter for Lane 1 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 = 55 # NVLink Recovery Error Counter for Lane 2 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 = 56 # NVLink Recovery Error Counter for Lane 3 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 = 57 # NVLink Recovery Error Counter for Lane 4 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 = 58 # NVLink Recovery Error Counter for Lane 5 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL = 59 # NVLink Recovery Error Counter total for all Lanes + +# NvLink Bandwidth Counters +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L0 = 60 # NVLink Bandwidth Counter for Counter Set 0, Lane 0 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L1 = 61 # NVLink Bandwidth Counter for Counter Set 0, Lane 1 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L2 = 62 # NVLink Bandwidth Counter for Counter Set 0, Lane 2 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L3 = 63 # NVLink Bandwidth Counter for Counter Set 0, Lane 3 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L4 = 64 # NVLink Bandwidth Counter for Counter Set 0, Lane 4 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L5 = 65 # NVLink Bandwidth Counter for Counter Set 0, Lane 5 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL = 66 # NVLink Bandwidth Counter Total for Counter Set 0, All Lanes + +# NvLink Bandwidth Counters 
+NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L0 = 67 # NVLink Bandwidth Counter for Counter Set 1, Lane 0 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L1 = 68 # NVLink Bandwidth Counter for Counter Set 1, Lane 1 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L2 = 69 # NVLink Bandwidth Counter for Counter Set 1, Lane 2 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L3 = 70 # NVLink Bandwidth Counter for Counter Set 1, Lane 3 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L4 = 71 # NVLink Bandwidth Counter for Counter Set 1, Lane 4 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L5 = 72 # NVLink Bandwidth Counter for Counter Set 1, Lane 5 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL = 73 # NVLink Bandwidth Counter Total for Counter Set 1, All Lanes + +# Perf Policy Counters +NVML_FI_DEV_PERF_POLICY_POWER = 74 # Perf Policy Counter for Power Policy +NVML_FI_DEV_PERF_POLICY_THERMAL = 75 # Perf Policy Counter for Thermal Policy +NVML_FI_DEV_PERF_POLICY_SYNC_BOOST = 76 # Perf Policy Counter for Sync boost Policy +NVML_FI_DEV_PERF_POLICY_BOARD_LIMIT = 77 # Perf Policy Counter for Board Limit +NVML_FI_DEV_PERF_POLICY_LOW_UTILIZATION = 78 # Perf Policy Counter for Low GPU Utilization Policy +NVML_FI_DEV_PERF_POLICY_RELIABILITY = 79 # Perf Policy Counter for Reliability Policy +NVML_FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS = 80 # Perf Policy Counter for Total App Clock Policy +NVML_FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS = 81 # Perf Policy Counter for Total Base Clocks Policy + +# Memory temperatures +NVML_FI_DEV_MEMORY_TEMP = 82 # Memory temperature for the device + +# Energy Counter +NVML_FI_DEV_TOTAL_ENERGY_CONSUMPTION = 83 # Total energy consumption for the GPU in mJ since the driver was last reloaded + +# NVLink Speed +NVML_FI_DEV_NVLINK_SPEED_MBPS_L0 = 84 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L1 = 85 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L2 = 86 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L3 = 87 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L4 = 88 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L5 = 89 +NVML_FI_DEV_NVLINK_SPEED_MBPS_COMMON = 90 + +# NVLink Link Count +NVML_FI_DEV_NVLINK_LINK_COUNT = 91 + +# Page Retirement pending fields +NVML_FI_DEV_RETIRED_PENDING_SBE = 92 +NVML_FI_DEV_RETIRED_PENDING_DBE = 93 + +# PCIe replay and replay rollover counters +NVML_FI_DEV_PCIE_REPLAY_COUNTER = 94 +NVML_FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER = 95 + +# NvLink Flit Error Counters +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 = 96 # NVLink flow control CRC Error Counter for Lane 6 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 = 97 # NVLink flow control CRC Error Counter for Lane 7 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 = 98 # NVLink flow control CRC Error Counter for Lane 8 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 = 99 # NVLink flow control CRC Error Counter for Lane 9 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 = 100 # NVLink flow control CRC Error Counter for Lane 10 +NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 = 101 # NVLink flow control CRC Error Counter for Lane 11 + +# NvLink CRC Data Error Counters +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 = 102 # NVLink data CRC Error Counter for Lane 6 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 = 103 # NVLink data CRC Error Counter for Lane 7 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 = 104 # NVLink data CRC Error Counter for Lane 8 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 = 105 # NVLink data CRC Error Counter for Lane 9 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 = 106 # NVLink data CRC Error Counter for Lane 10 +NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 = 107 # NVLink data CRC Error Counter for Lane 11 + +# NvLink Replay Error Counters +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 = 108 # NVLink 
Replay Error Counter for Lane 6 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 = 109 # NVLink Replay Error Counter for Lane 7 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 = 110 # NVLink Replay Error Counter for Lane 8 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 = 111 # NVLink Replay Error Counter for Lane 9 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 = 112 # NVLink Replay Error Counter for Lane 10 +NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 = 113 # NVLink Replay Error Counter for Lane 11 + +# NvLink Recovery Error Counters +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 = 114 # NVLink Recovery Error Counter for Lane 6 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 = 115 # NVLink Recovery Error Counter for Lane 7 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 = 116 # NVLink Recovery Error Counter for Lane 8 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 = 117 # NVLink Recovery Error Counter for Lane 9 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 = 118 # NVLink Recovery Error Counter for Lane 10 +NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 = 119 # NVLink Recovery Error Counter for Lane 11 + +# NvLink Bandwidth Counters +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L6 = 120 # NVLink Bandwidth Counter for Counter Set 0, Lane 6 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L7 = 121 # NVLink Bandwidth Counter for Counter Set 0, Lane 7 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L8 = 122 # NVLink Bandwidth Counter for Counter Set 0, Lane 8 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L9 = 123 # NVLink Bandwidth Counter for Counter Set 0, Lane 9 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L10 = 124 # NVLink Bandwidth Counter for Counter Set 0, Lane 10 +NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L11 = 125 # NVLink Bandwidth Counter for Counter Set 0, Lane 11 + +# NvLink Bandwidth Counters +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L6 = 126 # NVLink Bandwidth Counter for Counter Set 1, Lane 6 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L7 = 127 # NVLink Bandwidth Counter for Counter Set 1, Lane 7 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L8 = 128 # NVLink Bandwidth Counter for Counter Set 1, Lane 8 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L9 = 129 # NVLink Bandwidth Counter for Counter Set 1, Lane 9 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L10 = 130 # NVLink Bandwidth Counter for Counter Set 1, Lane 10 +NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L11 = 131 # NVLink Bandwidth Counter for Counter Set 1, Lane 11 + +# NVLink Speed +NVML_FI_DEV_NVLINK_SPEED_MBPS_L6 = 132 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L7 = 133 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L8 = 134 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L9 = 135 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L10 = 136 +NVML_FI_DEV_NVLINK_SPEED_MBPS_L11 = 137 + +# NVLink Throughput Counters +NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX = 138 # NVLink TX Data throughput in KiB +NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX = 139 # NVLink RX Data throughput in KiB +NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX = 140 # NVLink TX Data + protocol overhead in KiB +NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX = 141 # NVLink RX Data + protocol overhead in KiB + +# Row Remapper +NVML_FI_DEV_REMAPPED_COR = 142 +NVML_FI_DEV_REMAPPED_UNC = 143 +NVML_FI_DEV_REMAPPED_PENDING = 144 +NVML_FI_DEV_REMAPPED_FAILURE = 145 + +#Remote device NVLink ID +NVML_FI_DEV_NVLINK_REMOTE_NVLINK_ID = 146 + +# Number of NVLinks connected to NVSwitch +NVML_FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT = 147 + +# NvLink ECC Data Error Counters +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 = 148 #< NVLink data ECC Error Counter for Link 0 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 = 149 #< NVLink data ECC Error Counter for Link 1 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 = 150 #< NVLink data ECC Error Counter for Link 2 
+NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 = 151 #< NVLink data ECC Error Counter for Link 3 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 = 152 #< NVLink data ECC Error Counter for Link 4 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 = 153 #< NVLink data ECC Error Counter for Link 5 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 = 154 #< NVLink data ECC Error Counter for Link 6 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 = 155 #< NVLink data ECC Error Counter for Link 7 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 = 156 #< NVLink data ECC Error Counter for Link 8 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 = 157 #< NVLink data ECC Error Counter for Link 9 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 = 158 #< NVLink data ECC Error Counter for Link 10 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 = 159 #< NVLink data ECC Error Counter for Link 11 +NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL = 160 #< NvLink data ECC Error Counter total for all Links + +NVML_FI_DEV_NVLINK_ERROR_DL_REPLAY = 161 +NVML_FI_DEV_NVLINK_ERROR_DL_RECOVERY = 162 +NVML_FI_DEV_NVLINK_ERROR_DL_CRC = 163 +NVML_FI_DEV_NVLINK_GET_SPEED = 164 +NVML_FI_DEV_NVLINK_GET_STATE = 165 +NVML_FI_DEV_NVLINK_GET_VERSION = 166 + +NVML_FI_DEV_NVLINK_GET_POWER_STATE = 167 +NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD = 168 + +NVML_FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER = 169 + +NVML_FI_DEV_C2C_LINK_COUNT = 170 +NVML_FI_DEV_C2C_LINK_GET_STATUS = 171 +NVML_FI_DEV_C2C_LINK_GET_MAX_BW = 172 + +NVML_FI_DEV_PCIE_COUNT_CORRECTABLE_ERRORS = 173 +NVML_FI_DEV_PCIE_COUNT_NAKS_RECEIVED = 174 +NVML_FI_DEV_PCIE_COUNT_RECEIVER_ERROR = 175 +NVML_FI_DEV_PCIE_COUNT_BAD_TLP = 176 +NVML_FI_DEV_PCIE_COUNT_NAKS_SENT = 177 +NVML_FI_DEV_PCIE_COUNT_BAD_DLLP = 178 +NVML_FI_DEV_PCIE_COUNT_NON_FATAL_ERROR = 179 +NVML_FI_DEV_PCIE_COUNT_FATAL_ERROR = 180 +NVML_FI_DEV_PCIE_COUNT_UNSUPPORTED_REQ = 181 +NVML_FI_DEV_PCIE_COUNT_LCRC_ERROR = 182 +NVML_FI_DEV_PCIE_COUNT_LANE_ERROR = 183 + +NVML_FI_DEV_IS_RESETLESS_MIG_SUPPORTED = 184 + +NVML_FI_DEV_POWER_AVERAGE = 185 +NVML_FI_DEV_POWER_INSTANT = 186 +NVML_FI_DEV_POWER_MIN_LIMIT = 187 +NVML_FI_DEV_POWER_MAX_LIMIT = 188 +NVML_FI_DEV_POWER_DEFAULT_LIMIT = 189 +NVML_FI_DEV_POWER_CURRENT_LIMIT = 190 +NVML_FI_DEV_ENERGY = 191 +NVML_FI_DEV_POWER_REQUESTED_LIMIT = 192 + +NVML_FI_DEV_TEMPERATURE_SHUTDOWN_TLIMIT = 193 +NVML_FI_DEV_TEMPERATURE_SLOWDOWN_TLIMIT = 194 +NVML_FI_DEV_TEMPERATURE_MEM_MAX_TLIMIT = 195 +NVML_FI_DEV_TEMPERATURE_GPU_MAX_TLIMIT = 196 + +NVML_FI_DEV_PCIE_COUNT_TX_BYTES = 197 +NVML_FI_DEV_PCIE_COUNT_RX_BYTES = 198 + +NVML_FI_DEV_IS_MIG_MODE_INDEPENDENT_MIG_QUERY_CAPABLE = 199 + +NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_MAX = 200 + +NVML_FI_DEV_NVLINK_COUNT_XMIT_PACKETS = 201 +NVML_FI_DEV_NVLINK_COUNT_XMIT_BYTES = 202 +NVML_FI_DEV_NVLINK_COUNT_RCV_PACKETS = 203 +NVML_FI_DEV_NVLINK_COUNT_RCV_BYTES = 204 +NVML_FI_DEV_NVLINK_COUNT_VL15_DROPPED = 205 # Deprecated, do not use +NVML_FI_DEV_NVLINK_COUNT_MALFORMED_PACKET_ERRORS = 206 +NVML_FI_DEV_NVLINK_COUNT_BUFFER_OVERRUN_ERRORS = 207 +NVML_FI_DEV_NVLINK_COUNT_RCV_ERRORS = 208 +NVML_FI_DEV_NVLINK_COUNT_RCV_REMOTE_ERRORS = 209 +NVML_FI_DEV_NVLINK_COUNT_RCV_GENERAL_ERRORS = 210 +NVML_FI_DEV_NVLINK_COUNT_LOCAL_LINK_INTEGRITY_ERRORS = 211 +NVML_FI_DEV_NVLINK_COUNT_XMIT_DISCARDS = 212 + +NVML_FI_DEV_NVLINK_COUNT_LINK_RECOVERY_SUCCESSFUL_EVENTS = 213 +NVML_FI_DEV_NVLINK_COUNT_LINK_RECOVERY_FAILED_EVENTS = 214 +NVML_FI_DEV_NVLINK_COUNT_LINK_RECOVERY_EVENTS = 215 + +NVML_FI_DEV_NVLINK_COUNT_RAW_BER_LANE0 = 216 # Deprecated, do not use +NVML_FI_DEV_NVLINK_COUNT_RAW_BER_LANE1 = 217 # Deprecated, do 
not use +NVML_FI_DEV_NVLINK_COUNT_RAW_BER = 218 # Deprecated, do not use +NVML_FI_DEV_NVLINK_COUNT_EFFECTIVE_ERRORS = 219 +NVML_FI_DEV_NVLINK_COUNT_EFFECTIVE_BER = 220 +NVML_FI_DEV_NVLINK_COUNT_SYMBOL_ERRORS = 221 +NVML_FI_DEV_NVLINK_COUNT_SYMBOL_BER = 222 + +NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_MIN = 223 +NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_UNITS = 224 # Values are in the form NVML_NVLINK_LOW_POWER_THRESHOLD_UNIT_* +NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_SUPPORTED = 225 + +NVML_FI_DEV_RESET_STATUS = 226 # Deprecated use NVML_FI_DEV_GET_GPU_RECOVERY_ACTION instead +NVML_FI_DEV_DRAIN_AND_RESET_STATUS = 227 # Deprecated use NVML_FI_DEV_GET_GPU_RECOVERY_ACTION instead +NVML_FI_DEV_PCIE_OUTBOUND_ATOMICS_MASK = 228 +NVML_FI_DEV_PCIE_INBOUND_ATOMICS_MASK = 229 +NVML_FI_DEV_GET_GPU_RECOVERY_ACTION = 230 + +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_0 = 235 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_1 = 236 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_2 = 237 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_3 = 238 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_4 = 239 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_5 = 240 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_6 = 241 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_7 = 242 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_8 = 243 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_9 = 244 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_10 = 245 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_11 = 246 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_12 = 247 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_13 = 248 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_14 = 249 +NVML_FI_DEV_NVLINK_COUNT_FEC_HISTORY_15 = 250 +NVML_FI_PWR_SMOOTHING_ENABLED = 251 # Enablement (0/DISABLED or 1/ENABLED) +NVML_FI_PWR_SMOOTHING_PRIV_LVL = 252 # Current privilege level +NVML_FI_PWR_SMOOTHING_IMM_RAMP_DOWN_ENABLED = 253 # Immediate ramp down enablement (0/DISABLED or 1/ENABLED) +NVML_FI_PWR_SMOOTHING_APPLIED_TMP_CEIL = 254 # Applied TMP ceiling value +NVML_FI_PWR_SMOOTHING_APPLIED_TMP_FLOOR = 255 # Applied TMP floor value +NVML_FI_PWR_SMOOTHING_MAX_PERCENT_TMP_FLOOR_SETTING = 256 # Max % TMP Floor value +NVML_FI_PWR_SMOOTHING_MIN_PERCENT_TMP_FLOOR_SETTING = 257 # Min % TMP Floor value +NVML_FI_PWR_SMOOTHING_HW_CIRCUITRY_PERCENT_LIFETIME_REMAINING = 258 # HW Circuitry % lifetime remaining +NVML_FI_PWR_SMOOTHING_MAX_NUM_PRESET_PROFILES = 259 # Max number of preset profiles +NVML_FI_PWR_SMOOTHING_PROFILE_PERCENT_TMP_FLOOR = 260 # % TMP floor for a given profile +NVML_FI_PWR_SMOOTHING_PROFILE_RAMP_UP_RATE = 261 # Ramp up rate in mW/s for a given profile +NVML_FI_PWR_SMOOTHING_PROFILE_RAMP_DOWN_RATE = 262 # Ramp down rate in mW/s for a given profile +NVML_FI_PWR_SMOOTHING_PROFILE_RAMP_DOWN_HYST_VAL = 263 # Ramp down hysteresis value in ms for a given profile +NVML_FI_PWR_SMOOTHING_ACTIVE_PRESET_PROFILE = 264 # Active preset profile number +NVML_FI_PWR_SMOOTHING_ADMIN_OVERRIDE_PERCENT_TMP_FLOOR = 265 # % TMP floor for a given profile +NVML_FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_UP_RATE = 266 # Ramp up rate in mW/s for a given profile +NVML_FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_RATE = 267 # Ramp down rate in mW/s for a given profile +NVML_FI_PWR_SMOOTHING_ADMIN_OVERRIDE_RAMP_DOWN_HYST_VAL = 268 # Ramp down hysteresis value in ms for a given profile + +NVML_FI_MAX = 269 # One greater than the largest field ID defined above + +# NVML_FI_DEV_NVLINK_GET_STATE state enums +NVML_NVLINK_STATE_INACTIVE = 0x0 +NVML_NVLINK_STATE_ACTIVE = 0x1 +NVML_NVLINK_STATE_SLEEP = 0x2 + +NVML_NVLINK_LOW_POWER_THRESHOLD_UNIT_100US = 0 # NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_UNITS 
+NVML_NVLINK_LOW_POWER_THRESHOLD_UNIT_50US = 1 # NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD_UNITS + +## Enums needed for the method nvmlDeviceGetVirtualizationMode and nvmlDeviceSetVirtualizationMode +NVML_GPU_VIRTUALIZATION_MODE_NONE = 0 # Represents Bare Metal GPU +NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1 # Device is associated with GPU-Passthorugh +NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2 # Device is associated with vGPU inside virtual machine. +NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3 # Device is associated with VGX hypervisor in vGPU mode +NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4 # Device is associated with VGX hypervisor in vSGA mode + +## Lib loading ## +nvmlLib = None +libLoadLock = threading.Lock() +_nvmlLib_refcount = 0 # Incremented on each nvmlInit and decremented on nvmlShutdown + +## vGPU Management +_nvmlVgpuTypeId_t = c_uint +_nvmlVgpuInstance_t = c_uint + +_nvmlVgpuVmIdType_t = c_uint +NVML_VGPU_VM_ID_DOMAIN_ID = 0 +NVML_VGPU_VM_ID_UUID = 1 + +_nvmlGridLicenseFeatureCode_t = c_uint +NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0 +NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1 +NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2 +NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = 2 # deprecated, use NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX. +NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3 +NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4 + +_nvmlGridLicenseExpiryStatus_t = c_uint8 +NVML_GRID_LICENSE_EXPIRY_NOT_AVAILABLE = 0, # Expiry information not available +NVML_GRID_LICENSE_EXPIRY_INVALID = 1, # Invalid expiry or error fetching expiry +NVML_GRID_LICENSE_EXPIRY_VALID = 2, # Valid expiry +NVML_GRID_LICENSE_EXPIRY_NOT_APPLICABLE = 3, # Expiry not applicable +NVML_GRID_LICENSE_EXPIRY_PERMANENT = 4, # Permanent expiry + +_nvmlVgpuCapability_t = c_uint +NVML_VGPU_CAP_NVLINK_P2P = 0 # vGPU P2P over NVLink is supported +NVML_VGPU_CAP_GPUDIRECT = 1 # GPUDirect capability is supported +NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE = 2 # vGPU profile cannot be mixed with other vGPU profiles in same VM +NVML_VGPU_CAP_EXCLUSIVE_TYPE = 3 # vGPU profile cannot run on a GPU alongside other profiles of different type +NVML_VGPU_CAP_EXCLUSIVE_SIZE = 4 # vGPU profile cannot run on a GPU alongside other profiles of different size +NVML_VGPU_CAP_COUNT = 5 + +_nvmlVgpuDriverCapability_t = c_uint +NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = 0 # Supports mixing of different vGPU profiles within one guest VM +NVML_VGPU_DRIVER_CAP_WARM_UPDATE = 1 # Supports FSR and warm update of vGPU host driver without terminating the running guest VM +NVML_VGPU_DRIVER_CAP_COUNT = 2 + +_nvmlDeviceVgpuCapability_t = c_uint +NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = 0 # Query whether the fractional vGPU profiles on this GPU can be used in multi-vGPU configurations +NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = 1 # Query whether the GPU supports concurrent execution of timesliced vGPU profiles of differing types +NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = 2 # Query whether the GPU supports concurrent execution of timesliced vGPU profiles of differing framebuffer sizes +NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = 3 # Query the GPU's read_device_buffer expected bandwidth capacity in megabytes per second +NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = 4 # Query the GPU's write_device_buffer expected bandwidth capacity in megabytes per second +NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING = 5 # Query whether the vGPU profiles on the GPU supports migration data streaming +NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU = 6 # Set/Get support of 
mini-quarter vGPU profiles +NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = 7 # Set/Get support for compute media engine vGPU profiles +NVML_DEVICE_VGPU_CAP_WARM_UPDATE = 8 # Query whether the GPU supports FSR and warm update +NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = 9 # Query whether the GPU supports reporting of placements of timesliced vGPU profiles with identical framebuffer sizes +NVML_DEVICE_VGPU_CAP_COUNT = 10 + +_nvmlVgpuGuestInfoState_t = c_uint +NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0 +NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1 + +_nvmlVgpuVmCompatibility_t = c_uint +NVML_VGPU_VM_COMPATIBILITY_NONE = 0x0 +NVML_VGPU_VM_COMPATIBILITY_COLD = 0x1 +NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2 +NVML_VGPU_VM_COMPATIBILITY_SLEEP = 0x4 +NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8 + +_nvmlVgpuPgpuCompatibilityLimitCode_t = c_uint +NVML_VGPU_COMPATIBILITY_LIMIT_NONE = 0x0 +NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = 0x1 +NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2 +NVML_VGPU_COMPATIBILITY_LIMIT_GPU = 0x4 +NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000 + +_nvmlHostVgpuMode_t = c_uint +NVML_HOST_VGPU_MODE_NON_SRIOV = 0 +NVML_HOST_VGPU_MODE_SRIOV = 1 + +_nvmlConfComputeGpusReadyState_t = c_uint +NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE = 0 +NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE = 1 + +_nvmlConfComputeGpuCaps_t = c_uint +NVML_CC_SYSTEM_GPUS_CC_NOT_CAPABLE = 0 +NVML_CC_SYSTEM_GPUS_CC_CAPABLE = 1 + +_nvmlConfComputeCpuCaps_t = c_uint +NVML_CC_SYSTEM_CPU_CAPS_NONE = 0 +NVML_CC_SYSTEM_CPU_CAPS_AMD_SEV = 1 +NVML_CC_SYSTEM_CPU_CAPS_INTEL_TDX = 2 +NVML_CC_SYSTEM_CPU_CAPS_AMD_SEV_SNP = 3 +NVML_CC_SYSTEM_CPU_CAPS_AMD_SNP_VTOM = 4 + +_nvmlConfComputeDevToolsMode_t = c_uint +NVML_CC_SYSTEM_DEVTOOLS_MODE_OFF = 0 +NVML_CC_SYSTEM_DEVTOOLS_MODE_ON = 1 + +NVML_CC_SYSTEM_MULTIGPU_NONE = 0 +NVML_CC_SYSTEM_MULTIGPU_PROTECTED_PCIE = 1 + +NVML_CC_SYSTEM_ENVIRONMENT_UNAVAILABLE = 0 +NVML_CC_SYSTEM_ENVIRONMENT_SIM = 1 +NVML_CC_SYSTEM_ENVIRONMENT_PROD = 2 + +_nvmlConfComputeCcFeature_t = c_uint +NVML_CC_SYSTEM_FEATURE_DISABLED = 0 +NVML_CC_SYSTEM_FEATURE_ENABLED = 1 + +_nvmlConfComputeCcKeyRotationThreshAttackerAdv_t = c_uint +NVML_CC_KEY_ROTATION_THRESH_ATTACKER_ADVANTAGE_MIN = 50 +NVML_CC_KEY_ROTATION_THRESH_ATTACKER_ADVANTAGE_MAX = 65 + +# GSP firmware +NVML_GSP_FIRMWARE_VERSION_BUF_SIZE = 0x40 + +class NVMLLibraryMismatchError(Exception): + pass + +## Error Checking ## +class NVMLError(Exception): + _valClassMapping = dict() + # List of currently known error codes + _errcode_to_string = { + NVML_ERROR_UNINITIALIZED: "Uninitialized", + NVML_ERROR_INVALID_ARGUMENT: "Invalid Argument", + NVML_ERROR_NOT_SUPPORTED: "Not Supported", + NVML_ERROR_NO_PERMISSION: "Insufficient Permissions", + NVML_ERROR_ALREADY_INITIALIZED: "Already Initialized", + NVML_ERROR_NOT_FOUND: "Not Found", + NVML_ERROR_INSUFFICIENT_SIZE: "Insufficient Size", + NVML_ERROR_INSUFFICIENT_POWER: "Insufficient External Power", + NVML_ERROR_DRIVER_NOT_LOADED: "Driver Not Loaded", + NVML_ERROR_TIMEOUT: "Timeout", + NVML_ERROR_IRQ_ISSUE: "Interrupt Request Issue", + NVML_ERROR_LIBRARY_NOT_FOUND: "NVML Shared Library Not Found", + NVML_ERROR_FUNCTION_NOT_FOUND: "Function Not Found", + NVML_ERROR_CORRUPTED_INFOROM: "Corrupted infoROM", + NVML_ERROR_GPU_IS_LOST: "GPU is lost", + NVML_ERROR_RESET_REQUIRED: "GPU requires restart", + NVML_ERROR_OPERATING_SYSTEM: "The operating system has blocked the request.", + NVML_ERROR_LIB_RM_VERSION_MISMATCH: "RM has detected an NVML/RM version mismatch.", + NVML_ERROR_MEMORY: "Insufficient 
Memory", + NVML_ERROR_UNKNOWN: "Unknown Error", + } + def __new__(typ, value): + ''' + Maps value to a proper subclass of NVMLError. + See _extractNVMLErrorsAsClasses function for more details + ''' + if typ == NVMLError: + typ = NVMLError._valClassMapping.get(value, typ) + obj = Exception.__new__(typ) + obj.value = value + return obj + def __str__(self): + try: + if self.value not in NVMLError._errcode_to_string: + NVMLError._errcode_to_string[self.value] = str(nvmlErrorString(self.value)) + return NVMLError._errcode_to_string[self.value] + except NVMLError: + return "NVML Error with code %d" % self.value + def __eq__(self, other): + return self.value == other.value + +def nvmlExceptionClass(nvmlErrorCode): + if nvmlErrorCode not in NVMLError._valClassMapping: + raise ValueError('nvmlErrorCode %s is not valid' % nvmlErrorCode) + return NVMLError._valClassMapping[nvmlErrorCode] + +def _extractNVMLErrorsAsClasses(): + ''' + Generates a hierarchy of classes on top of NVMLError class. + + Each NVML Error gets a new NVMLError subclass. This way try,except blocks can filter appropriate + exceptions more easily. + + NVMLError is a parent class. Each NVML_ERROR_* gets it's own subclass. + e.g. NVML_ERROR_ALREADY_INITIALIZED will be turned into NVMLError_AlreadyInitialized + ''' + this_module = sys.modules[__name__] + nvmlErrorsNames = [x for x in dir(this_module) if x.startswith("NVML_ERROR_")] + for err_name in nvmlErrorsNames: + # e.g. Turn NVML_ERROR_ALREADY_INITIALIZED into NVMLError_AlreadyInitialized + class_name = "NVMLError_" + string.capwords(err_name.replace("NVML_ERROR_", ""), "_").replace("_", "") + err_val = getattr(this_module, err_name) + def gen_new(val): + def new(typ): + obj = NVMLError.__new__(typ, val) + return obj + return new + new_error_class = type(class_name, (NVMLError,), {'__new__': gen_new(err_val)}) + new_error_class.__module__ = __name__ + setattr(this_module, class_name, new_error_class) + NVMLError._valClassMapping[err_val] = new_error_class +_extractNVMLErrorsAsClasses() + +def _nvmlCheckReturn(ret): + if (ret != NVML_SUCCESS): + raise NVMLError(ret) + return ret + +## Function access ## +_nvmlGetFunctionPointer_cache = dict() # function pointers are cached to prevent unnecessary libLoadLock locking +def _nvmlGetFunctionPointer(name): + global nvmlLib + + if name in _nvmlGetFunctionPointer_cache: + return _nvmlGetFunctionPointer_cache[name] + + libLoadLock.acquire() + try: + # ensure library was loaded + if (nvmlLib == None): + raise NVMLError(NVML_ERROR_UNINITIALIZED) + try: + _nvmlGetFunctionPointer_cache[name] = getattr(nvmlLib, name) + return _nvmlGetFunctionPointer_cache[name] + except AttributeError: + raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND) + finally: + # lock is always freed + libLoadLock.release() + +## Alternative object +# Allows the object to be printed +# Allows mismatched types to be assigned +# - like None when the Structure variant requires c_uint +class nvmlFriendlyObject(object): + def __init__(self, dictionary): + for x in dictionary: + setattr(self, x, dictionary[x]) + def __str__(self): + return self.__dict__.__str__() + +def nvmlStructToFriendlyObject(struct): + d = {} + for x in struct._fields_: + key = x[0] + value = getattr(struct, key) + # only need to convert from bytes if bytes, no need to check python version. 
+ d[key] = value.decode() if isinstance(value, bytes) else value + obj = nvmlFriendlyObject(d) + return obj + +# pack the object so it can be passed to the NVML library +def nvmlFriendlyObjectToStruct(obj, model): + for x in model._fields_: + key = x[0] + value = obj.__dict__[key] + # any c_char_p in python3 needs to be bytes, default encoding works fine. + if sys.version_info >= (3,): + setattr(model, key, value.encode()) + else: + setattr(model, key, value) + return model + +## Unit structures +class struct_c_nvmlUnit_t(Structure): + pass # opaque handle +c_nvmlUnit_t = POINTER(struct_c_nvmlUnit_t) + +class _PrintableStructure(Structure): + """ + Abstract class that produces nicer __str__ output than ctypes.Structure. + e.g. instead of: + >>> print str(obj) + <class_name object at 0x7fdf82fef9e0> + this class will print + class_name(field_name: formatted_value, field_name: formatted_value) + + _fmt_ dictionary of <str _field_ name> -> <str format> + e.g. class that has _field_ 'hex_value', c_uint could be formatted with + _fmt_ = {"hex_value" : "%08X"} + to produce nicer output. + Default fomratting string for all fields can be set with key "<default>" like: + _fmt_ = {"<default>" : "%d MHz"} # e.g all values are numbers in MHz. + If not set it's assumed to be just "%s" + + Exact format of returned str from this class is subject to change in the future. + """ + _fmt_ = {} + def __str__(self): + result = [] + for x in self._fields_: + key = x[0] + value = getattr(self, key) + fmt = "%s" + if key in self._fmt_: + fmt = self._fmt_[key] + elif "<default>" in self._fmt_: + fmt = self._fmt_["<default>"] + result.append(("%s: " + fmt) % (key, value)) + return self.__class__.__name__ + "(" + ", ".join(result) + ")" + + def __getattribute__(self, name): + res = super(_PrintableStructure, self).__getattribute__(name) + # need to convert bytes to unicode for python3 don't need to for python2 + # Python 2 strings are of both str and bytes + # Python 3 strings are not of type bytes + # ctypes should convert everything to the correct values otherwise + if isinstance(res, bytes): + if isinstance(res, str): + return res + return res.decode() + return res + + def __setattr__(self, name, value): + if isinstance(value, str): + # encoding a python2 string returns the same value, since python2 strings are bytes already + # bytes passed in python3 will be ignored. 
+ value = value.encode() + super(_PrintableStructure, self).__setattr__(name, value) + +class c_nvmlUnitInfo_t(_PrintableStructure): + _fields_ = [ + ('name', c_char * 96), + ('id', c_char * 96), + ('serial', c_char * 96), + ('firmwareVersion', c_char * 96), + ] + +class c_nvmlC2cModeInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('isC2cEnabled', c_uint) + ] + +nvmlC2cModeInfo_v1 = 0x1000008; + +class c_nvmlLedState_t(_PrintableStructure): + _fields_ = [ + ('cause', c_char * 256), + ('color', _nvmlLedColor_t), + ] + +class c_nvmlPSUInfo_t(_PrintableStructure): + _fields_ = [ + ('state', c_char * 256), + ('current', c_uint), + ('voltage', c_uint), + ('power', c_uint), + ] + +class c_nvmlUnitFanInfo_t(_PrintableStructure): + _fields_ = [ + ('speed', c_uint), + ('state', _nvmlFanState_t), + ] + +class c_nvmlUnitFanSpeeds_t(_PrintableStructure): + _fields_ = [ + ('fans', c_nvmlUnitFanInfo_t * 24), + ('count', c_uint) + ] + +## Device structures +class struct_c_nvmlDevice_t(Structure): + pass # opaque handle +c_nvmlDevice_t = POINTER(struct_c_nvmlDevice_t) + +class nvmlPciInfoExt_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('domain', c_uint), + ('bus', c_uint), + ('device', c_uint), + ('pciDeviceId', c_uint), + ('pciSubSystemId', c_uint), + ('baseClass', c_uint), + ('subClass', c_uint), + ('busId', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE), + ] + _fmt_ = { + 'version' : "0x%04X", + 'domain' : "0x%04X", + 'bus' : "0x%02X", + 'device' : "0x%02X", + 'pciDeviceId' : "0x%08X", + 'pciSubSystemId' : "0x%08X", + 'baseClass' : "0x%01X", + 'subClass' : "0x%01X", + } + +nvmlPciInfoExt_v1 = 0x1000040 + +# Legacy pciInfo used for _v1 and _v2 +class nvmlPciInfo_v2_t(_PrintableStructure): + _fields_ = [ + ('busId', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE), + ('domain', c_uint), + ('bus', c_uint), + ('device', c_uint), + ('pciDeviceId', c_uint), + + # Added in 2.285 + ('pciSubSystemId', c_uint), + ('reserved0', c_uint), + ('reserved1', c_uint), + ('reserved2', c_uint), + ('reserved3', c_uint), + ] + _fmt_ = { + 'domain' : "0x%04X", + 'bus' : "0x%02X", + 'device' : "0x%02X", + 'pciDeviceId' : "0x%08X", + 'pciSubSystemId' : "0x%08X", + } + +class nvmlPciInfo_t(_PrintableStructure): + _fields_ = [ + # Moved to the new busId location below + ('busIdLegacy', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE), + ('domain', c_uint), + ('bus', c_uint), + ('device', c_uint), + ('pciDeviceId', c_uint), + + # Added in 2.285 + ('pciSubSystemId', c_uint), + # New busId replaced the long deprecated and reserved fields with a + # field of the same size in 9.0 + ('busId', c_char * NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE), + ] + _fmt_ = { + 'domain' : "0x%08X", + 'bus' : "0x%02X", + 'device' : "0x%02X", + 'pciDeviceId' : "0x%08X", + 'pciSubSystemId' : "0x%08X", + } + +class c_nvmlSystemDriverBranchInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ("branch", c_char * NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE), + ] + +SystemDriverBranchInfo_v1 = 0x1000054 + +class c_nvmlExcludedDeviceInfo_t(_PrintableStructure): + _fields_ = [ + ('pci', nvmlPciInfo_t), + ('uuid', c_char * NVML_DEVICE_UUID_BUFFER_SIZE) + ] + +class nvmlNvLinkUtilizationControl_t(_PrintableStructure): + _fields_ = [ + ('units', _nvmlNvLinkUtilizationCountUnits_t), + ('pktfilter', _nvmlNvLinkUtilizationCountPktTypes_t), + ] + +class c_nvmlMemory_t(_PrintableStructure): + _fields_ = [ + ('total', c_ulonglong), + ('free', c_ulonglong), + ('used', c_ulonglong), + ] + _fmt_ = {'<default>': "%d B"} + +class 
c_nvmlMemory_v2_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('total', c_ulonglong), + ('reserved', c_ulonglong), + ('free', c_ulonglong), + ('used', c_ulonglong), + ] + _fmt_ = {'<default>': "%d B"} + +nvmlMemory_v2 = 0x02000028 + +class c_nvmlBAR1Memory_t(_PrintableStructure): + _fields_ = [ + ('bar1Total', c_ulonglong), + ('bar1Free', c_ulonglong), + ('bar1Used', c_ulonglong), + ] + _fmt_ = {'<default>': "%d B"} + +class nvmlClkMonFaultInfo_t(Structure): + _fields_ = [("clkApiDomain", c_uint), + ("clkDomainFaultMask", c_uint) + ] + +MAX_CLK_DOMAINS = 32 + +class nvmlClkMonStatus_t(Structure): + _fields_ = [("bGlobalStatus", c_uint), + ("clkMonListSize", c_uint), + ("clkMonList", nvmlClkMonFaultInfo_t * MAX_CLK_DOMAINS) + ] + +# On Windows with the WDDM driver, usedGpuMemory is reported as None +# Code that processes this structure should check for None, I.E. +# +# if (info.usedGpuMemory == None): +# # TODO handle the error +# pass +# else: +# print("Using %d MiB of memory" % (info.usedGpuMemory / 1024 / 1024)) +# endif +# +# See NVML documentation for more information +class c_nvmlProcessInfo_v2_t(_PrintableStructure): + _fields_ = [ + ('pid', c_uint), + ('usedGpuMemory', c_ulonglong), + ('gpuInstanceId', c_uint), + ('computeInstanceId', c_uint), + ] + _fmt_ = {'usedGpuMemory': "%d B"} + +c_nvmlProcessInfo_v3_t = c_nvmlProcessInfo_v2_t + +c_nvmlProcessInfo_t = c_nvmlProcessInfo_v3_t + +_nvmlProcessMode_t = c_uint +NVML_PROCESS_MODE_COMPUTE = 0 +NVML_PROCESS_MODE_GRAPHICS = 1 +NVML_PROCESS_MODE_MPS = 2 + +class c_nvmlProcessDetail_v1_t(Structure): + _fields_ = [ + ('pid', c_uint), + ('usedGpuMemory', c_ulonglong), + ('gpuInstanceId', c_uint), + ('computeInstanceId', c_uint), + ('usedGpuCcProtectedMemory', c_ulonglong), + ] + +class c_nvmlProcessDetailList_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('mode', _nvmlProcessMode_t), + ('numProcArrayEntries', c_uint), + ('procArray', POINTER(c_nvmlProcessDetail_v1_t)), + ] + _fmt_ = {'numProcArrayEntries': "%d B"} + +c_nvmlProcessDetailList_t = c_nvmlProcessDetailList_v1_t + +nvmlProcessDetailList_v1 = 0x1000018 + +class c_nvmlBridgeChipInfo_t(_PrintableStructure): + _fields_ = [ + ('type', _nvmlBridgeChipType_t), + ('fwVersion', c_uint), + ] + +class c_nvmlBridgeChipHierarchy_t(_PrintableStructure): + _fields_ = [ + ('bridgeCount', c_uint), + ('bridgeChipInfo', c_nvmlBridgeChipInfo_t * 128), + ] + +class c_nvmlEccErrorCounts_t(_PrintableStructure): + _fields_ = [ + ('l1Cache', c_ulonglong), + ('l2Cache', c_ulonglong), + ('deviceMemory', c_ulonglong), + ('registerFile', c_ulonglong), + ] + +class c_nvmlUtilization_t(_PrintableStructure): + _fields_ = [ + ('gpu', c_uint), + ('memory', c_uint), + ] + _fmt_ = {'<default>': "%d %%"} + +# Added in 2.285 +class c_nvmlHwbcEntry_t(_PrintableStructure): + _fields_ = [ + ('hwbcId', c_uint), + ('firmwareVersion', c_char * 32), + ] + +class c_nvmlValue_t(Union): + _fields_ = [ + ('dVal', c_double), + ('uiVal', c_uint), + ('ulVal', c_ulong), + ('ullVal', c_ulonglong), + ('sllVal', c_longlong), + ('siVal', c_int), + ('usVal', c_ushort), + ] + +class c_nvmlSample_t(_PrintableStructure): + _fields_ = [ + ('timeStamp', c_ulonglong), + ('sampleValue', c_nvmlValue_t), + ] + +class c_nvmlViolationTime_t(_PrintableStructure): + _fields_ = [ + ('referenceTime', c_ulonglong), + ('violationTime', c_ulonglong), + ] + +class c_nvmlFieldValue_t(_PrintableStructure): + _fields_ = [ + ('fieldId', c_uint32), + ('scopeId', c_uint32), + ('timestamp', c_int64), + ('latencyUsec', 
c_int64), + ('valueType', _nvmlValueType_t), + ('nvmlReturn', _nvmlReturn_t), + ('value', c_nvmlValue_t) + ] + +NVML_NVLINK_TOTAL_SUPPORTED_BW_MODES = 23 + +nvmlNvlinkSupportedBwModes_v1 = 0x100001c +class c_nvmlNvlinkSupportedBwModes_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('bwModes', c_uint8 * NVML_NVLINK_TOTAL_SUPPORTED_BW_MODES), + ('totalBwModes', c_uint8) + ] + + def __init__(self): + super(c_nvmlNvlinkSupportedBwModes_v1_t, self).__init__(version=nvmlNvlinkSupportedBwModes_v1) + +nvmlNvlinkGetBwMode_v1 = 0x100000c +class c_nvmlNvlinkGetBwMode_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('bIsBest', c_uint), + ('bwMode', c_uint8) + ] + + def __init__(self): + super(c_nvmlNvlinkGetBwMode_v1_t, self).__init__(version=nvmlNvlinkGetBwMode_v1) + +nvmlNvlinkSetBwMode_v1 = 0x100000c +class c_nvmlNvlinkSetBwMode_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('bSetBest', c_uint), + ('bwMode', c_uint8) + ] + + def __init__(self): + super(c_nvmlNvlinkSetBwMode_v1_t, self).__init__(version=nvmlNvlinkSetBwMode_v1) + +class c_nvmlVgpuHeterogeneousMode_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('mode', c_uint), + ] + +VgpuHeterogeneousMode_v1 = 0x1000008 + +class c_nvmlVgpuPlacementId_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('placementId', c_uint), + ] + +VgpuPlacementId_v1 = 0x1000008 + +class c_nvmlVgpuPlacementList_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('count', c_uint), + ('placementSize', c_uint), + ('placementIds', POINTER(c_uint)), + ] + +VgpuPlacementList_v1 = 0x1000018 + +NVML_VGPU_PGPU_HETEROGENEOUS_MODE = 0 +NVML_VGPU_PGPU_HOMOGENEOUS_MODE = 1 + +class c_nvmlVgpuPlacementList_v2_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('placementSize', c_uint), + ('count', c_uint), + ('placementIds', POINTER(c_uint)), + ('mode', c_uint), + ] + +VgpuPlacementList_v2 = 0x2000020 + +class c_nvmlVgpuTypeBar1Info_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('bar1Size', c_ulonglong), + ] + +VgpuTypeBar1Info_v1 = 0x1000010 + +class c_nvmlVgpuInstanceUtilizationSample_t(_PrintableStructure): + _fields_ = [ + ('vgpuInstance', _nvmlVgpuInstance_t), + ('timeStamp', c_ulonglong), + ('smUtil', c_nvmlValue_t), + ('memUtil', c_nvmlValue_t), + ('encUtil', c_nvmlValue_t), + ('decUtil', c_nvmlValue_t), + ] + +class c_nvmlVgpuInstanceUtilizationInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('timeStamp', c_ulonglong), + ('vgpuInstance', _nvmlVgpuInstance_t), + ('smUtil', c_nvmlValue_t), + ('memUtil', c_nvmlValue_t), + ('encUtil', c_nvmlValue_t), + ('decUtil', c_nvmlValue_t), + ('jpgUtil', c_nvmlValue_t), + ('ofaUtil', c_nvmlValue_t), + ] + +class c_nvmlVgpuInstancesUtilizationInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('sampleValType', _nvmlValueType_t), + ('vgpuInstanceCount', c_uint), + ('lastSeenTimeStamp', c_ulonglong), + ('vgpuUtilArray', POINTER(c_nvmlVgpuInstanceUtilizationInfo_v1_t)), + ] + +VgpuInstancesUtilizationInfo_v1 = 0x01000020 + +class c_nvmlVgpuProcessUtilizationSample_t(_PrintableStructure): + _fields_ = [ + ('vgpuInstance', _nvmlVgpuInstance_t), + ('pid', c_uint), + ('processName', c_char * NVML_VGPU_NAME_BUFFER_SIZE), + ('timeStamp', c_ulonglong), + ('smUtil', c_uint), + ('memUtil', c_uint), + ('encUtil', c_uint), + ('decUtil', c_uint), + ] + +class c_nvmlVgpuProcessUtilizationInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('processName', c_char * NVML_VGPU_NAME_BUFFER_SIZE), 
+ ('timeStamp', c_ulonglong), + ('vgpuInstance', _nvmlVgpuInstance_t), + ('pid', c_uint), + ('smUtil', c_uint), + ('memUtil', c_uint), + ('encUtil', c_uint), + ('decUtil', c_uint), + ('jpgUtil', c_uint), + ('ofaUtil', c_uint), + ] + +class c_nvmlVgpuProcessesUtilizationInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('vgpuProcessCount', c_uint), + ('lastSeenTimeStamp', c_ulonglong), + ('vgpuProcUtilArray', POINTER(c_nvmlVgpuProcessUtilizationInfo_v1_t)), + ] + +VgpuProcessesUtilizationInfo_v1 = 0x01000018 + +class nvmlVgpuRuntimeState_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('size', c_ulonglong), + ] + +VgpuRuntimeState_v1 = 0x1000010 + +class c_nvmlVgpuLicenseExpiry_t(_PrintableStructure): + _fields_ = [ + ('year', c_uint32), + ('month', c_uint16), + ('day', c_uint16), + ('hour', c_uint16), + ('min', c_uint16), + ('sec', c_uint16), + ('status', c_uint8), + ] + +NVML_GRID_LICENSE_STATE_UNKNOWN = 0 +NVML_GRID_LICENSE_STATE_UNINITIALIZED = 1 +NVML_GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED = 2 +NVML_GRID_LICENSE_STATE_UNLICENSED_RESTRICTED = 3 +NVML_GRID_LICENSE_STATE_UNLICENSED = 4 +NVML_GRID_LICENSE_STATE_LICENSED = 5 + +class c_nvmlVgpuLicenseInfo_t(_PrintableStructure): + _fields_ = [ + ('isLicensed', c_uint8), + ('licenseExpiry', c_nvmlVgpuLicenseExpiry_t), + ('currentState', c_uint), + ] + +class c_nvmlEncoderSession_t(_PrintableStructure): + _fields_ = [ + ('sessionId', c_uint), + ('pid', c_uint), + ('vgpuInstance', _nvmlVgpuInstance_t), + ('codecType', c_uint), + ('hResolution', c_uint), + ('vResolution', c_uint), + ('averageFps', c_uint), + ('encodeLatency', c_uint), + ] + +class c_nvmlProcessUtilizationSample_t(_PrintableStructure): + _fields_ = [ + ('pid', c_uint), + ('timeStamp', c_ulonglong), + ('smUtil', c_uint), + ('memUtil', c_uint), + ('encUtil', c_uint), + ('decUtil', c_uint), + ] + +class c_nvmlProcessUtilizationInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('timeStamp', c_ulonglong), + ('pid', c_uint), + ('smUtil', c_uint), + ('memUtil', c_uint), + ('encUtil', c_uint), + ('decUtil', c_uint), + ('jpgUtil', c_uint), + ('ofaUtil', c_uint), + ] + +class c_nvmlProcessesUtilizationInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('processSamplesCount', c_uint), + ('lastSeenTimeStamp', c_ulonglong), + ('procUtilArray', POINTER(c_nvmlProcessUtilizationInfo_v1_t)), + ] + +ProcessesUtilizationInfo_v1 = 0x01000018 + +class c_nvmlGridLicenseExpiry_t(_PrintableStructure): + _fields_ = [ + ('year', c_uint32), + ('month', c_uint16), + ('day', c_uint16), + ('hour', c_uint16), + ('min', c_uint16), + ('sec', c_uint16), + ('status', c_uint8), + ] + +class c_nvmlGridLicensableFeature_v4_t(_PrintableStructure): + _fields_ = [ + ('featureCode', _nvmlGridLicenseFeatureCode_t), + ('featureState', c_uint), + ('licenseInfo', c_char * NVML_GRID_LICENSE_BUFFER_SIZE), + ('productName', c_char * NVML_GRID_LICENSE_BUFFER_SIZE), + ('featureEnabled', c_uint), + ('licenseExpiry', c_nvmlGridLicenseExpiry_t), + ] + +class c_nvmlGridLicensableFeatures_v4_t(_PrintableStructure): + _fields_ = [ + ('isGridLicenseSupported', c_int), + ('licensableFeaturesCount', c_uint), + ('gridLicensableFeatures', c_nvmlGridLicensableFeature_v4_t * NVML_GRID_LICENSE_FEATURE_MAX_COUNT), + ] + +class c_nvmlGridLicensableFeature_v3_t(_PrintableStructure): + _fields_ = [ + ('featureCode', _nvmlGridLicenseFeatureCode_t), + ('featureState', c_uint), + ('licenseInfo', c_char * NVML_GRID_LICENSE_BUFFER_SIZE), + ('productName', c_char * 
NVML_GRID_LICENSE_BUFFER_SIZE), + ('featureEnabled', c_uint), + ] + +class c_nvmlGridLicensableFeatures_v3_t(_PrintableStructure): + _fields_ = [ + ('isGridLicenseSupported', c_int), + ('licensableFeaturesCount', c_uint), + ('gridLicensableFeatures', c_nvmlGridLicensableFeature_v3_t * NVML_GRID_LICENSE_FEATURE_MAX_COUNT), + ] + +class c_nvmlGridLicensableFeature_v2_t(_PrintableStructure): + _fields_ = [ + ('featureCode', _nvmlGridLicenseFeatureCode_t), + ('featureState', c_uint), + ('licenseInfo', c_char * NVML_GRID_LICENSE_BUFFER_SIZE), + ('productName', c_char * NVML_GRID_LICENSE_BUFFER_SIZE), + ] + +class c_nvmlGridLicensableFeatures_v2_t(_PrintableStructure): + _fields_ = [ + ('isGridLicenseSupported', c_int), + ('licensableFeaturesCount', c_uint), + ('gridLicensableFeatures', c_nvmlGridLicensableFeature_v2_t * NVML_GRID_LICENSE_FEATURE_MAX_COUNT), + ] + +class c_nvmlGridLicensableFeature_t(_PrintableStructure): + _fields_ = [ + ('featureCode', _nvmlGridLicenseFeatureCode_t), + ('featureState', c_uint), + ('licenseInfo', c_char * NVML_GRID_LICENSE_BUFFER_SIZE), + ] + +class c_nvmlGridLicensableFeatures_t(_PrintableStructure): + _fields_ = [ + ('isGridLicenseSupported', c_int), + ('licensableFeaturesCount', c_uint), + ('gridLicensableFeatures', c_nvmlGridLicensableFeature_t * NVML_GRID_LICENSE_FEATURE_MAX_COUNT), + ] + +class c_nvmlMarginTemperature_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('marginTemperature', c_int), + ] + +nvmlMarginTemperature_v1 = 0x1000008 + +## Event structures +class struct_c_nvmlEventSet_t(Structure): + pass # opaque handle +c_nvmlEventSet_t = POINTER(struct_c_nvmlEventSet_t) + +nvmlEventTypeSingleBitEccError = 0x0000000000000001 +nvmlEventTypeDoubleBitEccError = 0x0000000000000002 +nvmlEventTypePState = 0x0000000000000004 +nvmlEventTypeXidCriticalError = 0x0000000000000008 +nvmlEventTypeClock = 0x0000000000000010 +nvmlEventTypePowerSourceChange = 0x0000000000000080 +nvmlEventMigConfigChange = 0x0000000000000100 +nvmlEventTypeSingleBitEccErrorStorm = 0x0000000000000200 +nvmlEventTypeDramRetirementEvent = 0x0000000000000400 +nvmlEventTypeDramRetirementFailure = 0x0000000000000800 +nvmlEventTypeNonFatalPoisonError = 0x0000000000001000 +nvmlEventTypeFatalPoisonError = 0x0000000000002000 +nvmlEventTypeGpuUnavailableError = 0x0000000000004000 +nvmlEventTypeGpuRecoveryAction = 0x0000000000008000 +nvmlEventTypeNone = 0x0000000000000000 +nvmlEventTypeAll = ( + nvmlEventTypeNone + | nvmlEventTypeSingleBitEccError + | nvmlEventTypeDoubleBitEccError + | nvmlEventTypePState + | nvmlEventTypeClock + | nvmlEventTypePowerSourceChange + | nvmlEventTypeXidCriticalError + | nvmlEventMigConfigChange + | nvmlEventTypeSingleBitEccErrorStorm + | nvmlEventTypeDramRetirementEvent + | nvmlEventTypeDramRetirementFailure + | nvmlEventTypeNonFatalPoisonError + | nvmlEventTypeFatalPoisonError + | nvmlEventTypeGpuUnavailableError + | nvmlEventTypeGpuRecoveryAction + ) + +## Clock Event Reasons defines +nvmlClocksEventReasonGpuIdle = 0x0000000000000001 +nvmlClocksEventReasonApplicationsClocksSetting = 0x0000000000000002 +nvmlClocksEventReasonUserDefinedClocks = nvmlClocksEventReasonApplicationsClocksSetting # deprecated, use nvmlClocksEventReasonApplicationsClocksSetting +nvmlClocksEventReasonSwPowerCap = 0x0000000000000004 +nvmlClocksEventReasonHwSlowdown = 0x0000000000000008 +nvmlClocksEventReasonSyncBoost = 0x0000000000000010 +nvmlClocksEventReasonSwThermalSlowdown = 0x0000000000000020 +nvmlClocksEventReasonHwThermalSlowdown = 0x0000000000000040 
+nvmlClocksEventReasonHwPowerBrakeSlowdown = 0x0000000000000080 +nvmlClocksEventReasonDisplayClockSetting = 0x0000000000000100 +nvmlClocksEventReasonNone = 0x0000000000000000 +nvmlClocksEventReasonAll = ( + nvmlClocksEventReasonNone | + nvmlClocksEventReasonGpuIdle | + nvmlClocksEventReasonApplicationsClocksSetting | + nvmlClocksEventReasonSwPowerCap | + nvmlClocksEventReasonHwSlowdown | + nvmlClocksEventReasonSyncBoost | + nvmlClocksEventReasonSwThermalSlowdown | + nvmlClocksEventReasonHwThermalSlowdown | + nvmlClocksEventReasonHwPowerBrakeSlowdown | + nvmlClocksEventReasonDisplayClockSetting + ) + +## Following have been deprecated +nvmlClocksThrottleReasonGpuIdle = 0x0000000000000001 +nvmlClocksThrottleReasonApplicationsClocksSetting = 0x0000000000000002 +nvmlClocksThrottleReasonUserDefinedClocks = nvmlClocksThrottleReasonApplicationsClocksSetting # deprecated, use nvmlClocksThrottleReasonApplicationsClocksSetting +nvmlClocksThrottleReasonSwPowerCap = 0x0000000000000004 +nvmlClocksThrottleReasonHwSlowdown = 0x0000000000000008 +nvmlClocksThrottleReasonSyncBoost = 0x0000000000000010 +nvmlClocksThrottleReasonSwThermalSlowdown = 0x0000000000000020 +nvmlClocksThrottleReasonHwThermalSlowdown = 0x0000000000000040 +nvmlClocksThrottleReasonHwPowerBrakeSlowdown = 0x0000000000000080 +nvmlClocksThrottleReasonDisplayClockSetting = 0x0000000000000100 +nvmlClocksThrottleReasonNone = 0x0000000000000000 +nvmlClocksThrottleReasonAll = ( + nvmlClocksThrottleReasonNone | + nvmlClocksThrottleReasonGpuIdle | + nvmlClocksThrottleReasonApplicationsClocksSetting | + nvmlClocksThrottleReasonSwPowerCap | + nvmlClocksThrottleReasonHwSlowdown | + nvmlClocksThrottleReasonSyncBoost | + nvmlClocksThrottleReasonSwThermalSlowdown | + nvmlClocksThrottleReasonHwThermalSlowdown | + nvmlClocksThrottleReasonHwPowerBrakeSlowdown | + nvmlClocksThrottleReasonDisplayClockSetting + ) + +class c_nvmlEventData_t(_PrintableStructure): + _fields_ = [ + ('device', c_nvmlDevice_t), + ('eventType', c_ulonglong), + ('eventData', c_ulonglong), + ('gpuInstanceId', c_uint), + ('computeInstanceId', c_uint) + ] + _fmt_ = {'eventType': "0x%08X"} + +class c_nvmlAccountingStats_t(_PrintableStructure): + _fields_ = [ + ('gpuUtilization', c_uint), + ('memoryUtilization', c_uint), + ('maxMemoryUsage', c_ulonglong), + ('time', c_ulonglong), + ('startTime', c_ulonglong), + ('isRunning', c_uint), + ('reserved', c_uint * 5) + ] + +class c_nvmlVgpuVersion_t(Structure): + _fields_ = [("minVersion", c_uint), + ("maxVersion", c_uint) + ] + +class c_nvmlVgpuMetadata_t(_PrintableStructure): + _fields_ = [("version", c_uint), + ("revision", c_uint), + ("guestInfoState", _nvmlVgpuGuestInfoState_t), + ("guestDriverVersion", c_char * NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE), + ("hostDriverVersion", c_char * NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE), + ("reserved", c_uint * 6), + ("vgpuVirtualizationCaps", c_uint), + ("guestVgpuVersion", c_uint), + ("opaqueDataSize", c_uint), + ("opaqueData", c_char * NVML_VGPU_METADATA_OPAQUE_DATA_SIZE) + ] + +class c_nvmlVgpuPgpuMetadata_t(_PrintableStructure): + _fields_ = [("version", c_uint), + ("revision", c_uint), + ("hostDriverVersion", c_char * NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE), + ("pgpuVirtualizationCaps", c_uint), + ("reserved", c_uint * 5), + ("hostSupportedVgpuRange", c_nvmlVgpuVersion_t), + ("opaqueDataSize", c_uint), + ("opaqueData", c_char * NVML_VGPU_PGPU_METADATA_OPAQUE_DATA_SIZE) + ] + +class c_nvmlVgpuPgpuCompatibility_t(Structure): + _fields_ = [("vgpuVmCompatibility", _nvmlVgpuVmCompatibility_t), + 
("compatibilityLimitCode", _nvmlVgpuPgpuCompatibilityLimitCode_t) + ] + +## vGPU scheduler policy defines +NVML_VGPU_SCHEDULER_POLICY_UNKNOWN = 0 +NVML_VGPU_SCHEDULER_POLICY_BEST_EFFORT = 1 +NVML_VGPU_SCHEDULER_POLICY_EQUAL_SHARE = 2 +NVML_VGPU_SCHEDULER_POLICY_FIXED_SHARE = 3 + +## Supported vGPU scheduler policy count +NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT = 3 + +NVML_SCHEDULER_SW_MAX_LOG_ENTRIES = 200 + +NVML_VGPU_SCHEDULER_ARR_DEFAULT = 0 +NVML_VGPU_SCHEDULER_ARR_DISABLE = 1 +NVML_VGPU_SCHEDULER_ARR_ENABLE = 2 + +class c_nvmlVgpuSchedDataWithARR_t(_PrintableStructure): + _fields_ = [ + ('avgFactor', c_uint), + ('timeslice', c_uint), + ] + +class c_nvmlVgpuSchedData_t(_PrintableStructure): + _fields_ = [ + ('timeslice', c_uint), + ] + +class c_nvmlVgpuSchedulerParams_t(Union): + _fields_ = [ + ('vgpuSchedDataWithARR', c_nvmlVgpuSchedDataWithARR_t), + ('vgpuSchedData', c_nvmlVgpuSchedData_t), + ] + +class c_nvmlVgpuSchedulerLogEntry_t(_PrintableStructure): + _fields_ = [ + ('timestamp', c_ulonglong), + ('timeRunTotal', c_ulonglong), + ('timeRun', c_ulonglong), + ('swRunlistId', c_uint), + ('targetTimeSlice', c_ulonglong), + ('cumulativePreemptionTime', c_ulonglong), + ] + +class c_nvmlVgpuSchedulerLog_t(_PrintableStructure): + _fields_ = [ + ('engineId', c_uint), + ('schedulerPolicy', c_uint), + ('arrMode', c_uint), + ('schedulerParams', c_nvmlVgpuSchedulerParams_t), + ('entriesCount', c_uint), + ('logEntries', c_nvmlVgpuSchedulerLogEntry_t * NVML_SCHEDULER_SW_MAX_LOG_ENTRIES), + ] + +class c_nvmlVgpuSchedulerGetState_t(_PrintableStructure): + _fields_ = [ + ('schedulerPolicy', c_uint), + ('arrMode', c_uint), + ('schedulerParams', c_nvmlVgpuSchedulerParams_t), + ] + +class c_nvmlVgpuSchedSetDataWithARR_t(_PrintableStructure): + _fields_ = [ + ('avgFactor', c_uint), + ('frequency', c_uint), + ] + +class c_nvmlVgpuSchedSetData_t(_PrintableStructure): + _fields_ = [ + ('timeslice', c_uint), + ] + +class c_nvmlVgpuSchedulerSetParams_t(Union): + _fields_ = [ + ('vgpuSchedDataWithARR', c_nvmlVgpuSchedSetDataWithARR_t), + ('vgpuSchedData', c_nvmlVgpuSchedSetData_t), + ] + +class c_nvmlVgpuSchedulerSetState_t(_PrintableStructure): + _fields_ = [ + ('schedulerPolicy', c_uint), + ('enableARRMode', c_uint), + ('schedulerParams', c_nvmlVgpuSchedulerSetParams_t), + ] + +class c_nvmlVgpuSchedulerCapabilities_t(_PrintableStructure): + _fields_ = [ + ('supportedSchedulers', c_uint * NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT), + ('maxTimeslice', c_uint), + ('minTimeslice', c_uint), + ('isArrModeSupported', c_uint), + ('maxFrequencyForARR', c_uint), + ('minFrequencyForARR', c_uint), + ('maxAvgFactorForARR', c_uint), + ('minAvgFactorForARR', c_uint), + ] + +class c_nvmlFBCStats_t(Structure): + _fields_ = [("sessionsCount", c_uint), + ("averageFPS", c_uint), + ("averageLatency", c_uint) + ] + +class c_nvmlFBCSession_t(_PrintableStructure): + _fields_ = [ + ('sessionId', c_uint), + ('pid', c_uint), + ('vgpuInstance', _nvmlVgpuInstance_t), + ('displayOrdinal', c_uint), + ('sessionType', c_uint), + ('sessionFlags', c_uint), + ('hMaxResolution', c_uint), + ('vMaxResolution', c_uint), + ('hResolution', c_uint), + ('vResolution', c_uint), + ('averageFPS', c_uint), + ('averageLatency', c_uint), + ] + +NVML_DEVICE_MIG_DISABLE = 0x0 +NVML_DEVICE_MIG_ENABLE = 0x1 + +NVML_GPU_INSTANCE_PROFILE_1_SLICE = 0x0 +NVML_GPU_INSTANCE_PROFILE_2_SLICE = 0x1 +NVML_GPU_INSTANCE_PROFILE_3_SLICE = 0x2 +NVML_GPU_INSTANCE_PROFILE_4_SLICE = 0x3 +NVML_GPU_INSTANCE_PROFILE_7_SLICE = 0x4 +NVML_GPU_INSTANCE_PROFILE_8_SLICE = 0x5 
+NVML_GPU_INSTANCE_PROFILE_6_SLICE = 0x6 +NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV1 = 0x7 +NVML_GPU_INSTANCE_PROFILE_2_SLICE_REV1 = 0x8 +NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV2 = 0x9 +NVML_GPU_INSTANCE_PROFILE_1_SLICE_GFX = 0xA +NVML_GPU_INSTANCE_PROFILE_2_SLICE_GFX = 0xB +NVML_GPU_INSTANCE_PROFILE_4_SLICE_GFX = 0xC +NVML_GPU_INSTANCE_PROFILE_COUNT = 0xD + +class c_nvmlGpuInstancePlacement_t(Structure): + _fields_ = [("start", c_uint), + ("size", c_uint) + ] + +class c_nvmlGpuInstanceProfileInfo_t(Structure): + _fields_ = [("id", c_uint), + ("isP2pSupported", c_uint), + ("sliceCount", c_uint), + ("instanceCount", c_uint), + ("multiprocessorCount", c_uint), + ("copyEngineCount", c_uint), + ("decoderCount", c_uint), + ("encoderCount", c_uint), + ("jpegCount", c_uint), + ("ofaCount", c_uint), + ("memorySizeMB", c_ulonglong), + ] + +nvmlGpuInstanceProfileInfo_v2 = 0x02000098 + +class c_nvmlGpuInstanceProfileInfo_v2_t(_PrintableStructure): + _fields_ = [("version", c_uint), + ("id", c_uint), + ("isP2pSupported", c_uint), + ("sliceCount", c_uint), + ("instanceCount", c_uint), + ("multiprocessorCount", c_uint), + ("copyEngineCount", c_uint), + ("decoderCount", c_uint), + ("encoderCount", c_uint), + ("jpegCount", c_uint), + ("ofaCount", c_uint), + ("memorySizeMB", c_ulonglong), + ("name", c_char * NVML_DEVICE_NAME_V2_BUFFER_SIZE) + ] + + def __init__(self): + super(c_nvmlGpuInstanceProfileInfo_v2_t, self).__init__(version=nvmlGpuInstanceProfileInfo_v2) + +class c_nvmlGpuInstanceInfo_t(Structure): + _fields_ = [("device", c_nvmlDevice_t), + ("id", c_uint), + ("profileId", c_uint), + ("placement", c_nvmlGpuInstancePlacement_t) + ] + +class struct_c_nvmlGpuInstance_t(Structure): + pass # opaque handle +c_nvmlGpuInstance_t = POINTER(struct_c_nvmlGpuInstance_t) + +NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE = 0x0 +NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE = 0x1 +NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE = 0x2 +NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE = 0x3 +NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE = 0x4 +NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE = 0x5 +NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE = 0x6 +NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 = 0x7 +NVML_COMPUTE_INSTANCE_PROFILE_COUNT = 0x8 + +NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = 0x0 +NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = 0x1 + +class c_nvmlComputeInstancePlacement_t(Structure): + _fields_ = [("start", c_uint), + ("size", c_uint) + ] + +class c_nvmlComputeInstanceProfileInfo_t(Structure): + _fields_ = [("id", c_uint), + ("sliceCount", c_uint), + ("instanceCount", c_uint), + ("multiprocessorCount", c_uint), + ("sharedCopyEngineCount", c_uint), + ("sharedDecoderCount", c_uint), + ("sharedEncoderCount", c_uint), + ("sharedJpegCount", c_uint), + ("sharedOfaCount", c_uint) + ] + +nvmlComputeInstanceProfileInfo_v2 = 0x02000088 + +class c_nvmlComputeInstanceProfileInfo_v2_t(_PrintableStructure): + _fields_ = [("version", c_uint), + ("id", c_uint), + ("sliceCount", c_uint), + ("instanceCount", c_uint), + ("multiprocessorCount", c_uint), + ("sharedCopyEngineCount", c_uint), + ("sharedDecoderCount", c_uint), + ("sharedEncoderCount", c_uint), + ("sharedJpegCount", c_uint), + ("sharedOfaCount", c_uint), + ("name", c_char * NVML_DEVICE_NAME_V2_BUFFER_SIZE) + ] + + def __init__(self): + super(c_nvmlComputeInstanceProfileInfo_v2_t, self).__init__(version=nvmlComputeInstanceProfileInfo_v2) + +class c_nvmlComputeInstanceInfo_t(Structure): + _fields_ = [("device", c_nvmlDevice_t), + ("gpuInstance", c_nvmlGpuInstance_t), + ("id", c_uint), + ("profileId", c_uint), + ("placement", 
c_nvmlComputeInstancePlacement_t) + ] + +NVML_MAX_GPU_UTILIZATIONS = 8 +NVML_GPU_UTILIZATION_DOMAIN_GPU = 0 +NVML_GPU_UTILIZATION_DOMAIN_FB = 1 +NVML_GPU_UTILIZATION_DOMAIN_VID = 2 +NVML_GPU_UTILIZATION_DOMAIN_BUS = 3 +class c_nvmlGpuDynamicPstatesUtilization_t(Structure): + _fields_ = [("bIsPresent", c_uint, 1), + ("percentage", c_uint), + ("incThreshold", c_uint), + ("decThreshold", c_uint)] +class c_nvmlGpuDynamicPstatesInfo_t(Structure): + _fields_ = [("flags", c_uint), + ("utilization", c_nvmlGpuDynamicPstatesUtilization_t * NVML_MAX_GPU_UTILIZATIONS)] + +NVML_MAX_THERMAL_SENSORS_PER_GPU = 3 + +NVML_THERMAL_TARGET_NONE = 0 +NVML_THERMAL_TARGET_GPU = 1 +NVML_THERMAL_TARGET_MEMORY = 2 +NVML_THERMAL_TARGET_POWER_SUPPLY = 4 +NVML_THERMAL_TARGET_BOARD = 8 +NVML_THERMAL_TARGET_VCD_BOARD = 9 +NVML_THERMAL_TARGET_VCD_INLET = 10 +NVML_THERMAL_TARGET_VCD_OUTLET = 11 +NVML_THERMAL_TARGET_ALL = 15 +NVML_THERMAL_TARGET_UNKNOWN = -1 + +NVML_THERMAL_CONTROLLER_NONE = 0 +NVML_THERMAL_CONTROLLER_GPU_INTERNAL = 1 +NVML_THERMAL_CONTROLLER_ADM1032 = 2 +NVML_THERMAL_CONTROLLER_ADT7461 = 3 +NVML_THERMAL_CONTROLLER_MAX6649 = 4 +NVML_THERMAL_CONTROLLER_MAX1617 = 5 +NVML_THERMAL_CONTROLLER_LM99 = 6 +NVML_THERMAL_CONTROLLER_LM89 = 7 +NVML_THERMAL_CONTROLLER_LM64 = 8 +NVML_THERMAL_CONTROLLER_G781 = 9 +NVML_THERMAL_CONTROLLER_ADT7473 = 10 +NVML_THERMAL_CONTROLLER_SBMAX6649 = 11 +NVML_THERMAL_CONTROLLER_VBIOSEVT = 12 +NVML_THERMAL_CONTROLLER_OS = 13 +NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS = 14 +NVML_THERMAL_CONTROLLER_NVSYSCON_E551 = 15 +NVML_THERMAL_CONTROLLER_MAX6649R = 16 +NVML_THERMAL_CONTROLLER_ADT7473S = 17 +NVML_THERMAL_CONTROLLER_UNKNOWN = -1 + +class c_nvmlGpuThermalSensor_t(Structure): + _fields_ = [("controller", c_int), + ("defaultMinTemp", c_int), + ("defaultMaxTemp", c_int), + ("currentTemp", c_int), + ("target", c_int)] +class c_nvmlGpuThermalSettings_t(Structure): + _fields_ = [("count", c_uint), + ("sensor", c_nvmlGpuThermalSensor_t * NVML_MAX_THERMAL_SENSORS_PER_GPU)] + +_nvmlCoolerControl_t = c_uint +NVML_THERMAL_COOLER_SIGNAL_NONE = 0 +NVML_THERMAL_COOLER_SIGNAL_TOGGLE = 1 +NVML_THERMAL_COOLER_SIGNAL_VARIABLE = 2 +NVML_THERMAL_COOLER_SIGNAL_COUNT = 3 + +_nvmlCoolerTarget_t = c_uint +NVML_THERMAL_COOLER_TARGET_NONE = (1 << 0) +NVML_THERMAL_COOLER_TARGET_GPU = (1 << 1) +NVML_THERMAL_COOLER_TARGET_MEMORY = (1 << 2) +NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY = (1 << 3) +NVML_THERMAL_COOLER_TARGET_GPU_RELATED = (NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY) + +class c_nvmlCoolerInfo_t(_PrintableStructure): + _fields_ = [("version", c_uint), + ("index", c_uint), + ("coolerControlType", _nvmlCoolerControl_t), + ("coolerTarget", _nvmlCoolerTarget_t) + ] + +nvmlCoolerInfo_v1 = 0x1000010 + +def nvmlDeviceGetCoolerInfo(handle): + c_coolerInfo = c_nvmlCoolerInfo_t() + c_coolerInfo.version = nvmlCoolerInfo_v1 + c_coolerInfo.index = 0 + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCoolerInfo") + ret = fn(handle, byref(c_coolerInfo)) + _nvmlCheckReturn(ret) + return [c_coolerInfo.coolerControlType, c_coolerInfo.coolerTarget] + +class struct_c_nvmlComputeInstance_t(Structure): + pass # opaque handle +c_nvmlComputeInstance_t = POINTER(struct_c_nvmlComputeInstance_t) + +class c_nvmlDeviceAttributes(Structure): + _fields_ = [("multiprocessorCount", c_uint), + ("sharedCopyEngineCount", c_uint), + ("sharedDecoderCount", c_uint), + ("sharedEncoderCount", c_uint), + ("sharedJpegCount", c_uint), + ("sharedOfaCount", c_uint), + ("gpuInstanceSliceCount", 
c_uint), + ("computeInstanceSliceCount", c_uint), + ("memorySizeMB", c_ulonglong), + ] + +class c_nvmlRowRemapperHistogramValues(Structure): + _fields_ = [("max", c_uint), + ("high", c_uint), + ("partial", c_uint), + ("low", c_uint), + ("none", c_uint) + ] + +NVML_GPU_CERT_CHAIN_SIZE = 0x1000 +NVML_GPU_ATTESTATION_CERT_CHAIN_SIZE = 0x1400 +NVML_CC_GPU_CEC_NONCE_SIZE = 0x20 +NVML_CC_GPU_ATTESTATION_REPORT_SIZE = 0x2000 +NVML_CC_GPU_CEC_ATTESTATION_REPORT_SIZE = 0x1000 +NVML_CC_CEC_ATTESTATION_REPORT_NOT_PRESENT = 0 +NVML_CC_CEC_ATTESTATION_REPORT_PRESENT = 1 + +class c_nvmlConfComputeSystemState_t(Structure): + _fields_ = [('environment', c_uint), + ('ccFeature', c_uint), + ('devToolsMode', c_uint), + ] + +nvmlSystemConfComputeSettings_v1 = 0x1000014 + +class c_nvmlSystemConfComputeSettings_v1_t(Structure): + _fields_ = [('version', c_uint), + ('environment', c_uint), + ('ccFeature', c_uint), + ('devToolsMode', c_uint), + ('multiGpuMode', c_uint), + ] + def __init__(self): + super(c_nvmlSystemConfComputeSettings_v1_t, self).__init__(version=nvmlSystemConfComputeSettings_v1) + +class c_nvmlConfComputeSystemCaps_t(Structure): + _fields_ = [('cpuCaps', c_uint), + ('gpusCaps', c_uint), + ] + +class c_nvmlConfComputeMemSizeInfo_t(Structure): + _fields_ = [('protectedMemSizeKib', c_ulonglong), + ('unprotectedMemSizeKib', c_ulonglong), + ] + +class c_nvmlConfComputeGpuCertificate_t(Structure): + _fields_ = [('certChainSize', c_uint), + ('attestationCertChainSize', c_uint), + ('certChain', c_uint8 * NVML_GPU_CERT_CHAIN_SIZE), + ('attestationCertChain', c_uint8 * NVML_GPU_ATTESTATION_CERT_CHAIN_SIZE), + ] + +class c_nvmlConfComputeGpuAttestationReport_t(Structure): + _fields_ = [('isCecAttestationReportPresent', c_uint), + ('attestationReportSize', c_uint), + ('cecAttestationReportSize', c_uint), + ('nonce', c_uint8 * NVML_CC_GPU_CEC_NONCE_SIZE), + ('attestationReport', c_uint8 * NVML_CC_GPU_ATTESTATION_REPORT_SIZE), + ('cecAttestationReport', c_uint8 * NVML_CC_GPU_CEC_ATTESTATION_REPORT_SIZE), + ] + +class c_nvmlConfComputeSetKeyRotationThresholdInfo_t(Structure): + _fields_ = [('version', c_uint), + ('maxAttackerAdvantage', c_ulong), + ] +ConfComputeSetKeyRotationThresholdInfo_v1 = 0x1000010 + +class c_nvmlConfComputeGetKeyRotationThresholdInfo_t(Structure): + _fields_ = [('version', c_uint), + ('attackerAdvantage', c_ulong), + ] +ConfComputeGetKeyRotationThresholdInfo_v1 = 0x1000010 + + +## string/bytes conversion for ease of use +def convertStrBytes(func): + ''' + In python 3, strings are unicode instead of bytes, and need to be converted for ctypes + Args from caller: (1, 'string', <__main__.c_nvmlDevice_t at 0xFFFFFFFF>) + Args passed to function: (1, b'string', <__main__.c_nvmlDevice_t at 0xFFFFFFFF)> + ---- + Returned from function: b'returned string' + Returned to caller: 'returned string' + ''' + @wraps(func) + def wrapper(*args, **kwargs): + # encoding a str returns bytes in python 2 and 3 + args = [arg.encode() if isinstance(arg, str) else arg for arg in args] + res = func(*args, **kwargs) + # In python 2, str and bytes are the same + # In python 3, str is unicode and should be decoded. + # Ctypes handles most conversions, this only effects c_char and char arrays. 
+ if isinstance(res, bytes): + if isinstance(res, str): + return res + return res.decode() + return res + + if sys.version_info >= (3,): + return wrapper + return func + +def throwOnVersionMismatch(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except NVMLError_FunctionNotFound: + raise NVMLLibraryMismatchError("Unversioned function called and the " + "pyNVML version does not match the NVML lib version. " + "Either use matching pyNVML and NVML lib versions or " + "use a versioned function such as " + func.__name__ + "_v2") + return wrapper + +## C function wrappers ## +def nvmlInitWithFlags(flags): + _LoadNvmlLibrary() + + # + # Initialize the library + # + fn = _nvmlGetFunctionPointer("nvmlInitWithFlags") + ret = fn(flags) + _nvmlCheckReturn(ret) + + # Atomically update refcount + global _nvmlLib_refcount + libLoadLock.acquire() + _nvmlLib_refcount += 1 + libLoadLock.release() + return None + +def nvmlInit(): + nvmlInitWithFlags(0) + return None + +def _LoadNvmlLibrary(): + ''' + Load the library if it isn't loaded already + ''' + global nvmlLib + + if (nvmlLib == None): + # lock to ensure only one caller loads the library + libLoadLock.acquire() + + try: + # ensure the library still isn't loaded + if (nvmlLib == None): + try: + if (sys.platform[:3] == "win"): + # cdecl calling convention + try: + # Check for nvml.dll in System32 first for DCH drivers + nvmlLib = CDLL(os.path.join(os.getenv("WINDIR", "C:/Windows"), "System32/nvml.dll")) + except OSError as ose: + # If nvml.dll is not found in System32, it should be in ProgramFiles + # load nvml.dll from %ProgramFiles%/NVIDIA Corporation/NVSMI/nvml.dll + nvmlLib = CDLL(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"), "NVIDIA Corporation/NVSMI/nvml.dll")) + else: + # assume linux + nvmlLib = CDLL("libnvidia-ml.so.1") + except OSError as ose: + _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND) + if (nvmlLib == None): + _nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND) + finally: + # lock is always freed + libLoadLock.release() + +def nvmlShutdown(): + # + # Leave the library loaded, but shutdown the interface + # + fn = _nvmlGetFunctionPointer("nvmlShutdown") + ret = fn() + _nvmlCheckReturn(ret) + + # Atomically update refcount + global _nvmlLib_refcount + libLoadLock.acquire() + if (0 < _nvmlLib_refcount): + _nvmlLib_refcount -= 1 + libLoadLock.release() + return None + +# Added in 2.285 +@convertStrBytes +def nvmlErrorString(result): + fn = _nvmlGetFunctionPointer("nvmlErrorString") + fn.restype = c_char_p # otherwise return is an int + ret = fn(result) + return ret + +# Added in 2.285 +@convertStrBytes +def nvmlSystemGetNVMLVersion(): + c_version = create_string_buffer(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlSystemGetNVMLVersion") + ret = fn(c_version, c_uint(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_version.value + +def nvmlSystemGetCudaDriverVersion(): + c_cuda_version = c_int() + fn = _nvmlGetFunctionPointer("nvmlSystemGetCudaDriverVersion") + ret = fn(byref(c_cuda_version)) + _nvmlCheckReturn(ret) + return c_cuda_version.value + +def nvmlSystemGetCudaDriverVersion_v2(): + c_cuda_version = c_int() + fn = _nvmlGetFunctionPointer("nvmlSystemGetCudaDriverVersion_v2") + ret = fn(byref(c_cuda_version)) + _nvmlCheckReturn(ret) + return c_cuda_version.value + +# Added in 2.285 +@convertStrBytes +def nvmlSystemGetProcessName(pid): + c_name = create_string_buffer(1024) + fn = _nvmlGetFunctionPointer("nvmlSystemGetProcessName") + 
ret = fn(c_uint(pid), c_name, c_uint(1024)) + _nvmlCheckReturn(ret) + return c_name.value + +@convertStrBytes +def nvmlSystemGetDriverVersion(): + c_version = create_string_buffer(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlSystemGetDriverVersion") + ret = fn(c_version, c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_version.value + +# Added in 2.285 +def nvmlSystemGetHicVersion(): + c_count = c_uint(0) + hics = None + fn = _nvmlGetFunctionPointer("nvmlSystemGetHicVersion") + + # get the count + ret = fn(byref(c_count), None) + + # this should only fail with insufficient size + if ((ret != NVML_SUCCESS) and + (ret != NVML_ERROR_INSUFFICIENT_SIZE)): + raise NVMLError(ret) + + # If there are no hics + if (c_count.value == 0): + return [] + + hic_array = c_nvmlHwbcEntry_t * c_count.value + hics = hic_array() + ret = fn(byref(c_count), hics) + _nvmlCheckReturn(ret) + return hics + +def nvmlSystemGetDriverBranch(): + c_branchInfo = c_nvmlSystemDriverBranchInfo_v1_t(0) + c_branchInfo.version = SystemDriverBranchInfo_v1 + fn = _nvmlGetFunctionPointer("nvmlSystemGetDriverBranch") + ret = fn(byref(c_branchInfo), c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_branchInfo + +## Unit get functions +def nvmlUnitGetCount(): + c_count = c_uint() + fn = _nvmlGetFunctionPointer("nvmlUnitGetCount") + ret = fn(byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +def nvmlUnitGetHandleByIndex(index): + c_index = c_uint(index) + unit = c_nvmlUnit_t() + fn = _nvmlGetFunctionPointer("nvmlUnitGetHandleByIndex") + ret = fn(c_index, byref(unit)) + _nvmlCheckReturn(ret) + return unit + +def nvmlUnitGetUnitInfo(unit): + c_info = c_nvmlUnitInfo_t() + fn = _nvmlGetFunctionPointer("nvmlUnitGetUnitInfo") + ret = fn(unit, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +def nvmlUnitGetLedState(unit): + c_state = c_nvmlLedState_t() + fn = _nvmlGetFunctionPointer("nvmlUnitGetLedState") + ret = fn(unit, byref(c_state)) + _nvmlCheckReturn(ret) + return c_state + +def nvmlUnitGetPsuInfo(unit): + c_info = c_nvmlPSUInfo_t() + fn = _nvmlGetFunctionPointer("nvmlUnitGetPsuInfo") + ret = fn(unit, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +def nvmlUnitGetTemperature(unit, type): + c_temp = c_uint() + fn = _nvmlGetFunctionPointer("nvmlUnitGetTemperature") + ret = fn(unit, c_uint(type), byref(c_temp)) + _nvmlCheckReturn(ret) + return c_temp.value + +def nvmlUnitGetFanSpeedInfo(unit): + c_speeds = c_nvmlUnitFanSpeeds_t() + fn = _nvmlGetFunctionPointer("nvmlUnitGetFanSpeedInfo") + ret = fn(unit, byref(c_speeds)) + _nvmlCheckReturn(ret) + return c_speeds + +# added to API +def nvmlUnitGetDeviceCount(unit): + c_count = c_uint(0) + # query the unit to determine device count + fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices") + ret = fn(unit, byref(c_count), None) + if (ret == NVML_ERROR_INSUFFICIENT_SIZE): + ret = NVML_SUCCESS + _nvmlCheckReturn(ret) + return c_count.value + +def nvmlUnitGetDevices(unit): + c_count = c_uint(nvmlUnitGetDeviceCount(unit)) + device_array = c_nvmlDevice_t * c_count.value + c_devices = device_array() + fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices") + ret = fn(unit, byref(c_count), c_devices) + _nvmlCheckReturn(ret) + return c_devices + +## Device get functions +def nvmlDeviceGetCount(): + c_count = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCount_v2") + ret = fn(byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +def 
nvmlDeviceGetHandleByIndex(index): + c_index = c_uint(index) + device = c_nvmlDevice_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByIndex_v2") + ret = fn(c_index, byref(device)) + _nvmlCheckReturn(ret) + return device + +@convertStrBytes +def nvmlDeviceGetHandleBySerial(serial): + c_serial = c_char_p(serial) + device = c_nvmlDevice_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleBySerial") + ret = fn(c_serial, byref(device)) + _nvmlCheckReturn(ret) + return device + +@convertStrBytes +def nvmlDeviceGetHandleByUUID(uuid): + c_uuid = c_char_p(uuid) + device = c_nvmlDevice_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByUUID") + ret = fn(c_uuid, byref(device)) + _nvmlCheckReturn(ret) + return device + +@convertStrBytes +def nvmlDeviceGetHandleByPciBusId(pciBusId): + c_busId = c_char_p(pciBusId) + device = c_nvmlDevice_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByPciBusId_v2") + ret = fn(c_busId, byref(device)) + _nvmlCheckReturn(ret) + return device + +@convertStrBytes +def nvmlDeviceGetName(handle): + c_name = create_string_buffer(NVML_DEVICE_NAME_V2_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetName") + ret = fn(handle, c_name, c_uint(NVML_DEVICE_NAME_V2_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_name.value + +class c_nvmlDevicePerfModes_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('str', c_char * NVML_PERF_MODES_BUFFER_SIZE), + ] + +nvmlDevicePerfModes_v1 = 0x1000804 + +@convertStrBytes +def nvmlDeviceGetPerformanceModes(handle): + perfModes = c_nvmlDevicePerfModes_v1_t() + perfModes.version = nvmlDevicePerfModes_v1 + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPerformanceModes") + ret = fn(handle, byref(perfModes)) + _nvmlCheckReturn(ret) + return perfModes.str + +class c_nvmlDeviceCurrentClockFreqs_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('str', c_char * NVML_PERF_MODES_BUFFER_SIZE), + ] + +nvmlDeviceCurrentClockFreqs_v1 = 0x1000804 + +@convertStrBytes +def nvmlDeviceGetCurrentClockFreqs(handle): + currentClockFreqs = c_nvmlDeviceCurrentClockFreqs_v1_t() + currentClockFreqs.version = nvmlDeviceCurrentClockFreqs_v1 + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrentClockFreqs") + ret = fn(handle, byref(currentClockFreqs)) + _nvmlCheckReturn(ret) + return currentClockFreqs.str + +def nvmlDeviceGetBoardId(handle): + c_id = c_uint(); + fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId") + ret = fn(handle, byref(c_id)) + _nvmlCheckReturn(ret) + return c_id.value + +def nvmlDeviceGetMultiGpuBoard(handle): + c_multiGpu = c_uint(); + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMultiGpuBoard") + ret = fn(handle, byref(c_multiGpu)) + _nvmlCheckReturn(ret) + return c_multiGpu.value + +def nvmlDeviceGetBrand(handle): + c_type = _nvmlBrandType_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetBrand") + ret = fn(handle, byref(c_type)) + _nvmlCheckReturn(ret) + return c_type.value + +def nvmlDeviceGetC2cModeInfoV1(handle): + c_info = c_nvmlC2cModeInfo_v1_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetC2cModeInfoV") + ret = fn(handle, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +def nvmlDeviceGetC2cModeInfoV(handle): + return nvmlDeviceGetC2cModeInfoV1(handle) + +@convertStrBytes +def nvmlDeviceGetBoardPartNumber(handle): + c_part_number = create_string_buffer(NVML_DEVICE_PART_NUMBER_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardPartNumber") + ret = fn(handle, c_part_number, c_uint(NVML_DEVICE_PART_NUMBER_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return 
c_part_number.value + +@convertStrBytes +def nvmlDeviceGetSerial(handle): + c_serial = create_string_buffer(NVML_DEVICE_SERIAL_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSerial") + ret = fn(handle, c_serial, c_uint(NVML_DEVICE_SERIAL_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_serial.value + +def nvmlDeviceGetModuleId(handle, moduleId=c_uint()): + isReference = type(moduleId) is not c_uint + moduleIdRef = moduleId if isReference else byref(moduleId) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetModuleId") + ret = fn(handle, moduleIdRef) + if isReference: + return ret + else: + _nvmlCheckReturn(ret) + return moduleId.value + +def nvmlDeviceGetMemoryAffinity(handle, nodeSetSize, scope): + affinity_array = c_ulonglong * nodeSetSize + c_affinity = affinity_array() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryAffinity") + ret = fn(handle, nodeSetSize, byref(c_affinity), _nvmlAffinityScope_t(scope)) + _nvmlCheckReturn(ret) + return c_affinity + +def nvmlDeviceGetCpuAffinityWithinScope(handle, cpuSetSize, scope): + affinity_array = c_ulonglong * cpuSetSize + c_affinity = affinity_array() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCpuAffinityWithinScope") + ret = fn(handle, cpuSetSize, byref(c_affinity), _nvmlAffinityScope_t(scope)) + _nvmlCheckReturn(ret) + return c_affinity + +def nvmlDeviceGetCpuAffinity(handle, cpuSetSize): + affinity_array = c_ulonglong * cpuSetSize + c_affinity = affinity_array() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCpuAffinity") + ret = fn(handle, cpuSetSize, byref(c_affinity)) + _nvmlCheckReturn(ret) + return c_affinity + +def nvmlDeviceSetCpuAffinity(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetCpuAffinity") + ret = fn(handle) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceClearCpuAffinity(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceClearCpuAffinity") + ret = fn(handle) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetNumaNodeId(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNumaNodeId") + node = c_int() + ret = fn(handle, byref(node)) + _nvmlCheckReturn(ret) + return node.value + +def nvmlDeviceGetMinorNumber(handle): + c_minor_number = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinorNumber") + ret = fn(handle, byref(c_minor_number)) + _nvmlCheckReturn(ret) + return c_minor_number.value + +@convertStrBytes +def nvmlDeviceGetUUID(handle): + c_uuid = create_string_buffer(NVML_DEVICE_UUID_V2_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetUUID") + ret = fn(handle, c_uuid, c_uint(NVML_DEVICE_UUID_V2_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_uuid.value + +@convertStrBytes +def nvmlDeviceGetInforomVersion(handle, infoRomObject): + c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomVersion") + ret = fn(handle, _nvmlInforomObject_t(infoRomObject), + c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_version.value + +# Added in 4.304 +@convertStrBytes +def nvmlDeviceGetInforomImageVersion(handle): + c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomImageVersion") + ret = fn(handle, c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_version.value + +# Added in 4.304 +def nvmlDeviceGetInforomConfigurationChecksum(handle): + c_checksum = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomConfigurationChecksum") + ret = fn(handle, 
byref(c_checksum)) + _nvmlCheckReturn(ret) + return c_checksum.value + +# Added in 4.304 +def nvmlDeviceValidateInforom(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceValidateInforom") + ret = fn(handle) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetLastBBXFlushTime(handle): + c_timestamp = c_ulonglong() + c_durationUs = c_ulong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetLastBBXFlushTime") + ret = fn(handle, byref(c_timestamp), byref(c_durationUs)) + _nvmlCheckReturn(ret) + return [c_timestamp.value, c_durationUs.value] + +def nvmlDeviceGetDisplayMode(handle): + c_mode = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayMode") + ret = fn(handle, byref(c_mode)) + _nvmlCheckReturn(ret) + return c_mode.value + +def nvmlDeviceGetDisplayActive(handle): + c_mode = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayActive") + ret = fn(handle, byref(c_mode)) + _nvmlCheckReturn(ret) + return c_mode.value + + +def nvmlDeviceGetPersistenceMode(handle): + c_state = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPersistenceMode") + ret = fn(handle, byref(c_state)) + _nvmlCheckReturn(ret) + return c_state.value + +def nvmlDeviceGetPciInfoExt(handle, c_info): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPciInfoExt") + ret = fn(handle, c_info) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetPciInfo_v3(handle): + c_info = nvmlPciInfo_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPciInfo_v3") + ret = fn(handle, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +def nvmlDeviceGetPciInfo(handle): + return nvmlDeviceGetPciInfo_v3(handle) + +def nvmlDeviceGetClockInfo(handle, type): + c_clock = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetClockInfo") + ret = fn(handle, _nvmlClockType_t(type), byref(c_clock)) + _nvmlCheckReturn(ret) + return c_clock.value + +# Added in 2.285 +def nvmlDeviceGetMaxClockInfo(handle, type): + c_clock = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxClockInfo") + ret = fn(handle, _nvmlClockType_t(type), byref(c_clock)) + _nvmlCheckReturn(ret) + return c_clock.value + +# Added in 4.304 +def nvmlDeviceGetApplicationsClock(handle, type): + c_clock = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetApplicationsClock") + ret = fn(handle, _nvmlClockType_t(type), byref(c_clock)) + _nvmlCheckReturn(ret) + return c_clock.value + +def nvmlDeviceGetMaxCustomerBoostClock(handle, type): + c_clock = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxCustomerBoostClock") + ret = fn(handle, _nvmlClockType_t(type), byref(c_clock)) + _nvmlCheckReturn(ret) + return c_clock.value + +def nvmlDeviceGetClock(handle, type, id): + c_clock = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetClock") + ret = fn(handle, _nvmlClockType_t(type), _nvmlClockId_t(id), byref(c_clock)) + _nvmlCheckReturn(ret) + return c_clock.value + +# Added in 5.319 +def nvmlDeviceGetDefaultApplicationsClock(handle, type): + c_clock = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDefaultApplicationsClock") + ret = fn(handle, _nvmlClockType_t(type), byref(c_clock)) + _nvmlCheckReturn(ret) + return c_clock.value + +# Added in 4.304 +def nvmlDeviceGetSupportedMemoryClocks(handle): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedMemoryClocks") + ret = fn(handle, byref(c_count), None) + + if (ret == NVML_SUCCESS): + # special case, no clocks + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + clocks_array = 
c_uint * c_count.value + c_clocks = clocks_array() + + # make the call again + ret = fn(handle, byref(c_count), c_clocks) + _nvmlCheckReturn(ret) + + procs = [] + for i in range(c_count.value): + procs.append(c_clocks[i]) + + return procs + else: + # error case + raise NVMLError(ret) + +# Added in 4.304 +def nvmlDeviceGetSupportedGraphicsClocks(handle, memoryClockMHz): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedGraphicsClocks") + ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), None) + + if (ret == NVML_SUCCESS): + # special case, no clocks + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + clocks_array = c_uint * c_count.value + c_clocks = clocks_array() + + # make the call again + ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), c_clocks) + _nvmlCheckReturn(ret) + + procs = [] + for i in range(c_count.value): + procs.append(c_clocks[i]) + + return procs + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetFanSpeed(handle): + c_speed = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeed") + ret = fn(handle, byref(c_speed)) + _nvmlCheckReturn(ret) + return c_speed.value + +def nvmlDeviceGetFanSpeed_v2(handle, fan): + c_speed = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeed_v2") + ret = fn(handle, fan, byref(c_speed)) + _nvmlCheckReturn(ret) + return c_speed.value + +class c_nvmlFanSpeedInfo_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('fan', c_uint), + ('speed', c_uint), + ] + +nvmlFanSpeedInfo_v1 = 0x100000C + +def nvmlDeviceGetFanSpeedRPM(handle): + c_fanSpeed = c_nvmlFanSpeedInfo_t() + c_fanSpeed.fan = 0 + c_fanSpeed.version = nvmlFanSpeedInfo_v1 + fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeedRPM") + ret = fn(handle, byref(c_fanSpeed)) + _nvmlCheckReturn(ret) + return c_fanSpeed.speed + +def nvmlDeviceGetTargetFanSpeed(handle, fan): + c_speed = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTargetFanSpeed") + ret = fn(handle, fan, byref(c_speed)) + _nvmlCheckReturn(ret) + return c_speed.value + +def nvmlDeviceGetNumFans(device): + c_numFans = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNumFans") + ret = fn(device, byref(c_numFans)) + _nvmlCheckReturn(ret) + return c_numFans.value + +def nvmlDeviceSetDefaultFanSpeed_v2(handle, index): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultFanSpeed_v2"); + ret = fn(handle, index) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetMinMaxFanSpeed(handle, minSpeed=c_uint(), maxSpeed=c_uint()): + isReference = (type(minSpeed) is not c_uint) or (type(maxSpeed) is not c_uint) + minSpeedRef = minSpeed if isReference else byref(minSpeed) + maxSpeedRef = maxSpeed if isReference else byref(maxSpeed) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinMaxFanSpeed") + ret = fn(handle, minSpeedRef, maxSpeedRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else [minSpeed.value, maxSpeed.value] + +def nvmlDeviceGetFanControlPolicy_v2(handle, fan, fanControlPolicy=c_uint()): + isReference = type(fanControlPolicy) is not c_uint + fanControlPolicyRef = fanControlPolicy if isReference else byref(fanControlPolicy) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanControlPolicy_v2") + ret = fn(handle, fan, fanControlPolicyRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else fanControlPolicy.value + +def nvmlDeviceSetFanControlPolicy(handle, fan, fanControlPolicy): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetFanControlPolicy") + ret = 
fn(handle, fan, _nvmlFanControlPolicy_t(fanControlPolicy)) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +class c_nvmlTemperature_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('sensorType', _nvmlTemperatureSensors_t), + ('temperature', c_int), + ] +nvmlTemperature_v1 = 0x100000C + +def nvmlDeviceGetTemperatureV1(handle, sensor): + c_temp = c_nvmlTemperature_v1_t() + c_temp.version = nvmlTemperature_v1 + c_temp.sensorType = _nvmlTemperatureSensors_t(sensor) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperatureV") + ret = fn(handle, byref(c_temp)) + _nvmlCheckReturn(ret) + return c_temp.temperature + +def nvmlDeviceGetTemperatureV(handle, sensor, version=nvmlTemperature_v1): + if version == nvmlTemperature_v1: + return nvmlDeviceGetTemperatureV1(handle, sensor) + else: + raise NVMLError(NVML_ERROR_ARGUMENT_VERSION_MISMATCH) + +# DEPRECATED use nvmlDeviceGetTemperatureV instead +def nvmlDeviceGetTemperature(handle, sensor): + c_temp = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperature") + ret = fn(handle, _nvmlTemperatureSensors_t(sensor), byref(c_temp)) + _nvmlCheckReturn(ret) + return c_temp.value + +def nvmlDeviceGetTemperatureThreshold(handle, threshold): + c_temp = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperatureThreshold") + ret = fn(handle, _nvmlTemperatureThresholds_t(threshold), byref(c_temp)) + _nvmlCheckReturn(ret) + return c_temp.value + +def nvmlDeviceSetTemperatureThreshold(handle, threshold, temp): + c_temp = c_uint() + c_temp.value = temp + fn = _nvmlGetFunctionPointer("nvmlDeviceSetTemperatureThreshold") + ret = fn(handle, _nvmlTemperatureThresholds_t(threshold), byref(c_temp)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetMarginTemperature(handle): + c_marginTempInfo = c_nvmlMarginTemperature_v1_t() + c_marginTempInfo.version = nvmlMarginTemperature_v1 + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMarginTemperature") + ret = fn(handle, byref(c_marginTempInfo)) + _nvmlCheckReturn(ret) + return c_marginTempInfo.marginTemperature + +# DEPRECATED use nvmlDeviceGetPerformanceState +def nvmlDeviceGetPowerState(handle): + c_pstate = _nvmlPstates_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerState") + ret = fn(handle, byref(c_pstate)) + _nvmlCheckReturn(ret) + return c_pstate.value + +def nvmlDeviceGetPerformanceState(handle): + c_pstate = _nvmlPstates_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPerformanceState") + ret = fn(handle, byref(c_pstate)) + _nvmlCheckReturn(ret) + return c_pstate.value + +def nvmlDeviceGetPowerManagementMode(handle): + c_pcapMode = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementMode") + ret = fn(handle, byref(c_pcapMode)) + _nvmlCheckReturn(ret) + return c_pcapMode.value + +def nvmlDeviceGetPowerManagementLimit(handle): + c_limit = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementLimit") + ret = fn(handle, byref(c_limit)) + _nvmlCheckReturn(ret) + return c_limit.value + +# Added in 4.304 +def nvmlDeviceGetPowerManagementLimitConstraints(handle): + c_minLimit = c_uint() + c_maxLimit = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementLimitConstraints") + ret = fn(handle, byref(c_minLimit), byref(c_maxLimit)) + _nvmlCheckReturn(ret) + return [c_minLimit.value, c_maxLimit.value] + +# Added in 4.304 +def nvmlDeviceGetPowerManagementDefaultLimit(handle): + c_limit = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementDefaultLimit") + ret = fn(handle, byref(c_limit)) + _nvmlCheckReturn(ret) 
+ return c_limit.value + + +# Added in 331 +def nvmlDeviceGetEnforcedPowerLimit(handle): + c_limit = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetEnforcedPowerLimit") + ret = fn(handle, byref(c_limit)) + _nvmlCheckReturn(ret) + return c_limit.value + +def nvmlDeviceGetPowerUsage(handle): + c_watts = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerUsage") + ret = fn(handle, byref(c_watts)) + _nvmlCheckReturn(ret) + return c_watts.value + +def nvmlDeviceGetTotalEnergyConsumption(handle): + c_millijoules = c_uint64() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTotalEnergyConsumption") + ret = fn(handle, byref(c_millijoules)) + _nvmlCheckReturn(ret) + return c_millijoules.value + +# Added in 4.304 +def nvmlDeviceGetGpuOperationMode(handle): + c_currState = _nvmlGpuOperationMode_t() + c_pendingState = _nvmlGpuOperationMode_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuOperationMode") + ret = fn(handle, byref(c_currState), byref(c_pendingState)) + _nvmlCheckReturn(ret) + return [c_currState.value, c_pendingState.value] + +# Added in 4.304 +def nvmlDeviceGetCurrentGpuOperationMode(handle): + return nvmlDeviceGetGpuOperationMode(handle)[0] + +# Added in 4.304 +def nvmlDeviceGetPendingGpuOperationMode(handle): + return nvmlDeviceGetGpuOperationMode(handle)[1] + +def nvmlDeviceGetMemoryInfo(handle, version=None): + if not version: + c_memory = c_nvmlMemory_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryInfo") + else: + c_memory = c_nvmlMemory_v2_t() + c_memory.version = version + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryInfo_v2") + ret = fn(handle, byref(c_memory)) + _nvmlCheckReturn(ret) + return c_memory + +def nvmlDeviceGetBAR1MemoryInfo(handle): + c_bar1_memory = c_nvmlBAR1Memory_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetBAR1MemoryInfo") + ret = fn(handle, byref(c_bar1_memory)) + _nvmlCheckReturn(ret) + return c_bar1_memory + +def nvmlDeviceGetComputeMode(handle): + c_mode = _nvmlComputeMode_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeMode") + ret = fn(handle, byref(c_mode)) + _nvmlCheckReturn(ret) + return c_mode.value + +def nvmlDeviceGetCudaComputeCapability(handle): + c_major = c_int() + c_minor = c_int() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCudaComputeCapability") + ret = fn(handle, byref(c_major), byref(c_minor)) + _nvmlCheckReturn(ret) + return (c_major.value, c_minor.value) + +def nvmlDeviceGetEccMode(handle): + c_currState = _nvmlEnableState_t() + c_pendingState = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetEccMode") + ret = fn(handle, byref(c_currState), byref(c_pendingState)) + _nvmlCheckReturn(ret) + return [c_currState.value, c_pendingState.value] + +# added to API +def nvmlDeviceGetCurrentEccMode(handle): + return nvmlDeviceGetEccMode(handle)[0] + +# added to API +def nvmlDeviceGetPendingEccMode(handle): + return nvmlDeviceGetEccMode(handle)[1] + +def nvmlDeviceGetDefaultEccMode(handle): + c_defaultState = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDefaultEccMode") + ret = fn(handle, byref(c_defaultState)) + _nvmlCheckReturn(ret) + return [c_defaultState.value] + +def nvmlDeviceGetTotalEccErrors(handle, errorType, counterType): + c_count = c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTotalEccErrors") + ret = fn(handle, _nvmlMemoryErrorType_t(errorType), + _nvmlEccCounterType_t(counterType), byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +# This is deprecated, instead use nvmlDeviceGetMemoryErrorCounter +def 
nvmlDeviceGetDetailedEccErrors(handle, errorType, counterType): + c_counts = c_nvmlEccErrorCounts_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDetailedEccErrors") + ret = fn(handle, _nvmlMemoryErrorType_t(errorType), + _nvmlEccCounterType_t(counterType), byref(c_counts)) + _nvmlCheckReturn(ret) + return c_counts + +# Added in 4.304 +def nvmlDeviceGetMemoryErrorCounter(handle, errorType, counterType, locationType): + c_count = c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryErrorCounter") + ret = fn(handle, + _nvmlMemoryErrorType_t(errorType), + _nvmlEccCounterType_t(counterType), + _nvmlMemoryLocation_t(locationType), + byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +def nvmlDeviceGetUtilizationRates(handle): + c_util = c_nvmlUtilization_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetUtilizationRates") + ret = fn(handle, byref(c_util)) + _nvmlCheckReturn(ret) + return c_util + +def nvmlDeviceGetEncoderUtilization(handle): + c_util = c_uint() + c_samplingPeriod = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderUtilization") + ret = fn(handle, byref(c_util), byref(c_samplingPeriod)) + _nvmlCheckReturn(ret) + return [c_util.value, c_samplingPeriod.value] + +def nvmlDeviceGetDecoderUtilization(handle): + c_util = c_uint() + c_samplingPeriod = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDecoderUtilization") + ret = fn(handle, byref(c_util), byref(c_samplingPeriod)) + _nvmlCheckReturn(ret) + return [c_util.value, c_samplingPeriod.value] + +def nvmlDeviceGetJpgUtilization(handle): + c_util = c_uint() + c_samplingPeriod = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetJpgUtilization") + ret = fn(handle, byref(c_util), byref(c_samplingPeriod)) + _nvmlCheckReturn(ret) + return [c_util.value, c_samplingPeriod.value] + +def nvmlDeviceGetOfaUtilization(handle): + c_util = c_uint() + c_samplingPeriod = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetOfaUtilization") + ret = fn(handle, byref(c_util), byref(c_samplingPeriod)) + _nvmlCheckReturn(ret) + return [c_util.value, c_samplingPeriod.value] + +def nvmlDeviceGetPcieReplayCounter(handle): + c_replay = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieReplayCounter") + ret = fn(handle, byref(c_replay)) + _nvmlCheckReturn(ret) + return c_replay.value + +def nvmlDeviceGetDriverModel(handle): + c_currModel = _nvmlDriverModel_t() + c_pendingModel = _nvmlDriverModel_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDriverModel") + ret = fn(handle, byref(c_currModel), byref(c_pendingModel)) + _nvmlCheckReturn(ret) + return [c_currModel.value, c_pendingModel.value] + +# added to API +def nvmlDeviceGetCurrentDriverModel(handle): + return nvmlDeviceGetDriverModel(handle)[0] + +# added to API +def nvmlDeviceGetPendingDriverModel(handle): + return nvmlDeviceGetDriverModel(handle)[1] + +# Added in 2.285 +@convertStrBytes +def nvmlDeviceGetVbiosVersion(handle): + c_version = create_string_buffer(NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVbiosVersion") + ret = fn(handle, c_version, c_uint(NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE)) + _nvmlCheckReturn(ret) + return c_version.value + +# Added in 2.285 +def nvmlDeviceGetComputeRunningProcesses_v2(handle): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeRunningProcesses_v2") + ret = fn(handle, byref(c_count), None) + if (ret == NVML_SUCCESS): + # special case, no running processes + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # 
typical case + # oversize the array incase more processes are created + c_count.value = c_count.value * 2 + 5 + proc_array = c_nvmlProcessInfo_v2_t * c_count.value + c_procs = proc_array() + # make the call again + ret = fn(handle, byref(c_count), c_procs) + _nvmlCheckReturn(ret) + procs = [] + for i in range(c_count.value): + # use an alternative struct for this object + obj = nvmlStructToFriendlyObject(c_procs[i]) + if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + # special case for WDDM on Windows, see comment above + obj.usedGpuMemory = None + procs.append(obj) + return procs + else: + # error case + raise NVMLError(ret) + +# Added in 2.285 +def nvmlDeviceGetComputeRunningProcesses_v3(handle): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeRunningProcesses_v3") + ret = fn(handle, byref(c_count), None) + + if (ret == NVML_SUCCESS): + # special case, no running processes + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + # oversize the array incase more processes are created + c_count.value = c_count.value * 2 + 5 + proc_array = c_nvmlProcessInfo_v3_t * c_count.value + c_procs = proc_array() + + # make the call again + ret = fn(handle, byref(c_count), c_procs) + _nvmlCheckReturn(ret) + + procs = [] + for i in range(c_count.value): + # use an alternative struct for this object + obj = nvmlStructToFriendlyObject(c_procs[i]) + if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + # special case for WDDM on Windows, see comment above + obj.usedGpuMemory = None + procs.append(obj) + + return procs + else: + # error case + raise NVMLError(ret) + +@throwOnVersionMismatch +def nvmlDeviceGetComputeRunningProcesses(handle): + return nvmlDeviceGetComputeRunningProcesses_v3(handle) + +def nvmlDeviceGetGraphicsRunningProcesses_v2(handle): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGraphicsRunningProcesses_v2") + ret = fn(handle, byref(c_count), None) + if (ret == NVML_SUCCESS): + # special case, no running processes + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + # oversize the array incase more processes are created + c_count.value = c_count.value * 2 + 5 + proc_array = c_nvmlProcessInfo_v2_t * c_count.value + c_procs = proc_array() + # make the call again + ret = fn(handle, byref(c_count), c_procs) + _nvmlCheckReturn(ret) + procs = [] + for i in range(c_count.value): + # use an alternative struct for this object + obj = nvmlStructToFriendlyObject(c_procs[i]) + if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + # special case for WDDM on Windows, see comment above + obj.usedGpuMemory = None + procs.append(obj) + return procs + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetGraphicsRunningProcesses_v3(handle): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGraphicsRunningProcesses_v3") + ret = fn(handle, byref(c_count), None) + + if (ret == NVML_SUCCESS): + # special case, no running processes + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + # oversize the array incase more processes are created + c_count.value = c_count.value * 2 + 5 + proc_array = c_nvmlProcessInfo_v3_t * c_count.value + c_procs = proc_array() + + # make the call again + ret = fn(handle, byref(c_count), c_procs) + _nvmlCheckReturn(ret) + + procs = [] + for i in range(c_count.value): + # use an alternative struct for 
this object + obj = nvmlStructToFriendlyObject(c_procs[i]) + if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + # special case for WDDM on Windows, see comment above + obj.usedGpuMemory = None + procs.append(obj) + + return procs + else: + # error case + raise NVMLError(ret) + +@throwOnVersionMismatch +def nvmlDeviceGetGraphicsRunningProcesses(handle): + return nvmlDeviceGetGraphicsRunningProcesses_v3(handle) + +@throwOnVersionMismatch +def nvmlDeviceGetMPSComputeRunningProcesses(handle): + return nvmlDeviceGetMPSComputeRunningProcesses_v3(handle) + +def nvmlDeviceGetMPSComputeRunningProcesses_v2(handle): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMPSComputeRunningProcesses_v2") + ret = fn(handle, byref(c_count), None) + + if (ret == NVML_SUCCESS): + # special case, no running processes + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + # oversize the array incase more processes are created + c_count.value = c_count.value * 2 + 5 + proc_array = c_nvmlProcessInfo_v2_t * c_count.value + c_procs = proc_array() + + # make the call again + ret = fn(handle, byref(c_count), c_procs) + _nvmlCheckReturn(ret) + + procs = [] + for i in range(c_count.value): + # use an alternative struct for this object + obj = nvmlStructToFriendlyObject(c_procs[i]) + if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + # special case for WDDM on Windows, see comment above + obj.usedGpuMemory = None + procs.append(obj) + + return procs + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetMPSComputeRunningProcesses_v3(handle): + # first call to get the size + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMPSComputeRunningProcesses_v3") + ret = fn(handle, byref(c_count), None) + + if (ret == NVML_SUCCESS): + # special case, no running processes + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + # oversize the array incase more processes are created + c_count.value = c_count.value * 2 + 5 + proc_array = c_nvmlProcessInfo_v3_t * c_count.value + c_procs = proc_array() + + # make the call again + ret = fn(handle, byref(c_count), c_procs) + _nvmlCheckReturn(ret) + + procs = [] + for i in range(c_count.value): + # use an alternative struct for this object + obj = nvmlStructToFriendlyObject(c_procs[i]) + if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + # special case for WDDM on Windows, see comment above + obj.usedGpuMemory = None + procs.append(obj) + + return procs + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetRunningProcessDetailList(handle, version, mode): + c_processDetailList = c_nvmlProcessDetailList_t() + c_processDetailList.version = version + c_processDetailList.mode = mode + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetRunningProcessDetailList") + + # first call to get the size + ret = fn(handle, byref(c_processDetailList)) + if (ret == NVML_SUCCESS): + # special case, no running processes + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + c_procs = c_nvmlProcessDetail_v1_t * c_processDetailList.numProcArrayEntries + c_processDetailList.procArray = cast((c_procs)(), POINTER(c_nvmlProcessDetail_v1_t)) + + # make the call again + ret = fn(handle, byref(c_processDetailList)) + _nvmlCheckReturn(ret) + + procs = [] + for i in range(c_processDetailList.numProcArrayEntries): + # use an alternative struct for this object + obj = c_processDetailList.procArray[i] + if (obj.usedGpuMemory == 
NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + obj.usedGpuMemory = None + if (obj.usedGpuCcProtectedMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + obj.usedGpuCcProtectedMemory = None + procs.append(obj) + + return procs + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetAutoBoostedClocksEnabled(handle): + c_isEnabled = _nvmlEnableState_t() + c_defaultIsEnabled = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAutoBoostedClocksEnabled") + ret = fn(handle, byref(c_isEnabled), byref(c_defaultIsEnabled)) + _nvmlCheckReturn(ret) + return [c_isEnabled.value, c_defaultIsEnabled.value] + #Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks + +## Set functions +def nvmlUnitSetLedState(unit, color): + fn = _nvmlGetFunctionPointer("nvmlUnitSetLedState") + ret = fn(unit, _nvmlLedColor_t(color)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceSetPersistenceMode(handle, mode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetPersistenceMode") + ret = fn(handle, _nvmlEnableState_t(mode)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceSetComputeMode(handle, mode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetComputeMode") + ret = fn(handle, _nvmlComputeMode_t(mode)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceSetEccMode(handle, mode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetEccMode") + ret = fn(handle, _nvmlEnableState_t(mode)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceClearEccErrorCounts(handle, counterType): + fn = _nvmlGetFunctionPointer("nvmlDeviceClearEccErrorCounts") + ret = fn(handle, _nvmlEccCounterType_t(counterType)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceSetDriverModel(handle, model): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetDriverModel") + ret = fn(handle, _nvmlDriverModel_t(model)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceSetAutoBoostedClocksEnabled(handle, enabled): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetAutoBoostedClocksEnabled") + ret = fn(handle, _nvmlEnableState_t(enabled)) + _nvmlCheckReturn(ret) + return None + #Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks + +def nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, enabled, flags): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultAutoBoostedClocksEnabled") + ret = fn(handle, _nvmlEnableState_t(enabled), c_uint(flags)) + _nvmlCheckReturn(ret) + return None + #Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks + +def nvmlDeviceSetGpuLockedClocks(handle, minGpuClockMHz, maxGpuClockMHz): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetGpuLockedClocks") + ret = fn(handle, c_uint(minGpuClockMHz), c_uint(maxGpuClockMHz)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceResetGpuLockedClocks(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceResetGpuLockedClocks") + ret = fn(handle) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceSetMemoryLockedClocks(handle, minMemClockMHz, maxMemClockMHz): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetMemoryLockedClocks") + ret = fn(handle, c_uint(minMemClockMHz), c_uint(maxMemClockMHz)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceResetMemoryLockedClocks(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceResetMemoryLockedClocks") + ret = fn(handle) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetClkMonStatus(handle, c_clkMonInfo=nvmlClkMonStatus_t()): + isReference = type(c_clkMonInfo) is not nvmlClkMonStatus_t + c_clkMonInfoRef = c_clkMonInfo if 
isReference else byref(c_clkMonInfo) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetClkMonStatus") + ret = fn(handle, c_clkMonInfoRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else c_clkMonInfo + +# Added in 4.304 +def nvmlDeviceSetApplicationsClocks(handle, maxMemClockMHz, maxGraphicsClockMHz): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetApplicationsClocks") + ret = fn(handle, c_uint(maxMemClockMHz), c_uint(maxGraphicsClockMHz)) + _nvmlCheckReturn(ret) + return None + +# Added in 4.304 +def nvmlDeviceResetApplicationsClocks(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceResetApplicationsClocks") + ret = fn(handle) + _nvmlCheckReturn(ret) + return None + +# Added in 4.304 +def nvmlDeviceSetPowerManagementLimit(handle, limit): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetPowerManagementLimit") + ret = fn(handle, c_uint(limit)) + _nvmlCheckReturn(ret) + return None + +# Added in 4.304 +def nvmlDeviceSetGpuOperationMode(handle, mode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetGpuOperationMode") + ret = fn(handle, _nvmlGpuOperationMode_t(mode)) + _nvmlCheckReturn(ret) + return None + +# Added in 2.285 +def nvmlEventSetCreate(): + fn = _nvmlGetFunctionPointer("nvmlEventSetCreate") + eventSet = c_nvmlEventSet_t() + ret = fn(byref(eventSet)) + _nvmlCheckReturn(ret) + return eventSet + +# Added in 2.285 +def nvmlDeviceRegisterEvents(handle, eventTypes, eventSet): + fn = _nvmlGetFunctionPointer("nvmlDeviceRegisterEvents") + ret = fn(handle, c_ulonglong(eventTypes), eventSet) + _nvmlCheckReturn(ret) + return None + +# Added in 2.285 +def nvmlDeviceGetSupportedEventTypes(handle): + c_eventTypes = c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedEventTypes") + ret = fn(handle, byref(c_eventTypes)) + _nvmlCheckReturn(ret) + return c_eventTypes.value + +# raises NVML_ERROR_TIMEOUT exception on timeout +def nvmlEventSetWait_v2(eventSet, timeoutms): + fn = _nvmlGetFunctionPointer("nvmlEventSetWait_v2") + data = c_nvmlEventData_t() + ret = fn(eventSet, byref(data), c_uint(timeoutms)) + _nvmlCheckReturn(ret) + return data + +def nvmlEventSetWait(eventSet, timeoutms): + return nvmlEventSetWait_v2(eventSet, timeoutms) + +# Added in 2.285 +def nvmlEventSetFree(eventSet): + fn = _nvmlGetFunctionPointer("nvmlEventSetFree") + ret = fn(eventSet) + _nvmlCheckReturn(ret) + return None + +# Added in 3.295 +def nvmlDeviceOnSameBoard(handle1, handle2): + fn = _nvmlGetFunctionPointer("nvmlDeviceOnSameBoard") + onSameBoard = c_int() + ret = fn(handle1, handle2, byref(onSameBoard)) + _nvmlCheckReturn(ret) + return (onSameBoard.value != 0) + +# Added in 3.295 +def nvmlDeviceGetCurrPcieLinkGeneration(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkGeneration") + gen = c_uint() + ret = fn(handle, byref(gen)) + _nvmlCheckReturn(ret) + return gen.value + +# Added in 3.295 +def nvmlDeviceGetMaxPcieLinkGeneration(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxPcieLinkGeneration") + gen = c_uint() + ret = fn(handle, byref(gen)) + _nvmlCheckReturn(ret) + return gen.value + +# Added in 3.295 +def nvmlDeviceGetCurrPcieLinkWidth(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkWidth") + width = c_uint() + ret = fn(handle, byref(width)) + _nvmlCheckReturn(ret) + return width.value + +# Added in 3.295 +def nvmlDeviceGetMaxPcieLinkWidth(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxPcieLinkWidth") + width = c_uint() + ret = fn(handle, byref(width)) + _nvmlCheckReturn(ret) + return width.value + +def 
nvmlDeviceGetGpuMaxPcieLinkGeneration(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuMaxPcieLinkGeneration") + gen = c_uint() + ret = fn(handle, byref(gen)) + _nvmlCheckReturn(ret) + return gen.value + +# Added in 4.304 +def nvmlDeviceGetSupportedClocksThrottleReasons(handle): + c_reasons= c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedClocksThrottleReasons") + ret = fn(handle, byref(c_reasons)) + _nvmlCheckReturn(ret) + return c_reasons.value + +def nvmlDeviceGetSupportedClocksEventReasons(handle): + c_reasons= c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedClocksEventReasons") + ret = fn(handle, byref(c_reasons)) + _nvmlCheckReturn(ret) + return c_reasons.value + +# Added in 4.304 +def nvmlDeviceGetCurrentClocksThrottleReasons(handle): + c_reasons= c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrentClocksThrottleReasons") + ret = fn(handle, byref(c_reasons)) + _nvmlCheckReturn(ret) + return c_reasons.value + +def nvmlDeviceGetCurrentClocksEventReasons(handle): + c_reasons= c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrentClocksEventReasons") + ret = fn(handle, byref(c_reasons)) + _nvmlCheckReturn(ret) + return c_reasons.value + +# Added in 5.319 +def nvmlDeviceGetIndex(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetIndex") + c_index = c_uint() + ret = fn(handle, byref(c_index)) + _nvmlCheckReturn(ret) + return c_index.value + +# Added in 5.319 +def nvmlDeviceGetAccountingMode(handle): + c_mode = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingMode") + ret = fn(handle, byref(c_mode)) + _nvmlCheckReturn(ret) + return c_mode.value + +def nvmlDeviceSetAccountingMode(handle, mode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetAccountingMode") + ret = fn(handle, _nvmlEnableState_t(mode)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceClearAccountingPids(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceClearAccountingPids") + ret = fn(handle) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetAccountingStats(handle, pid): + stats = c_nvmlAccountingStats_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingStats") + ret = fn(handle, c_uint(pid), byref(stats)) + _nvmlCheckReturn(ret) + if (stats.maxMemoryUsage == NVML_VALUE_NOT_AVAILABLE_ulonglong.value): + # special case for WDDM on Windows, see comment above + stats.maxMemoryUsage = None + return stats + +def nvmlDeviceGetAccountingPids(handle): + count = c_uint(nvmlDeviceGetAccountingBufferSize(handle)) + pids = (c_uint * count.value)() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingPids") + ret = fn(handle, byref(count), pids) + _nvmlCheckReturn(ret) + return list(map(int, pids[0:count.value])) + +def nvmlDeviceGetAccountingBufferSize(handle): + bufferSize = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingBufferSize") + ret = fn(handle, byref(bufferSize)) + _nvmlCheckReturn(ret) + return int(bufferSize.value) + +def nvmlDeviceGetRetiredPages(device, sourceFilter): + c_source = _nvmlPageRetirementCause_t(sourceFilter) + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPages") + + # First call will get the size + ret = fn(device, c_source, byref(c_count), None) + + # this should only fail with insufficient size + if ((ret != NVML_SUCCESS) and + (ret != NVML_ERROR_INSUFFICIENT_SIZE)): + raise NVMLError(ret) + + # call again with a buffer + # oversize the array for the rare cases where additional pages + # are retired between NVML calls + c_count.value = 
c_count.value * 2 + 5 + page_array = c_ulonglong * c_count.value + c_pages = page_array() + ret = fn(device, c_source, byref(c_count), c_pages) + _nvmlCheckReturn(ret) + return list(map(int, c_pages[0:c_count.value])) + +def nvmlDeviceGetRetiredPages_v2(device, sourceFilter): + c_source = _nvmlPageRetirementCause_t(sourceFilter) + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPages_v2") + + # First call will get the size + ret = fn(device, c_source, byref(c_count), None) + + # this should only fail with insufficient size + if ((ret != NVML_SUCCESS) and + (ret != NVML_ERROR_INSUFFICIENT_SIZE)): + raise NVMLError(ret) + + # call again with a buffer + # oversize the array for the rare cases where additional pages + # are retired between NVML calls + c_count.value = c_count.value * 2 + 5 + page_array = c_ulonglong * c_count.value + c_pages = page_array() + times_array = c_ulonglong * c_count.value + c_times = times_array() + ret = fn(device, c_source, byref(c_count), c_pages, c_times) + _nvmlCheckReturn(ret) + return [ { 'address': int(c_pages[i]), 'timestamp': int(c_times[i]) } for i in range(c_count.value) ]; + +def nvmlDeviceGetRetiredPagesPendingStatus(device): + c_pending = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPagesPendingStatus") + ret = fn(device, byref(c_pending)) + _nvmlCheckReturn(ret) + return int(c_pending.value) + +def nvmlDeviceGetAPIRestriction(device, apiType): + c_permission = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAPIRestriction") + ret = fn(device, _nvmlRestrictedAPI_t(apiType), byref(c_permission)) + _nvmlCheckReturn(ret) + return int(c_permission.value) + +def nvmlDeviceSetAPIRestriction(handle, apiType, isRestricted): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetAPIRestriction") + ret = fn(handle, _nvmlRestrictedAPI_t(apiType), _nvmlEnableState_t(isRestricted)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetBridgeChipInfo(handle): + bridgeHierarchy = c_nvmlBridgeChipHierarchy_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetBridgeChipInfo") + ret = fn(handle, byref(bridgeHierarchy)) + _nvmlCheckReturn(ret) + return bridgeHierarchy + +def nvmlDeviceGetSamples(device, sampling_type, timeStamp): + c_sampling_type = _nvmlSamplingType_t(sampling_type) + c_time_stamp = c_ulonglong(timeStamp) + c_sample_count = c_uint(0) + c_sample_value_type = _nvmlValueType_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSamples") + + ## First Call gets the size + ret = fn(device, c_sampling_type, c_time_stamp, byref(c_sample_value_type), byref(c_sample_count), None) + + # Stop if this fails + if (ret != NVML_SUCCESS): + raise NVMLError(ret) + + sampleArray = c_sample_count.value * c_nvmlSample_t + c_samples = sampleArray() + ret = fn(device, c_sampling_type, c_time_stamp, byref(c_sample_value_type), byref(c_sample_count), c_samples) + _nvmlCheckReturn(ret) + return (c_sample_value_type.value, c_samples[0:c_sample_count.value]) + +def nvmlDeviceGetViolationStatus(device, perfPolicyType): + c_perfPolicy_type = _nvmlPerfPolicyType_t(perfPolicyType) + c_violTime = c_nvmlViolationTime_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetViolationStatus") + + ## Invoke the method to get violation time + ret = fn(device, c_perfPolicy_type, byref(c_violTime)) + _nvmlCheckReturn(ret) + return c_violTime + +def nvmlDeviceGetPcieThroughput(device, counter): + c_util = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieThroughput") + ret = fn(device, _nvmlPcieUtilCounter_t(counter), byref(c_util)) + 
_nvmlCheckReturn(ret) + return c_util.value + +def nvmlSystemGetTopologyGpuSet(cpuNumber): + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlSystemGetTopologyGpuSet") + + # First call will get the size + ret = fn(cpuNumber, byref(c_count), None) + + if ret != NVML_SUCCESS: + raise NVMLError(ret) + # call again with a buffer + device_array = c_nvmlDevice_t * c_count.value + c_devices = device_array() + ret = fn(cpuNumber, byref(c_count), c_devices) + _nvmlCheckReturn(ret) + return list(c_devices[0:c_count.value]) + +def nvmlDeviceGetTopologyNearestGpus(device, level): + c_count = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyNearestGpus") + + # First call will get the size + ret = fn(device, level, byref(c_count), None) + + if ret != NVML_SUCCESS: + raise NVMLError(ret) + + # call again with a buffer + device_array = c_nvmlDevice_t * c_count.value + c_devices = device_array() + ret = fn(device, level, byref(c_count), c_devices) + _nvmlCheckReturn(ret) + return list(c_devices[0:c_count.value]) + +def nvmlDeviceGetTopologyCommonAncestor(device1, device2): + c_level = _nvmlGpuTopologyLevel_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyCommonAncestor") + ret = fn(device1, device2, byref(c_level)) + _nvmlCheckReturn(ret) + return c_level.value + +def nvmlDeviceGetNvLinkUtilizationCounter(device, link, counter): + c_rxcounter = c_ulonglong() + c_txcounter = c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkUtilizationCounter") + ret = fn(device, link, counter, byref(c_rxcounter), byref(c_txcounter)) + _nvmlCheckReturn(ret) + return (c_rxcounter.value, c_txcounter.value) + +def nvmlDeviceFreezeNvLinkUtilizationCounter(device, link, counter, freeze): + fn = _nvmlGetFunctionPointer("nvmlDeviceFreezeNvLinkUtilizationCounter") + ret = fn(device, link, counter, freeze) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceResetNvLinkUtilizationCounter(device, link, counter): + fn = _nvmlGetFunctionPointer("nvmlDeviceResetNvLinkUtilizationCounter") + ret = fn(device, link, counter) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceSetNvLinkUtilizationControl(device, link, counter, control, reset): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetNvLinkUtilizationControl") + ret = fn(device, link, counter, byref(control), reset) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetNvLinkUtilizationControl(device, link, counter): + c_control = nvmlNvLinkUtilizationControl_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkUtilizationControl") + ret = fn(device, link, counter, byref(c_control)) + _nvmlCheckReturn(ret) + return c_control + +def nvmlDeviceGetNvLinkCapability(device, link, capability): + c_capResult = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkCapability") + ret = fn(device, link, capability, byref(c_capResult)) + _nvmlCheckReturn(ret) + return c_capResult.value + +def nvmlDeviceGetNvLinkErrorCounter(device, link, counter): + c_result = c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkErrorCounter") + ret = fn(device, link, counter, byref(c_result)) + _nvmlCheckReturn(ret) + return c_result.value + +def nvmlDeviceResetNvLinkErrorCounters(device, link): + fn = _nvmlGetFunctionPointer("nvmlDeviceResetNvLinkErrorCounters") + ret = fn(device, link) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetNvLinkRemotePciInfo(device, link): + c_pci = nvmlPciInfo_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkRemotePciInfo_v2") + ret = fn(device, link, byref(c_pci)) + _nvmlCheckReturn(ret) + 
return c_pci + +def nvmlDeviceGetNvLinkRemoteDeviceType(handle, link): + c_type = _nvmlNvLinkDeviceType_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkRemoteDeviceType") + ret = fn(handle, link, byref(c_type)) + _nvmlCheckReturn(ret) + return c_type.value + +def nvmlDeviceGetNvLinkState(device, link): + c_isActive = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkState") + ret = fn(device, link, byref(c_isActive)) + _nvmlCheckReturn(ret) + return c_isActive.value + +def nvmlDeviceGetNvLinkVersion(device, link): + c_version = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvLinkVersion") + ret = fn(device, link, byref(c_version)) + _nvmlCheckReturn(ret) + return c_version.value + +def nvmlDeviceModifyDrainState(pciInfo, newState): + fn = _nvmlGetFunctionPointer("nvmlDeviceModifyDrainState") + ret = fn(pointer(pciInfo), newState) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceQueryDrainState(pciInfo): + c_newState = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceQueryDrainState") + ret = fn(pointer(pciInfo), byref(c_newState)) + _nvmlCheckReturn(ret) + return c_newState.value + +def nvmlDeviceRemoveGpu(pciInfo): + fn = _nvmlGetFunctionPointer("nvmlDeviceRemoveGpu") + ret = fn(pointer(pciInfo)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceDiscoverGpus(pciInfo): + fn = _nvmlGetFunctionPointer("nvmlDeviceDiscoverGpus") + ret = fn(pointer(pciInfo)) + _nvmlCheckReturn(ret) + return None + +def nvmlDeviceGetFieldValues(handle, fieldIds): + values_arr = c_nvmlFieldValue_t * len(fieldIds) + values = values_arr() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetFieldValues") + + for i, fieldId in enumerate(fieldIds): + try: + (values[i].fieldId, values[i].scopeId) = fieldId + except TypeError: + values[i].fieldId = fieldId + + ret = fn(handle, c_int32(len(fieldIds)), byref(values)) + _nvmlCheckReturn(ret) + return values + +def nvmlDeviceClearFieldValues(handle, fieldIds): + values_arr = c_nvmlFieldValue_t * len(fieldIds) + values = values_arr() + fn = _nvmlGetFunctionPointer("nvmlDeviceClearFieldValues") + + for i, fieldId in enumerate(fieldIds): + try: + (values[i].fieldId, values[i].scopeId) = fieldId + except TypeError: + values[i].fieldId = fieldId + + ret = fn(handle, c_int32(len(fieldIds)), byref(values)) + _nvmlCheckReturn(ret) + return values + +def nvmlDeviceGetVirtualizationMode(handle): + c_virtualization_mode = c_ulonglong() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVirtualizationMode") + ret = fn(handle, byref(c_virtualization_mode)) + _nvmlCheckReturn(ret) + return c_virtualization_mode.value + +def nvmlDeviceSetVirtualizationMode(handle, virtualization_mode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetVirtualizationMode") + return fn(handle, virtualization_mode) + +def nvmlDeviceGetVgpuHeterogeneousMode(handle): + c_vgpuHeterogeneousMode = c_nvmlVgpuHeterogeneousMode_v1_t(0) + c_vgpuHeterogeneousMode.version = VgpuHeterogeneousMode_v1 + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuHeterogeneousMode") + ret = fn(handle, byref(c_vgpuHeterogeneousMode)) + _nvmlCheckReturn(ret) + return c_vgpuHeterogeneousMode.mode + +def nvmlDeviceSetVgpuHeterogeneousMode(handle, heterogeneous_mode): + c_vgpuHeterogeneousMode = c_nvmlVgpuHeterogeneousMode_v1_t(0) + c_vgpuHeterogeneousMode.version = VgpuHeterogeneousMode_v1 + c_vgpuHeterogeneousMode.mode = heterogeneous_mode + fn = _nvmlGetFunctionPointer("nvmlDeviceSetVgpuHeterogeneousMode") + ret = fn(handle, byref(c_vgpuHeterogeneousMode)) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def 
nvmlVgpuInstanceGetPlacementId(vgpuInstance): + c_placement = c_nvmlVgpuPlacementId_v1_t(0) + c_placement.version = VgpuPlacementId_v1 + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetPlacementId") + ret = fn(vgpuInstance, byref(c_placement)) + _nvmlCheckReturn(ret) + return c_placement.placementId + +def nvmlDeviceGetVgpuTypeSupportedPlacements(handle, vgpuTypeId, mode=0, version=1): + c_max_instances = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetMaxInstances") + ret = fn(handle, vgpuTypeId, byref(c_max_instances)) + _nvmlCheckReturn(ret) + + if version == 2: + c_vgpu_placements = c_nvmlVgpuPlacementList_v2_t() + c_vgpu_placements.version = VgpuPlacementList_v2 + c_vgpu_placements.count = c_max_instances.value + c_vgpu_placements.mode = mode + elif version == 1: + c_vgpu_placements = c_nvmlVgpuPlacementList_v1_t() + c_vgpu_placements.version = VgpuPlacementList_v1 + else: + raise NVMLError(NVML_ERROR_ARGUMENT_VERSION_MISMATCH) + + c_placements = c_uint * c_max_instances.value + c_vgpu_placements.placementIds = c_placements() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuTypeSupportedPlacements") + ret = fn(handle, vgpuTypeId, byref(c_vgpu_placements)) + _nvmlCheckReturn(ret) + return c_vgpu_placements + +def nvmlDeviceGetVgpuTypeCreatablePlacements(handle, vgpuTypeId, version=1): + c_max_instances = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetMaxInstances") + ret = fn(handle, vgpuTypeId, byref(c_max_instances)) + _nvmlCheckReturn(ret) + + if version == 2: + c_vgpu_placements = c_nvmlVgpuPlacementList_v2_t() + c_vgpu_placements.version = VgpuPlacementList_v2 + c_vgpu_placements.count = c_max_instances.value + elif version == 1: + c_vgpu_placements = c_nvmlVgpuPlacementList_v1_t() + c_vgpu_placements.version = VgpuPlacementList_v1 + + c_placements = c_uint * c_max_instances.value + c_vgpu_placements.placementIds = c_placements() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuTypeCreatablePlacements") + ret = fn(handle, vgpuTypeId, byref(c_vgpu_placements)) + _nvmlCheckReturn(ret) + return c_vgpu_placements + +def nvmlGetVgpuDriverCapabilities(capability): + c_capResult = c_uint() + fn = _nvmlGetFunctionPointer("nvmlGetVgpuDriverCapabilities") + ret = fn(_nvmlVgpuDriverCapability_t(capability), byref(c_capResult)) + _nvmlCheckReturn(ret) + return c_capResult.value + +def nvmlDeviceGetVgpuCapabilities(handle, capability): + c_capResult = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuCapabilities") + ret = fn(handle, _nvmlDeviceVgpuCapability_t(capability), byref(c_capResult)) + _nvmlCheckReturn(ret) + return c_capResult.value + +def nvmlDeviceSetVgpuCapabilities(handle, capability, state): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetVgpuCapabilities") + ret = fn(handle, _nvmlDeviceVgpuCapability_t(capability), state) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetSupportedVgpus(handle): + # first call to get the size + c_vgpu_count = c_uint(0) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedVgpus") + ret = fn(handle, byref(c_vgpu_count), None) + + if (ret == NVML_SUCCESS): + # special case, no supported vGPUs + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + vgpu_type_ids_array = _nvmlVgpuTypeId_t * c_vgpu_count.value + c_vgpu_type_ids = vgpu_type_ids_array() + + # make the call again + ret = fn(handle, byref(c_vgpu_count), c_vgpu_type_ids) + _nvmlCheckReturn(ret) + vgpus = [] + for i in range(c_vgpu_count.value): + vgpus.append(c_vgpu_type_ids[i]) + return vgpus + else: + # error case + raise 
NVMLError(ret) + +def nvmlDeviceGetCreatableVgpus(handle): + # first call to get the size + c_vgpu_count = c_uint(0) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCreatableVgpus") + ret = fn(handle, byref(c_vgpu_count), None) + + if (ret == NVML_SUCCESS): + # special case, no supported vGPUs + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + vgpu_type_ids_array = _nvmlVgpuTypeId_t * c_vgpu_count.value + c_vgpu_type_ids = vgpu_type_ids_array() + + # make the call again + ret = fn(handle, byref(c_vgpu_count), c_vgpu_type_ids) + _nvmlCheckReturn(ret) + vgpus = [] + for i in range(c_vgpu_count.value): + vgpus.append(c_vgpu_type_ids[i]) + return vgpus + else: + # error case + raise NVMLError(ret) + +def nvmlVgpuTypeGetGpuInstanceProfileId(vgpuTypeId): + c_profile_id = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetGpuInstanceProfileId") + ret = fn(vgpuTypeId, byref(c_profile_id)) + _nvmlCheckReturn(ret) + return (c_profile_id.value) + +@convertStrBytes +def nvmlVgpuTypeGetClass(vgpuTypeId): + c_class = create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE) + c_buffer_size = c_uint(NVML_DEVICE_NAME_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetClass") + ret = fn(vgpuTypeId, c_class, byref(c_buffer_size)) + _nvmlCheckReturn(ret) + return c_class.value + +@convertStrBytes +def nvmlVgpuTypeGetName(vgpuTypeId): + c_name = create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE) + c_buffer_size = c_uint(NVML_DEVICE_NAME_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetName") + ret = fn(vgpuTypeId, c_name, byref(c_buffer_size)) + _nvmlCheckReturn(ret) + return c_name.value + +def nvmlVgpuTypeGetDeviceID(vgpuTypeId): + c_device_id = c_ulonglong(0) + c_subsystem_id = c_ulonglong(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetDeviceID") + ret = fn(vgpuTypeId, byref(c_device_id), byref(c_subsystem_id)) + _nvmlCheckReturn(ret) + return (c_device_id.value, c_subsystem_id.value) + +def nvmlVgpuTypeGetFramebufferSize(vgpuTypeId): + c_fb_size = c_ulonglong(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetFramebufferSize") + ret = fn(vgpuTypeId, byref(c_fb_size)) + _nvmlCheckReturn(ret) + return c_fb_size.value + +def nvmlVgpuTypeGetNumDisplayHeads(vgpuTypeId): + c_num_heads = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetNumDisplayHeads") + ret = fn(vgpuTypeId, byref(c_num_heads)) + _nvmlCheckReturn(ret) + return c_num_heads.value + +def nvmlVgpuTypeGetResolution(vgpuTypeId): + c_xdim = c_uint(0) + c_ydim = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetResolution") + ret = fn(vgpuTypeId, 0, byref(c_xdim), byref(c_ydim)) + _nvmlCheckReturn(ret) + return (c_xdim.value, c_ydim.value) + +@convertStrBytes +def nvmlVgpuTypeGetLicense(vgpuTypeId): + c_license = create_string_buffer(NVML_GRID_LICENSE_BUFFER_SIZE) + c_buffer_size = c_uint(NVML_GRID_LICENSE_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetLicense") + ret = fn(vgpuTypeId, c_license, c_buffer_size) + _nvmlCheckReturn(ret) + return c_license.value + +def nvmlVgpuTypeGetFrameRateLimit(vgpuTypeId): + c_frl_config = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetFrameRateLimit") + ret = fn(vgpuTypeId, byref(c_frl_config)) + _nvmlCheckReturn(ret) + return c_frl_config.value + +def nvmlVgpuTypeGetGspHeapSize(vgpuTypeId): + c_gsp_heap = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetGspHeapSize") + ret = fn(vgpuTypeId, byref(c_gsp_heap)) + _nvmlCheckReturn(ret) + return c_gsp_heap.value + +def nvmlVgpuTypeGetFbReservation(vgpuTypeId): + c_fb_reservation = 
c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetFbReservation") + ret = fn(vgpuTypeId, byref(c_fb_reservation)) + _nvmlCheckReturn(ret) + return c_fb_reservation.value + +def nvmlVgpuInstanceGetRuntimeStateSize(vgpuInstance): + c_runtime_state = nvmlVgpuRuntimeState_v1_t() + c_runtime_state.version = VgpuRuntimeState_v1 + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetRuntimeStateSize") + ret = fn(vgpuInstance, byref(c_runtime_state)) + _nvmlCheckReturn(ret) + return c_runtime_state + +def nvmlVgpuTypeGetMaxInstances(handle, vgpuTypeId): + c_max_instances = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetMaxInstances") + ret = fn(handle, vgpuTypeId, byref(c_max_instances)) + _nvmlCheckReturn(ret) + return c_max_instances.value + +def nvmlVgpuTypeGetMaxInstancesPerVm(vgpuTypeId): + c_max_instances_per_vm = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetMaxInstancesPerVm") + ret = fn(vgpuTypeId, byref(c_max_instances_per_vm)) + _nvmlCheckReturn(ret) + return c_max_instances_per_vm.value + +def nvmlVgpuTypeGetBAR1Info(vgpuTypeId): + c_bar1Info = c_nvmlVgpuTypeBar1Info_v1_t(0) + c_bar1Info.version = VgpuTypeBar1Info_v1 + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetBAR1Info") + ret = fn(vgpuTypeId, byref(c_bar1Info)) + _nvmlCheckReturn(ret) + return c_bar1Info + +def nvmlDeviceGetActiveVgpus(handle): + # first call to get the size + c_vgpu_count = c_uint(0) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetActiveVgpus") + ret = fn(handle, byref(c_vgpu_count), None) + + if (ret == NVML_SUCCESS): + # special case, no active vGPUs + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + vgpu_instance_array = _nvmlVgpuInstance_t * c_vgpu_count.value + c_vgpu_instances = vgpu_instance_array() + + # make the call again + ret = fn(handle, byref(c_vgpu_count), c_vgpu_instances) + _nvmlCheckReturn(ret) + vgpus = [] + for i in range(c_vgpu_count.value): + vgpus.append(c_vgpu_instances[i]) + return vgpus + else: + # error case + raise NVMLError(ret) + +@convertStrBytes +def nvmlVgpuInstanceGetVmID(vgpuInstance): + c_vm_id = create_string_buffer(NVML_DEVICE_UUID_BUFFER_SIZE) + c_buffer_size = c_uint(NVML_GRID_LICENSE_BUFFER_SIZE) + c_vm_id_type = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetVmID") + ret = fn(vgpuInstance, byref(c_vm_id), c_buffer_size, byref(c_vm_id_type)) + _nvmlCheckReturn(ret) + return (c_vm_id.value, c_vm_id_type.value) + +@convertStrBytes +def nvmlVgpuInstanceGetUUID(vgpuInstance): + c_uuid = create_string_buffer(NVML_DEVICE_UUID_BUFFER_SIZE) + c_buffer_size = c_uint(NVML_DEVICE_UUID_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetUUID") + ret = fn(vgpuInstance, byref(c_uuid), c_buffer_size) + _nvmlCheckReturn(ret) + return c_uuid.value + +@convertStrBytes +def nvmlVgpuInstanceGetMdevUUID(vgpuInstance): + c_uuid = create_string_buffer(NVML_DEVICE_UUID_BUFFER_SIZE) + c_buffer_size = c_uint(NVML_DEVICE_UUID_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetMdevUUID") + ret = fn(vgpuInstance, byref(c_uuid), c_buffer_size) + _nvmlCheckReturn(ret) + return c_uuid.value + +@convertStrBytes +def nvmlVgpuInstanceGetVmDriverVersion(vgpuInstance): + c_driver_version = create_string_buffer(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + c_buffer_size = c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetVmDriverVersion") + ret = fn(vgpuInstance, byref(c_driver_version), c_buffer_size) + _nvmlCheckReturn(ret) + return c_driver_version.value + +def 
nvmlVgpuInstanceGetLicenseStatus(vgpuInstance): + c_license_status = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetLicenseStatus") + ret = fn(vgpuInstance, byref(c_license_status)) + _nvmlCheckReturn(ret) + return c_license_status.value + +def nvmlVgpuInstanceGetLicenseInfo_v2(vgpuInstance): + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetLicenseInfo_v2") + c_license_info = c_nvmlVgpuLicenseInfo_t() + ret = fn(vgpuInstance, byref(c_license_info)) + _nvmlCheckReturn(ret) + return c_license_info + +def nvmlVgpuInstanceGetLicenseInfo(vgpuInstance): + return nvmlVgpuInstanceGetLicenseInfo_v2(vgpuInstance) + +def nvmlVgpuInstanceGetFrameRateLimit(vgpuInstance): + c_frl = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFrameRateLimit") + ret = fn(vgpuInstance, byref(c_frl)) + _nvmlCheckReturn(ret) + return c_frl.value + +def nvmlVgpuInstanceGetEccMode(vgpuInstance): + c_mode = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEccMode") + ret = fn(vgpuInstance, byref(c_mode)) + _nvmlCheckReturn(ret) + return c_mode.value + +def nvmlVgpuInstanceGetType(vgpuInstance): + c_vgpu_type = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetType") + ret = fn(vgpuInstance, byref(c_vgpu_type)) + _nvmlCheckReturn(ret) + return c_vgpu_type.value + +def nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance): + c_encoder_capacity = c_ulonglong(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEncoderCapacity") + ret = fn(vgpuInstance, byref(c_encoder_capacity)) + _nvmlCheckReturn(ret) + return c_encoder_capacity.value + +def nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, encoder_capacity): + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceSetEncoderCapacity") + return fn(vgpuInstance, encoder_capacity) + +def nvmlVgpuInstanceGetFbUsage(vgpuInstance): + c_fb_usage = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFbUsage") + ret = fn(vgpuInstance, byref(c_fb_usage)) + _nvmlCheckReturn(ret) + return c_fb_usage.value + +def nvmlVgpuTypeGetCapabilities(vgpuTypeId, capability): + c_cap_result = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuTypeGetCapabilities") + ret = fn(vgpuTypeId, _nvmlVgpuCapability_t(capability), byref(c_cap_result)) + _nvmlCheckReturn(ret) + return (c_cap_result.value) + +def nvmlVgpuInstanceGetGpuInstanceId(vgpuInstance): + c_id = c_uint(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetGpuInstanceId") + ret = fn(vgpuInstance, byref(c_id)) + _nvmlCheckReturn(ret) + return (c_id.value) + +@convertStrBytes +def nvmlVgpuInstanceGetGpuPciId(vgpuInstance): + c_vgpuPciId = create_string_buffer(NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetGpuPciId") + ret = fn(vgpuInstance, c_vgpuPciId, byref(c_uint(NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE))) + _nvmlCheckReturn(ret) + return c_vgpuPciId.value + +def nvmlDeviceGetVgpuUtilization(handle, timeStamp): + # first call to get the size + c_vgpu_count = c_uint(0) + c_time_stamp = c_ulonglong(timeStamp) + c_sample_value_type = _nvmlValueType_t() + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuUtilization") + ret = fn(handle, c_time_stamp, byref(c_sample_value_type), byref(c_vgpu_count), None) + + if (ret == NVML_SUCCESS): + # special case, no active vGPUs + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + sampleArray = c_vgpu_count.value * c_nvmlVgpuInstanceUtilizationSample_t + c_samples = sampleArray() + + # make the call again + ret = fn(handle, c_time_stamp, byref(c_sample_value_type), byref(c_vgpu_count), 
c_samples) + _nvmlCheckReturn(ret) + + return c_samples[0:c_vgpu_count.value] + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetVgpuInstancesUtilizationInfo(handle, timeStamp): + # first call to get the size + c_time_stamp = c_ulonglong(timeStamp) + c_vgpuUtilInfo = c_nvmlVgpuInstancesUtilizationInfo_v1_t(0) + c_vgpuUtilInfo.version = VgpuInstancesUtilizationInfo_v1 + c_vgpuUtilInfo.sampleValType = _nvmlValueType_t() + c_vgpuUtilInfo.vgpuInstanceCount = c_uint(0) + c_vgpuUtilInfo.lastSeenTimeStamp = c_time_stamp + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuInstancesUtilizationInfo") + ret = fn(handle, byref(c_vgpuUtilInfo)) + + if (ret == NVML_SUCCESS): + # special case, no active vGPUs + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + sampleArray = c_vgpuUtilInfo.vgpuInstanceCount * c_nvmlVgpuInstanceUtilizationInfo_v1_t + c_samples = sampleArray() + c_vgpuUtilInfo.vgpuUtilArray = c_samples + + # make the call again + ret = fn(handle, byref(c_vgpuUtilInfo)) + _nvmlCheckReturn(ret) + + return c_samples[0:c_vgpuUtilInfo.vgpuInstanceCount] + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetP2PStatus(device1, device2, p2pIndex): + c_p2pstatus = _nvmlGpuP2PStatus_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetP2PStatus") + ret = fn(device1, device2,p2pIndex, byref(c_p2pstatus)) + _nvmlCheckReturn(ret) + return c_p2pstatus.value + +def nvmlDeviceGetGridLicensableFeatures_v4(handle): + c_get_grid_licensable_features = c_nvmlGridLicensableFeatures_v4_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGridLicensableFeatures_v4") + ret = fn(handle, byref(c_get_grid_licensable_features)) + _nvmlCheckReturn(ret) + + return (c_get_grid_licensable_features) + +def nvmlDeviceGetGridLicensableFeatures(handle): + return nvmlDeviceGetGridLicensableFeatures_v4(handle) + +def nvmlDeviceGetGspFirmwareVersion(handle, version=None): + isUserDefined = version is not None + if not isUserDefined: + version = (c_char * NVML_GSP_FIRMWARE_VERSION_BUF_SIZE)() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGspFirmwareVersion") + ret = fn(handle, version) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isUserDefined else version.value + +def nvmlDeviceGetGspFirmwareMode(handle, isEnabled=c_uint(), defaultMode=c_uint()): + isReference = type(isEnabled) is not c_uint + isEnabledRef = isEnabled if isReference else byref(isEnabled) + defaultModeRef = defaultMode if isReference else byref(defaultMode) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGspFirmwareMode") + ret = fn(handle, isEnabledRef, defaultModeRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else [isEnabled.value, defaultMode.value] + +def nvmlDeviceGetEncoderCapacity(handle, encoderQueryType): + c_encoder_capacity = c_ulonglong(0) + c_encoderQuery_type = _nvmlEncoderQueryType_t(encoderQueryType) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderCapacity") + ret = fn(handle, c_encoderQuery_type, byref(c_encoder_capacity)) + _nvmlCheckReturn(ret) + return c_encoder_capacity.value + +def nvmlDeviceGetVgpuProcessUtilization(handle, timeStamp): + # first call to get the size + c_vgpu_count = c_uint(0) + c_time_stamp = c_ulonglong(timeStamp) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuProcessUtilization") + ret = fn(handle, c_time_stamp, byref(c_vgpu_count), None) + + if (ret == NVML_SUCCESS): + # special case, no active vGPUs + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + sampleArray = c_vgpu_count.value * c_nvmlVgpuProcessUtilizationSample_t + 
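+        # Second stage of the usual two-call NVML pattern: the first call above
+        # reported the required element count in c_vgpu_count, so a ctypes array
+        # of exactly that many samples is instantiated and passed back in.
+        # Hedged polling sketch (illustrative only; field names follow the vGPU
+        # process utilization sample struct defined earlier in this module):
+        #   last_ts = 0
+        #   while keep_polling:
+        #       for s in nvmlDeviceGetVgpuProcessUtilization(handle, last_ts):
+        #           last_ts = max(last_ts, s.timeStamp)
+        #           print(s.vgpuInstance, s.pid, s.smUtil, s.memUtil)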
c_samples = sampleArray() + + # make the call again + ret = fn(handle, c_time_stamp, byref(c_vgpu_count), c_samples) + _nvmlCheckReturn(ret) + + return c_samples[0:c_vgpu_count.value] + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetVgpuProcessesUtilizationInfo(handle, timeStamp): + # first call to get the size + c_time_stamp = c_ulonglong(timeStamp) + c_vgpuProcUtilInfo = c_nvmlVgpuProcessesUtilizationInfo_v1_t(0) + c_vgpuProcUtilInfo.version = VgpuProcessesUtilizationInfo_v1 + c_vgpuProcUtilInfo.vgpuProcessCount = c_uint(0) + c_vgpuProcUtilInfo.lastSeenTimeStamp = c_time_stamp + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuProcessesUtilizationInfo") + ret = fn(handle, byref(c_vgpuProcUtilInfo)) + + if (ret == NVML_SUCCESS): + # special case, no active vGPUs + return [] + elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + sampleArray = c_vgpuProcUtilInfo.vgpuProcessCount * c_nvmlVgpuProcessUtilizationInfo_v1_t + c_samples = sampleArray() + c_vgpuProcUtilInfo.vgpuProcUtilArray = c_samples + + # make the call again + ret = fn(handle, byref(c_vgpuProcUtilInfo)) + _nvmlCheckReturn(ret) + + return c_samples[0:c_vgpuProcUtilInfo.vgpuProcessCount] + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetEncoderStats(handle): + c_encoderCount = c_ulonglong(0) + c_encodeFps = c_ulonglong(0) + c_encoderLatency = c_ulonglong(0) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderStats") + ret = fn(handle, byref(c_encoderCount), byref(c_encodeFps), byref(c_encoderLatency)) + _nvmlCheckReturn(ret) + return (c_encoderCount.value, c_encodeFps.value, c_encoderLatency.value) + +def nvmlDeviceGetEncoderSessions(handle): + # first call to get the size + c_session_count = c_uint(0) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderSessions") + ret = fn(handle, byref(c_session_count), None) + + if (ret == NVML_SUCCESS): + if (c_session_count.value != 0): + # typical case + session_array = c_nvmlEncoderSession_t * c_session_count.value + c_sessions = session_array() + + # make the call again + ret = fn(handle, byref(c_session_count), c_sessions) + _nvmlCheckReturn(ret) + sessions = [] + for i in range(c_session_count.value): + sessions.append(c_sessions[i]) + return sessions + else: + return [] # no active sessions + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetFBCStats(handle): + c_fbcStats = c_nvmlFBCStats_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetFBCStats") + ret = fn(handle, byref(c_fbcStats)) + _nvmlCheckReturn(ret) + return c_fbcStats + +def nvmlDeviceGetFBCSessions(handle): + # first call to get the size + c_session_count = c_uint(0) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetFBCSessions") + ret = fn(handle, byref(c_session_count), None) + + if (ret == NVML_SUCCESS): + if (c_session_count.value != 0): + # typical case + session_array = c_nvmlFBCSession_t * c_session_count.value + c_sessions = session_array() + + # make the call again + ret = fn(handle, byref(c_session_count), c_sessions) + _nvmlCheckReturn(ret) + sessions = [] + for i in range(c_session_count.value): + sessions.append(c_sessions[i]) + return sessions + else: + return [] # no active sessions + else: + # error case + raise NVMLError(ret) + +def nvmlVgpuInstanceGetEncoderStats(vgpuInstance): + c_encoderCount = c_ulonglong(0) + c_encodeFps = c_ulonglong(0) + c_encoderLatency = c_ulonglong(0) + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEncoderStats") + ret = fn(vgpuInstance, byref(c_encoderCount), byref(c_encodeFps), byref(c_encoderLatency)) + 
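+    # On success this wrapper returns a plain (sessionCount, averageFps,
+    # averageLatency) tuple; see the return statement right after the check.
+    # Hedged usage sketch (illustrative only; session field names follow the
+    # encoder session struct defined earlier in this module):
+    #   count, fps, latency = nvmlVgpuInstanceGetEncoderStats(vgpuInstance)
+    #   for s in nvmlVgpuInstanceGetEncoderSessions(vgpuInstance):
+    #       print(s.sessionId, s.codecType, s.averageFps)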
_nvmlCheckReturn(ret) + return (c_encoderCount.value, c_encodeFps.value, c_encoderLatency.value) + +def nvmlVgpuInstanceGetEncoderSessions(vgpuInstance): + # first call to get the size + c_session_count = c_uint(0) + + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetEncoderSessions") + ret = fn(vgpuInstance, byref(c_session_count), None) + + if (ret == NVML_SUCCESS): + if (c_session_count.value != 0): + # typical case + session_array = c_nvmlEncoderSession_t * c_session_count.value + c_sessions = session_array() + + # make the call again + ret = fn(vgpuInstance, byref(c_session_count), c_sessions) + _nvmlCheckReturn(ret) + sessions = [] + for i in range(c_session_count.value): + sessions.append(c_sessions[i]) + return sessions + else: + return [] # no active sessions + else: + # error case + raise NVMLError(ret) + +def nvmlVgpuInstanceGetFBCStats(vgpuInstance): + c_fbcStats = c_nvmlFBCStats_t() + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFBCStats") + ret = fn(vgpuInstance, byref(c_fbcStats)) + _nvmlCheckReturn(ret) + return c_fbcStats + +def nvmlVgpuInstanceGetFBCSessions(vgpuInstance): + # first call to get the size + c_session_count = c_uint(0) + + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetFBCSessions") + ret = fn(vgpuInstance, byref(c_session_count), None) + + if (ret == NVML_SUCCESS): + if (c_session_count.value != 0): + # typical case + session_array = c_nvmlFBCSession_t * c_session_count.value + c_sessions = session_array() + + # make the call again + ret = fn(vgpuInstance, byref(c_session_count), c_sessions) + _nvmlCheckReturn(ret) + sessions = [] + for i in range(c_session_count.value): + sessions.append(c_sessions[i]) + return sessions + else: + return [] # no active sessions + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetProcessUtilization(handle, timeStamp): + # first call to get the size + c_count = c_uint(0) + c_time_stamp = c_ulonglong(timeStamp) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetProcessUtilization") + ret = fn(handle, None, byref(c_count), c_time_stamp) + + if (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + sampleArray = c_count.value * c_nvmlProcessUtilizationSample_t + c_samples = sampleArray() + + # make the call again + ret = fn(handle, c_samples, byref(c_count), c_time_stamp) + _nvmlCheckReturn(ret) + + return c_samples[0:c_count.value] + else: + # error case + raise NVMLError(ret) + +def nvmlDeviceGetProcessesUtilizationInfo(handle, timeStamp): + # first call to get the size + c_time_stamp = c_ulonglong(timeStamp) + c_processesUtilInfo = c_nvmlProcessesUtilizationInfo_v1_t(0) + c_processesUtilInfo.version = ProcessesUtilizationInfo_v1 + c_processesUtilInfo.processSamplesCount = c_uint(0) + c_processesUtilInfo.lastSeenTimeStamp = c_time_stamp + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetProcessesUtilizationInfo") + ret = fn(handle, byref(c_processesUtilInfo)) + + if (ret == NVML_ERROR_INSUFFICIENT_SIZE): + # typical case + sampleArray = c_processesUtilInfo.processSamplesCount * c_nvmlProcessUtilizationInfo_v1_t + c_samples = sampleArray() + c_processesUtilInfo.procUtilArray = c_samples + + # make the call again + ret = fn(handle, byref(c_processesUtilInfo)) + _nvmlCheckReturn(ret) + + return c_samples[0:c_processesUtilInfo.processSamplesCount] + else: + # error case + raise NVMLError(ret) + +def nvmlVgpuInstanceGetMetadata(vgpuInstance): + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetMetadata") + c_vgpuMetadata = c_nvmlVgpuMetadata_t() + c_bufferSize = c_uint(0) + # Make the first NVML API call to get the 
c_bufferSize value. + # We have already allocated required buffer above. + ret = fn(vgpuInstance, byref(c_vgpuMetadata), byref(c_bufferSize)) + if (ret == NVML_ERROR_INSUFFICIENT_SIZE): + ret = fn(vgpuInstance, byref(c_vgpuMetadata), byref(c_bufferSize)) + _nvmlCheckReturn(ret) + else: + raise NVMLError(ret) + return c_vgpuMetadata + +def nvmlDeviceGetVgpuMetadata(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuMetadata") + c_vgpuPgpuMetadata = c_nvmlVgpuPgpuMetadata_t() + c_bufferSize = c_uint(0) + # Make the first NVML API call to get the c_bufferSize value. + # We have already allocated required buffer above. + ret = fn(handle, byref(c_vgpuPgpuMetadata), byref(c_bufferSize)) + if (ret == NVML_ERROR_INSUFFICIENT_SIZE): + ret = fn(handle, byref(c_vgpuPgpuMetadata), byref(c_bufferSize)) + _nvmlCheckReturn(ret) + else: + raise NVMLError(ret) + return c_vgpuPgpuMetadata + +def nvmlGetVgpuCompatibility(vgpuMetadata, pgpuMetadata): + fn = _nvmlGetFunctionPointer("nvmlGetVgpuCompatibility") + c_vgpuPgpuCompatibility = c_nvmlVgpuPgpuCompatibility_t() + ret = fn(byref(vgpuMetadata), byref(pgpuMetadata), byref(c_vgpuPgpuCompatibility)) + _nvmlCheckReturn(ret) + return c_vgpuPgpuCompatibility + +@convertStrBytes +def nvmlDeviceGetPgpuMetadataString(handle): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPgpuMetadataString") + c_pgpuMetadata = create_string_buffer(NVML_VGPU_PGPU_METADATA_OPAQUE_DATA_SIZE) + c_bufferSize = c_uint(0) + # Make the first NVML API call to get the c_bufferSize value. + # We have already allocated required buffer above. + ret = fn(handle, byref(c_pgpuMetadata), byref(c_bufferSize)) + if (ret == NVML_ERROR_INSUFFICIENT_SIZE): + ret = fn(handle, byref(c_pgpuMetadata), byref(c_bufferSize)) + _nvmlCheckReturn(ret) + else: + raise NVMLError(ret) + return (c_pgpuMetadata.value, c_bufferSize.value) + +def nvmlDeviceGetVgpuSchedulerLog(handle): + c_vgpu_sched_log = c_nvmlVgpuSchedulerLog_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuSchedulerLog") + ret = fn(handle, byref(c_vgpu_sched_log)) + _nvmlCheckReturn(ret) + return c_vgpu_sched_log + +def nvmlDeviceGetVgpuSchedulerState(handle): + c_vgpu_sched_state = c_nvmlVgpuSchedulerGetState_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuSchedulerState") + ret = fn(handle, byref(c_vgpu_sched_state)) + _nvmlCheckReturn(ret) + return c_vgpu_sched_state + +def nvmlDeviceGetVgpuSchedulerCapabilities(handle): + c_vgpu_sched_caps = c_nvmlVgpuSchedulerCapabilities_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetVgpuSchedulerCapabilities") + ret = fn(handle, byref(c_vgpu_sched_caps)) + _nvmlCheckReturn(ret) + return c_vgpu_sched_caps + +def nvmlDeviceSetVgpuSchedulerState(handle, sched_state): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetVgpuSchedulerState") + ret = fn(handle, byref(sched_state)) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlSetVgpuVersion(vgpuVersion): + fn = _nvmlGetFunctionPointer("nvmlSetVgpuVersion") + ret = fn(byref(vgpuVersion)) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlGetVgpuVersion(supported=None, current=None): + isUserDefined = (supported is not None) or (current is not None) + if not isUserDefined: + supported = c_nvmlVgpuVersion_t() + current = c_nvmlVgpuVersion_t() + fn = _nvmlGetFunctionPointer("nvmlGetVgpuVersion") + ret = fn(byref(supported), byref(current)) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isUserDefined else [(supported.minVersion, + supported.maxVersion), + (current.minVersion, + current.maxVersion)] + +def 
nvmlVgpuInstanceGetAccountingMode(vgpuInstance): + c_mode = _nvmlEnableState_t() + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetAccountingMode") + ret = fn(vgpuInstance, byref(c_mode)) + _nvmlCheckReturn(ret) + return c_mode.value + +def nvmlVgpuInstanceGetAccountingPids(vgpuInstance): + c_pidCount = c_uint() + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetAccountingPids") + ret = fn(vgpuInstance, byref(c_pidCount), None) + if (ret == NVML_ERROR_INSUFFICIENT_SIZE): + sampleArray = c_pidCount.value * c_uint + c_pidArray = sampleArray() + ret = fn(vgpuInstance, byref(c_pidCount), byref(c_pidArray)) + _nvmlCheckReturn(ret) + else: + raise NVMLError(ret) + return (c_pidCount, c_pidArray) + +def nvmlVgpuInstanceGetAccountingStats(vgpuInstance, pid): + c_accountingStats = c_nvmlAccountingStats_t() + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceGetAccountingStats") + ret = fn(vgpuInstance, pid, byref(c_accountingStats)) + _nvmlCheckReturn(ret) + return c_accountingStats + +def nvmlVgpuInstanceClearAccountingPids(vgpuInstance): + fn = _nvmlGetFunctionPointer("nvmlVgpuInstanceClearAccountingPids") + ret = fn(vgpuInstance) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlGetExcludedDeviceCount(): + c_count = c_uint() + fn = _nvmlGetFunctionPointer("nvmlGetExcludedDeviceCount") + ret = fn(byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +def nvmlGetExcludedDeviceInfoByIndex(index): + c_index = c_uint(index) + info = c_nvmlExcludedDeviceInfo_t() + fn = _nvmlGetFunctionPointer("nvmlGetExcludedDeviceInfoByIndex") + ret = fn(c_index, byref(info)) + _nvmlCheckReturn(ret) + return info + +def nvmlDeviceGetHostVgpuMode(handle): + c_host_vgpu_mode = _nvmlHostVgpuMode_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetHostVgpuMode") + ret = fn(handle, byref(c_host_vgpu_mode)) + _nvmlCheckReturn(ret) + return c_host_vgpu_mode.value + +def nvmlDeviceSetMigMode(device, mode): + c_activationStatus = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceSetMigMode") + ret = fn(device, mode, byref(c_activationStatus)) + _nvmlCheckReturn(ret) + return c_activationStatus.value + +def nvmlDeviceGetMigMode(device): + c_currentMode = c_uint() + c_pendingMode = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMigMode") + ret = fn(device, byref(c_currentMode), byref(c_pendingMode)) + _nvmlCheckReturn(ret) + return [c_currentMode.value, c_pendingMode.value] + +def nvmlDeviceGetGpuInstanceProfileInfo(device, profile, version=2): + if version == 2: + c_info = c_nvmlGpuInstanceProfileInfo_v2_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceProfileInfoV") + elif version == 1: + c_info = c_nvmlGpuInstanceProfileInfo_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceProfileInfo") + else: + raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND) + ret = fn(device, profile, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +# Define function alias for the API exposed by NVML +nvmlDeviceGetGpuInstanceProfileInfoV = nvmlDeviceGetGpuInstanceProfileInfo + +def nvmlDeviceGetGpuInstanceRemainingCapacity(device, profileId): + c_count = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceRemainingCapacity") + ret = fn(device, profileId, byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +def nvmlDeviceGetGpuInstancePossiblePlacements(device, profileId, placementsRef, countRef): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstancePossiblePlacements_v2") + ret = fn(device, profileId, placementsRef, countRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def 
nvmlDeviceCreateGpuInstance(device, profileId): + c_instance = c_nvmlGpuInstance_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceCreateGpuInstance") + ret = fn(device, profileId, byref(c_instance)) + _nvmlCheckReturn(ret) + return c_instance + +def nvmlDeviceCreateGpuInstanceWithPlacement(device, profileId, placement): + c_instance = c_nvmlGpuInstance_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceCreateGpuInstanceWithPlacement") + ret = fn(device, profileId, placement, byref(c_instance)) + _nvmlCheckReturn(ret) + return c_instance + +def nvmlGpuInstanceDestroy(gpuInstance): + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceDestroy") + ret = fn(gpuInstance) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetGpuInstances(device, profileId, gpuInstancesRef, countRef): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstances") + ret = fn(device, profileId, gpuInstancesRef, countRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetGpuInstanceById(device, gpuInstanceId): + c_instance = c_nvmlGpuInstance_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceById") + ret = fn(device, gpuInstanceId, byref(c_instance)) + _nvmlCheckReturn(ret) + return c_instance + +def nvmlGpuInstanceGetInfo(gpuInstance): + c_info = c_nvmlGpuInstanceInfo_t() + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetInfo") + ret = fn(gpuInstance, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +def nvmlGpuInstanceGetComputeInstanceProfileInfo(device, profile, engProfile, version=2): + if version == 2: + c_info = c_nvmlComputeInstanceProfileInfo_v2_t() + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceProfileInfoV") + elif version == 1: + c_info = c_nvmlComputeInstanceProfileInfo_t() + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceProfileInfo") + else: + raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND) + ret = fn(device, profile, engProfile, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +# Define function alias for the API exposed by NVML +nvmlGpuInstanceGetComputeInstanceProfileInfoV = nvmlGpuInstanceGetComputeInstanceProfileInfo + +def nvmlGpuInstanceGetComputeInstanceRemainingCapacity(gpuInstance, profileId): + c_count = c_uint() + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceRemainingCapacity") + ret = fn(gpuInstance, profileId, byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +def nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpuInstance, profileId, placementsRef, countRef): + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstancePossiblePlacements") + ret = fn(gpuInstance, profileId, placementsRef, countRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlGpuInstanceCreateComputeInstance(gpuInstance, profileId): + c_instance = c_nvmlComputeInstance_t() + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceCreateComputeInstance") + ret = fn(gpuInstance, profileId, byref(c_instance)) + _nvmlCheckReturn(ret) + return c_instance + +def nvmlGpuInstanceCreateComputeInstanceWithPlacement(gpuInstance, profileId, placement): + c_instance = c_nvmlComputeInstance_t() + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceCreateComputeInstanceWithPlacement") + ret = fn(gpuInstance, profileId, placement, byref(c_instance)) + _nvmlCheckReturn(ret) + return c_instance + +def nvmlComputeInstanceDestroy(computeInstance): + fn = _nvmlGetFunctionPointer("nvmlComputeInstanceDestroy") + ret = fn(computeInstance) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlGpuInstanceGetComputeInstances(gpuInstance, 
profileId, computeInstancesRef, countRef): + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstances") + ret = fn(gpuInstance, profileId, computeInstancesRef, countRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlGpuInstanceGetComputeInstanceById(gpuInstance, computeInstanceId): + c_instance = c_nvmlComputeInstance_t() + fn = _nvmlGetFunctionPointer("nvmlGpuInstanceGetComputeInstanceById") + ret = fn(gpuInstance, computeInstanceId, byref(c_instance)) + _nvmlCheckReturn(ret) + return c_instance + +def nvmlComputeInstanceGetInfo_v2(computeInstance): + c_info = c_nvmlComputeInstanceInfo_t() + fn = _nvmlGetFunctionPointer("nvmlComputeInstanceGetInfo_v2") + ret = fn(computeInstance, byref(c_info)) + _nvmlCheckReturn(ret) + return c_info + +def nvmlComputeInstanceGetInfo(computeInstance): + return nvmlComputeInstanceGetInfo_v2(computeInstance) + +def nvmlDeviceIsMigDeviceHandle(device): + c_isMigDevice = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceIsMigDeviceHandle") + ret = fn(device, byref(c_isMigDevice)) + _nvmlCheckReturn(ret) + return c_isMigDevice + +def nvmlDeviceGetGpuInstanceId(device): + c_gpuInstanceId = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuInstanceId") + ret = fn(device, byref(c_gpuInstanceId)) + _nvmlCheckReturn(ret) + return c_gpuInstanceId.value + +def nvmlDeviceGetComputeInstanceId(device): + c_computeInstanceId = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeInstanceId") + ret = fn(device, byref(c_computeInstanceId)) + _nvmlCheckReturn(ret) + return c_computeInstanceId.value + +def nvmlDeviceGetMaxMigDeviceCount(device): + c_count = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxMigDeviceCount") + ret = fn(device, byref(c_count)) + _nvmlCheckReturn(ret) + return c_count.value + +def nvmlDeviceGetMigDeviceHandleByIndex(device, index): + c_index = c_uint(index) + migDevice = c_nvmlDevice_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMigDeviceHandleByIndex") + ret = fn(device, c_index, byref(migDevice)) + _nvmlCheckReturn(ret) + return migDevice + +def nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice): + device = c_nvmlDevice_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDeviceHandleFromMigDeviceHandle") + ret = fn(migDevice, byref(device)) + _nvmlCheckReturn(ret) + return device + +def nvmlDeviceGetAttributes_v2(device): + c_attrs = c_nvmlDeviceAttributes() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAttributes_v2") + ret = fn(device, byref(c_attrs)) + _nvmlCheckReturn(ret) + return c_attrs + +def nvmlDeviceGetAttributes(device): + return nvmlDeviceGetAttributes_v2(device) + +def nvmlDeviceGetRemappedRows(device): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetRemappedRows") + c_corr = c_uint() + c_unc = c_uint() + c_bpending = c_uint() + c_bfailure = c_uint() + ret = fn(device, byref(c_corr), byref(c_unc), byref(c_bpending), byref(c_bfailure)) + _nvmlCheckReturn(ret) + return (c_corr.value, c_unc.value, c_bpending.value, c_bfailure.value) + +def nvmlDeviceGetRowRemapperHistogram(device): + c_vals = c_nvmlRowRemapperHistogramValues() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetRowRemapperHistogram") + ret = fn(device, byref(c_vals)) + _nvmlCheckReturn(ret) + return c_vals + +def nvmlDeviceGetArchitecture(device): + arch = _nvmlDeviceArchitecture_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetArchitecture") + ret = fn(device, byref(arch)) + _nvmlCheckReturn(ret) + return arch.value + +def nvmlDeviceGetBusType(device): + c_busType = _nvmlBusType_t() + fn = 
_nvmlGetFunctionPointer("nvmlDeviceGetBusType") + ret = fn(device, byref(c_busType)) + _nvmlCheckReturn(ret) + return c_busType.value + +def nvmlDeviceGetIrqNum(device): + c_irqNum = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetIrqNum") + ret = fn(device, byref(c_irqNum)) + _nvmlCheckReturn(ret) + return c_irqNum.value + +def nvmlDeviceGetNumGpuCores(device): + c_numCores = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNumGpuCores") + ret = fn(device, byref(c_numCores)) + _nvmlCheckReturn(ret) + return c_numCores.value + +def nvmlDeviceGetPowerSource(device): + c_powerSource = _nvmlPowerSource_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerSource") + ret = fn(device, byref(c_powerSource)) + _nvmlCheckReturn(ret) + return c_powerSource.value + +def nvmlDeviceGetMemoryBusWidth(device): + c_memBusWidth = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryBusWidth") + ret = fn(device, byref(c_memBusWidth)) + _nvmlCheckReturn(ret) + return c_memBusWidth.value + +def nvmlDeviceGetPcieLinkMaxSpeed(device): + c_speed = _nvmlPcieLinkMaxSpeed_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieLinkMaxSpeed") + ret = fn(device, byref(c_speed)) + _nvmlCheckReturn(ret) + return c_speed.value + +def nvmlDeviceGetAdaptiveClockInfoStatus(device): + c_adaptiveClockInfoStatus = _nvmlAdaptiveClockInfoStatus_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetAdaptiveClockInfoStatus") + ret = fn(device, byref(c_adaptiveClockInfoStatus)) + _nvmlCheckReturn(ret) + return c_adaptiveClockInfoStatus.value + +def nvmlDeviceGetPcieSpeed(device): + c_speed = c_uint() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieSpeed") + ret = fn(device, byref(c_speed)) + _nvmlCheckReturn(ret) + return c_speed.value + +def nvmlDeviceGetDynamicPstatesInfo(device, c_dynamicpstatesinfo=c_nvmlGpuDynamicPstatesInfo_t()): + isReference = type(c_dynamicpstatesinfo) is not c_nvmlGpuDynamicPstatesInfo_t + dynamicpstatesinfoRef = c_dynamicpstatesinfo if isReference else byref(c_dynamicpstatesinfo) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDynamicPstatesInfo"); + ret = fn(device, dynamicpstatesinfoRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else c_dynamicpstatesinfo + +def nvmlDeviceSetFanSpeed_v2(handle, index, speed): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetFanSpeed_v2"); + ret = fn(handle, index, speed) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetThermalSettings(device, sensorindex, c_thermalsettings=c_nvmlGpuThermalSettings_t()): + isReference = type(c_thermalsettings) is not c_nvmlGpuThermalSettings_t + thermalsettingsRef = c_thermalsettings if isReference else byref(c_thermalsettings) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetThermalSettings"); + ret = fn(device, sensorindex, thermalsettingsRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else c_thermalsettings.sensor[:] + +def nvmlDeviceGetMinMaxClockOfPState(device, clockType, pstate, minClockMHz=c_uint(), maxClockMHz=c_uint()): + isReference = (type(minClockMHz) is not c_uint) or (type(maxClockMHz) is not c_uint) + minClockMHzRef = minClockMHz if isReference else byref(minClockMHz) + maxClockMHzRef = maxClockMHz if isReference else byref(maxClockMHz) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinMaxClockOfPState"); + ret = fn(device, _nvmlClockType_t(clockType), _nvmlClockType_t(pstate), minClockMHzRef, maxClockMHzRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else (minClockMHz.value, maxClockMHz.value) + +class c_nvmlClockOffset_t(_PrintableStructure): + 
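+    # Versioned parameter block for nvmlDeviceGetClockOffsets and
+    # nvmlDeviceSetClockOffsets below: callers set 'version' (nvmlClockOffset_v1,
+    # defined just after this class) plus 'type' and 'pstate' before the query.
+    # Note that, unlike most wrappers in this module, the two clock-offset
+    # wrappers below return NVML_SUCCESS without routing the raw status through
+    # _nvmlCheckReturn, so failures do not raise NVMLError.
+    # Hedged usage sketch (illustrative only; clock and pstate constants as
+    # defined earlier in this module):
+    #   info = c_nvmlClockOffset_t()
+    #   info.version = nvmlClockOffset_v1
+    #   info.type = NVML_CLOCK_GRAPHICS
+    #   info.pstate = NVML_PSTATE_0
+    #   nvmlDeviceGetClockOffsets(handle, byref(info))
+    #   print(info.clockOffsetMHz, info.minClockOffsetMHz, info.maxClockOffsetMHz)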
_fields_ = [ + ('version', c_uint), + ('type', _nvmlClockType_t), + ('pstate', _nvmlPstates_t), + ('clockOffsetMHz', c_int), + ('minClockOffsetMHz', c_int), + ('maxClockOffsetMHz', c_int), + ] + +nvmlClockOffset_v1 = 0x1000018 + +def nvmlDeviceGetClockOffsets(device, info): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetClockOffsets"); + ret = fn(device, info) + return NVML_SUCCESS + +def nvmlDeviceSetClockOffsets(device, info): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetClockOffsets"); + ret = fn(device, info) + return NVML_SUCCESS + +def nvmlDeviceGetSupportedPerformanceStates(device): + pstates = [] + c_count = c_uint(NVML_MAX_GPU_PERF_PSTATES) + c_size = sizeof(c_uint)*c_count.value + + # NOTE: use 'c_uint' to represent the size of the nvmlPstate_t enumeration. + pstates_array = _nvmlPstates_t * c_count.value + c_pstates = pstates_array() + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedPerformanceStates") + ret = fn(device, c_pstates, c_size) + _nvmlCheckReturn(ret) + + for value in c_pstates: + if value != NVML_PSTATE_UNKNOWN: + pstates.append(value) + + return pstates + +def nvmlDeviceGetGpcClkVfOffset(device): + offset = c_int32() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpcClkVfOffset") + ret = fn(device, byref(offset)) + _nvmlCheckReturn(ret) + return offset.value + +def nvmlDeviceSetGpcClkVfOffset(device, offset): + c_offset = c_int32(offset) + fn = _nvmlGetFunctionPointer("nvmlDeviceSetGpcClkVfOffset") + ret = fn(device, c_offset) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetGpcClkMinMaxVfOffset(device, minOffset=c_int(), maxOffset=c_int()): + isReference = (type(minOffset) is not c_int) or (type(maxOffset) is not c_int) + minOffsetRef = minOffset if isReference else byref(minOffset) + maxOffsetRef = maxOffset if isReference else byref(maxOffset) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpcClkMinMaxVfOffset") + ret = fn(device, minOffsetRef, maxOffsetRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else (minOffset.value, maxOffset.value) + +def nvmlDeviceGetMemClkVfOffset(device): + offset = c_int32() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemClkVfOffset") + ret = fn(device, byref(offset)) + _nvmlCheckReturn(ret) + return offset.value + +def nvmlDeviceSetMemClkVfOffset(device, offset): + c_offset = c_int32(offset) + fn = _nvmlGetFunctionPointer("nvmlDeviceSetMemClkVfOffset") + ret = fn(device, c_offset) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetMemClkMinMaxVfOffset(device, minOffset=c_int(), maxOffset=c_int()): + isReference = (type(minOffset) is not c_int) or (type(maxOffset) is not c_int) + minOffsetRef = minOffset if isReference else byref(minOffset) + maxOffsetRef = maxOffset if isReference else byref(maxOffset) + + fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemClkMinMaxVfOffset") + ret = fn(device, minOffsetRef, maxOffsetRef) + _nvmlCheckReturn(ret) + return NVML_SUCCESS if isReference else (minOffset.value, maxOffset.value) + +def nvmlSystemSetConfComputeGpusReadyState(state): + c_state = c_uint(state) + fn = _nvmlGetFunctionPointer("nvmlSystemSetConfComputeGpusReadyState") + ret = fn(c_state) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlSystemGetConfComputeGpusReadyState(): + c_state = c_uint() + fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeGpusReadyState") + ret = fn(byref(c_state)) + _nvmlCheckReturn(ret) + return c_state.value + +def nvmlSystemGetConfComputeCapabilities(): + c_ccSysCaps = c_nvmlConfComputeSystemCaps_t() + fn = 
_nvmlGetFunctionPointer("nvmlSystemGetConfComputeCapabilities") + ret = fn(byref(c_ccSysCaps)) + _nvmlCheckReturn(ret) + return c_ccSysCaps + +def nvmlSystemGetConfComputeState(): + c_state = c_nvmlConfComputeSystemState_t() + fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeState") + ret = fn(byref(c_state)) + _nvmlCheckReturn(ret) + return c_state + +def nvmlSystemGetConfComputeSettings(settings): + fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeSettings") + return fn(settings) + +def nvmlDeviceSetConfComputeUnprotectedMemSize(device, c_ccMemSize): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetConfComputeUnprotectedMemSize") + ret = fn(device, c_ccMemSize) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetConfComputeMemSizeInfo(device): + c_ccMemSize = c_nvmlConfComputeMemSizeInfo_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeMemSizeInfo") + ret = fn(device, byref(c_ccMemSize)) + _nvmlCheckReturn(ret) + return c_ccMemSize + +def nvmlDeviceGetConfComputeProtectedMemoryUsage(device): + c_memory = c_nvmlMemory_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeProtectedMemoryUsage") + ret = fn(device, byref(c_memory)) + _nvmlCheckReturn(ret) + return c_memory + +def nvmlDeviceGetConfComputeGpuCertificate(device): + c_cert = c_nvmlConfComputeGpuCertificate_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeGpuCertificate") + ret = fn(device, byref(c_cert)) + _nvmlCheckReturn(ret) + return c_cert + +def nvmlDeviceGetConfComputeGpuAttestationReport(device, c_nonce): + c_attestReport = c_nvmlConfComputeGpuAttestationReport_t() + c_nonce_arr = (c_uint8 * len(c_nonce))(*(c_nonce)) + setattr(c_attestReport, 'nonce', c_nonce_arr) + fn = _nvmlGetFunctionPointer("nvmlDeviceGetConfComputeGpuAttestationReport") + ret = fn(device, byref(c_attestReport)) + _nvmlCheckReturn(ret) + return c_attestReport + +def nvmlSystemSetConfComputeKeyRotationThresholdInfo(max_atk_adv): + c_keyRotationThrInfo = c_nvmlConfComputeSetKeyRotationThresholdInfo_t(0) + c_keyRotationThrInfo.version = ConfComputeSetKeyRotationThresholdInfo_v1 + c_keyRotationThrInfo.maxAttackerAdvantage = max_atk_adv + fn = _nvmlGetFunctionPointer("nvmlSystemSetConfComputeKeyRotationThresholdInfo") + ret = fn(byref(c_keyRotationThrInfo)) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlSystemGetConfComputeKeyRotationThresholdInfo(): + c_keyRotationThrInfo = c_nvmlConfComputeGetKeyRotationThresholdInfo_t(0) + c_keyRotationThrInfo.version = ConfComputeGetKeyRotationThresholdInfo_v1 + fn = _nvmlGetFunctionPointer("nvmlSystemGetConfComputeKeyRotationThresholdInfo") + ret = fn(byref(c_keyRotationThrInfo)) + _nvmlCheckReturn(ret) + return c_keyRotationThrInfo + +## GPM ## +######### + +## Enums/defines + +#### GPM Metric Identifiers +NVML_GPM_METRIC_GRAPHICS_UTIL = 1 # Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0 +NVML_GPM_METRIC_SM_UTIL = 2 # Percentage of SMs that were busy. 0.0 - 100.0 +NVML_GPM_METRIC_SM_OCCUPANCY = 3 # Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0 +NVML_GPM_METRIC_INTEGER_UTIL = 4 # Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0 +NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5 # Percentage of time the GPU's SMs were doing ANY tensor operations. 0.0 - 100.0 +NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6 # Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0 +NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7 # Percentage of time the GPU's SMs were doing HMMA tensor operations. 
0.0 - 100.0 +NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9 # Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0 +NVML_GPM_METRIC_DRAM_BW_UTIL = 10 # Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0 +NVML_GPM_METRIC_FP64_UTIL = 11 # Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0 +NVML_GPM_METRIC_FP32_UTIL = 12 # Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0 +NVML_GPM_METRIC_FP16_UTIL = 13 # Percentage of time the GPU's SMs were doing non-tensor FP16 math. 0.0 - 100.0 +NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20 # PCIe traffic from this GPU in MiB/sec +NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21 # PCIe traffic to this GPU in MiB/sec +NVML_GPM_METRIC_NVDEC_0_UTIL = 30 # Percent utilization of NVDEC 0. 0.0 - 100.0 +NVML_GPM_METRIC_NVDEC_1_UTIL = 31 # Percent utilization of NVDEC 1. 0.0 - 100.0 +NVML_GPM_METRIC_NVDEC_2_UTIL = 32 # Percent utilization of NVDEC 2. 0.0 - 100.0 +NVML_GPM_METRIC_NVDEC_3_UTIL = 33 # Percent utilization of NVDEC 3. 0.0 - 100.0 +NVML_GPM_METRIC_NVDEC_4_UTIL = 34 # Percent utilization of NVDEC 4. 0.0 - 100.0 +NVML_GPM_METRIC_NVDEC_5_UTIL = 35 # Percent utilization of NVDEC 5. 0.0 - 100.0 +NVML_GPM_METRIC_NVDEC_6_UTIL = 36 # Percent utilization of NVDEC 6. 0.0 - 100.0 +NVML_GPM_METRIC_NVDEC_7_UTIL = 37 # Percent utilization of NVDEC 7. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_0_UTIL = 40 # Percent utilization of NVJPG 0. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_1_UTIL = 41 # Percent utilization of NVJPG 1. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_2_UTIL = 42 # Percent utilization of NVJPG 2. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_3_UTIL = 43 # Percent utilization of NVJPG 3. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_4_UTIL = 44 # Percent utilization of NVJPG 4. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_5_UTIL = 45 # Percent utilization of NVJPG 5. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_6_UTIL = 46 # Percent utilization of NVJPG 6. 0.0 - 100.0 +NVML_GPM_METRIC_NVJPG_7_UTIL = 47 # Percent utilization of NVJPG 7. 0.0 - 100.0 +NVML_GPM_METRIC_NVOFA_0_UTIL = 50 # Percent utilization of NVOFA 0. 0.0 - 100.0 +NVML_GPM_METRIC_NVOFA_1_UTIL = 51 # Percent utilization of NVOFA 1. 
0.0 - 100.0 +NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60 # NvLink read bandwidth for all links in MiB/sec +NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61 # NvLink write bandwidth for all links in MiB/sec +NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62 # NvLink read bandwidth for link 0 in MiB/sec +NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63 # NvLink write bandwidth for link 0 in MiB/sec +NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64 # NvLink read bandwidth for link 1 in MiB/sec +NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65 # NvLink write bandwidth for link 1 in MiB/sec +NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66 # NvLink read bandwidth for link 2 in MiB/sec +NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67 # NvLink write bandwidth for link 2 in MiB/sec +NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68 # NvLink read bandwidth for link 3 in MiB/sec +NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69 # NvLink write bandwidth for link 3 in MiB/sec +NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70 # NvLink read bandwidth for link 4 in MiB/sec +NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71 # NvLink write bandwidth for link 4 in MiB/sec +NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72 # NvLink read bandwidth for link 5 in MiB/sec +NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73 # NvLink write bandwidth for link 5 in MiB/sec +NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74 # NvLink read bandwidth for link 6 in MiB/sec +NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75 # NvLink write bandwidth for link 6 in MiB/sec +NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76 # NvLink read bandwidth for link 7 in MiB/sec +NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77 # NvLink write bandwidth for link 7 in MiB/sec +NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78 # NvLink read bandwidth for link 8 in MiB/sec +NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79 # NvLink write bandwidth for link 8 in MiB/sec +NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80 # NvLink read bandwidth for link 9 in MiB/sec +NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81 # NvLink write bandwidth for link 9 in MiB/sec +NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82 # NvLink read bandwidth for link 10 in MiB/sec +NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83 # NvLink write bandwidth for link 10 in MiB/sec +NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84 # NvLink read bandwidth for link 11 in MiB/sec +NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85 # NvLink write bandwidth for link 11 in MiB/sec +NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86 # NvLink read bandwidth for link 12 in MiB/sec +NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87 # NvLink write bandwidth for link 12 in MiB/sec +NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88 # NvLink read bandwidth for link 13 in MiB/sec +NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89 # NvLink write bandwidth for link 13 in MiB/sec +NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90 # NvLink read bandwidth for link 14 in MiB/sec +NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91 # NvLink write bandwidth for link 14 in MiB/sec +NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92 # NvLink read bandwidth for link 15 in MiB/sec +NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93 # NvLink write bandwidth for link 15 in MiB/sec +NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94 # NvLink read bandwidth for link 16 in MiB/sec +NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95 # NvLink write bandwidth for link 16 in MiB/sec +NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96 # NvLink read bandwidth for link 17 in MiB/sec +NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97 # NvLink write bandwidth for link 17 in MiB/sec +NVML_GPM_METRIC_MAX = 98 + +## Structs + +class c_nvmlUnitInfo_t(_PrintableStructure): + 
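+    # Fixed-width identification strings (name, id, serial, firmwareVersion)
+    # for an NVML unit. The GPM structures and helpers follow below; hedged GPM
+    # sampling sketch (illustrative only, assumes `import time` and a device
+    # handle):
+    #   s1, s2 = nvmlGpmSampleAlloc(), nvmlGpmSampleAlloc()
+    #   nvmlGpmSampleGet(handle, s1)
+    #   time.sleep(1.0)
+    #   nvmlGpmSampleGet(handle, s2)
+    #   mg = c_nvmlGpmMetricsGet_t()
+    #   mg.version = NVML_GPM_METRICS_GET_VERSION
+    #   mg.numMetrics = 1
+    #   mg.sample1 = s1
+    #   mg.sample2 = s2
+    #   mg.metrics[0].metricId = NVML_GPM_METRIC_SM_UTIL
+    #   nvmlGpmMetricsGet(mg)
+    #   print(mg.metrics[0].value)
+    #   nvmlGpmSampleFree(s1)
+    #   nvmlGpmSampleFree(s2)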
_fields_ = [ + ('name', c_char * 96), + ('id', c_char * 96), + ('serial', c_char * 96), + ('firmwareVersion', c_char * 96), + ] + +class struct_c_nvmlGpmSample_t(Structure): + pass # opaque handle +c_nvmlGpmSample_t = POINTER(struct_c_nvmlGpmSample_t) + +class c_metricInfo_t(Structure): + _fields_ = [ + ("shortName", c_char_p), + ("longName", c_char_p), + ("unit", c_char_p), + ] + +class c_nvmlGpmMetric_t(_PrintableStructure): + _fields_ = [ + ('metricId', c_uint), + ('nvmlReturn', _nvmlReturn_t), + ('value', c_double), + ('metricInfo', c_metricInfo_t) + ] + +class c_nvmlGpmMetricsGet_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('numMetrics', c_uint), + ('sample1', c_nvmlGpmSample_t), + ('sample2', c_nvmlGpmSample_t), + ('metrics', c_nvmlGpmMetric_t * NVML_GPM_METRIC_MAX) + ] + +NVML_GPM_METRICS_GET_VERSION = 1 + +class c_nvmlGpmSupport_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('isSupportedDevice', c_uint), + ] + +NVML_GPM_SUPPORT_VERSION = 1 + +## Functions + +def nvmlGpmMetricsGet(metricsGet): + fn = _nvmlGetFunctionPointer("nvmlGpmMetricsGet") + ret = fn(byref(metricsGet)) + _nvmlCheckReturn(ret) + return metricsGet + +def nvmlGpmSampleFree(gpmSample): + fn = _nvmlGetFunctionPointer("nvmlGpmSampleFree") + ret = fn(gpmSample) + _nvmlCheckReturn(ret) + return + +def nvmlGpmSampleAlloc(): + gpmSample = c_nvmlGpmSample_t() + fn = _nvmlGetFunctionPointer("nvmlGpmSampleAlloc") + ret = fn(byref(gpmSample)) + _nvmlCheckReturn(ret) + return gpmSample + +def nvmlGpmSampleGet(device, gpmSample): + fn = _nvmlGetFunctionPointer("nvmlGpmSampleGet") + ret = fn(device, gpmSample) + _nvmlCheckReturn(ret) + return gpmSample + +def nvmlGpmMigSampleGet(device, gpuInstanceId, gpmSample): + fn = _nvmlGetFunctionPointer("nvmlGpmMigSampleGet") + ret = fn(device, gpuInstanceId, gpmSample) + _nvmlCheckReturn(ret) + return gpmSample + +def nvmlGpmQueryDeviceSupport(device): + gpmSupport = c_nvmlGpmSupport_t() + gpmSupport.version = NVML_GPM_SUPPORT_VERSION + fn = _nvmlGetFunctionPointer("nvmlGpmQueryDeviceSupport") + ret = fn(device, byref(gpmSupport)) + _nvmlCheckReturn(ret) + return gpmSupport + +def nvmlGpmSetStreamingEnabled(device, state): + c_state = c_uint(state) + fn = _nvmlGetFunctionPointer("nvmlGpmSetStreamingEnabled") + ret = fn(device, c_state) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlGpmQueryIfStreamingEnabled(device): + c_state = c_uint() + fn = _nvmlGetFunctionPointer("nvmlGpmQueryIfStreamingEnabled") + ret = fn(device, byref(c_state)) + _nvmlCheckReturn(ret) + return c_state.value + +# Low Power Structure and Function + +NVML_NVLINK_POWER_STATE_HIGH_SPEED = 0x0 +NVML_NVLINK_POWER_STATE_LOW = 0x1 + +NVML_NVLINK_LOW_POWER_THRESHOLD_MIN = 0x1 +NVML_NVLINK_LOW_POWER_THRESHOLD_MAX = 0x1FFF +NVML_NVLINK_LOW_POWER_THRESHOLD_RESET = 0xFFFFFFFF +NVML_NVLINK_LOW_POWER_THRESHOLD_DEFAULT = NVML_NVLINK_LOW_POWER_THRESHOLD_RESET + +class c_nvmlNvLinkPowerThres_t(Structure): + _fields_ = [ + ("lowPwrThreshold", c_uint), + ] + +def nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, l1threshold): + c_info = c_nvmlNvLinkPowerThres_t() + c_info.lowPwrThreshold = l1threshold + fn = _nvmlGetFunctionPointer("nvmlDeviceSetNvLinkDeviceLowPowerThreshold") + ret = fn(device, byref(c_info)) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +NVML_GPU_FABRIC_UUID_LEN = 16 + +_nvmlGpuFabricState_t = c_uint +NVML_GPU_FABRIC_STATE_NOT_SUPPORTED = 0 +NVML_GPU_FABRIC_STATE_NOT_STARTED = 1 +NVML_GPU_FABRIC_STATE_IN_PROGRESS = 2 +NVML_GPU_FABRIC_STATE_COMPLETED = 3 + +class 
c_nvmlGpuFabricInfo_t(_PrintableStructure): + _fields_ = [ + ("clusterUuid", c_char * NVML_DEVICE_UUID_BUFFER_SIZE), + ("status", _nvmlReturn_t), + ("cliqueId", c_uint32), + ("state", _nvmlGpuFabricState_t) + ] + +NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED = 0 +NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_TRUE = 1 +NVML_GPU_FABRIC_HEALTH_MASK_DEGRADED_BW_FALSE = 2 +NVML_GPU_FABRIC_HEALTH_MASK_SHIFT_DEGRADED_BW = 0 +NVML_GPU_FABRIC_HEALTH_MASK_WIDTH_DEGRADED_BW = 0x11 + +NVML_GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_NOT_SUPPORTED = 0 +NVML_GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_TRUE = 1 +NVML_GPU_FABRIC_HEALTH_MASK_ROUTE_RECOVERY_FALSE = 2 +NVML_GPU_FABRIC_HEALTH_MASK_SHIFT_ROUTE_RECOVERY = 2 +NVML_GPU_FABRIC_HEALTH_MASK_WIDTH_ROUTE_RECOVERY = 0x11 + +NVML_GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_NOT_SUPPORTED = 0 +NVML_GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_TRUE = 1 +NVML_GPU_FABRIC_HEALTH_MASK_ROUTE_UNHEALTHY_FALSE = 2 +NVML_GPU_FABRIC_HEALTH_MASK_SHIFT_ROUTE_UNHEALTHY = 4 +NVML_GPU_FABRIC_HEALTH_MASK_WIDTH_ROUTE_UNHEALTHY = 0x11 + +NVML_GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_NOT_SUPPORTED = 0 +NVML_GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_TRUE = 1 +NVML_GPU_FABRIC_HEALTH_MASK_ACCESS_TIMEOUT_RECOVERY_FALSE = 2 +NVML_GPU_FABRIC_HEALTH_MASK_SHIFT_ACCESS_TIMEOUT_RECOVERY = 6 +NVML_GPU_FABRIC_HEALTH_MASK_WIDTH_ACCESS_TIMEOUT_RECOVERY = 0x11 + +nvmlGpuFabricInfo_v2 = 0x02000024 + +class c_nvmlGpuFabricInfoV_t(_PrintableStructure): + _fields_ = [ + ("version", c_uint), + ("clusterUuid", c_char * NVML_GPU_FABRIC_UUID_LEN), + ("status", _nvmlReturn_t), + ("cliqueId", c_uint32), + ("state", _nvmlGpuFabricState_t), + ("healthMask", c_uint32) + ] + + def __init__(self): + super(c_nvmlGpuFabricInfoV_t, self).__init__(version=nvmlGpuFabricInfo_v2) + +def nvmlDeviceGetGpuFabricInfo(device, gpuFabricInfo): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuFabricInfo"); + ret = fn(device, gpuFabricInfo) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetGpuFabricInfoV(device, gpuFabricInfo): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuFabricInfoV"); + ret = fn(device, gpuFabricInfo) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +###################### +## Enums/defines +#### NVML GPU NVLINK BW MODE +NVML_GPU_NVLINK_BW_MODE_FULL = 0x0 +NVML_GPU_NVLINK_BW_MODE_OFF = 0x1 +NVML_GPU_NVLINK_BW_MODE_MIN = 0x2 +NVML_GPU_NVLINK_BW_MODE_HALF = 0x3 +NVML_GPU_NVLINK_BW_MODE_3QUARTER = 0x4 +NVML_GPU_NVLINK_BW_MODE_COUNT = 0x5 + +def nvmlSystemSetNvlinkBwMode(mode): + fn = _nvmlGetFunctionPointer("nvmlSystemSetNvlinkBwMode") + ret = fn(mode) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlSystemGetNvlinkBwMode(): + mode = c_uint() + fn = _nvmlGetFunctionPointer("nvmlSystemGetNvlinkBwMode") + ret = fn(byref(mode)) + _nvmlCheckReturn(ret) + return mode.value + +_nvmlPowerScopeType_t = c_uint +NVML_POWER_SCOPE_GPU = 0 +NVML_POWER_SCOPE_MODULE = 1 +NVML_POWER_SCOPE_MEMORY = 2 + +class c_nvmlPowerValue_v2_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('powerScope', _nvmlPowerScopeType_t), + ('powerValueMw', c_uint), + ] + _fmt_ = {'<default>': "%d B"} + +nvmlPowerValue_v2 = 0x0200000C + +def nvmlDeviceSetPowerManagementLimit_v2(device, powerScope, powerLimit, version=nvmlPowerValue_v2): + c_powerScope = _nvmlPowerScopeType_t(powerScope) + c_powerValue = c_nvmlPowerValue_v2_t() + c_powerValue.version = c_uint(version) + c_powerValue.powerScope = c_powerScope + c_powerValue.powerValueMw = c_uint(powerLimit) + fn = 
_nvmlGetFunctionPointer("nvmlDeviceSetPowerManagementLimit_v2") + ret = fn(device, byref(c_powerValue)) + return NVML_SUCCESS + +class c_nvmlEccSramErrorStatus_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('aggregateUncParity', c_ulonglong), + ('aggregateUncSecDed', c_ulonglong), + ('aggregateCor', c_ulonglong), + ('volatileUncParity', c_ulonglong), + ('volatileUncSecDed', c_ulonglong), + ('volatileCor', c_ulonglong), + ('aggregateUncBucketL2', c_ulonglong), + ('aggregateUncBucketSm', c_ulonglong), + ('aggregateUncBucketPcie', c_ulonglong), + ('aggregateUncBucketMcu', c_ulonglong), + ('aggregateUncBucketOther', c_ulonglong), + ('bThresholdExceeded', c_uint) + ] + + def __init__(self): + super(c_nvmlEccSramErrorStatus_v1_t, self).__init__(version=nvmlEccSramErrorStatus_v1) + +nvmlEccSramErrorStatus_v1 = 0x1000068 +def nvmlDeviceGetSramEccErrorStatus(device, status): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetSramEccErrorStatus") + ret = fn(device, status) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +NVML_DEV_CAP_EGM = (1 << 0) +nvmlDeviceCapabilities_v1 = 0x1000008 + +class c_nvmlDeviceCapabilities_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('capMask', c_uint), + ] + + def __init__(self): + super(c_nvmlDeviceCapabilities_v1_t, self).__init__(version=nvmlDeviceCapabilities_v1) + + +def nvmlDeviceGetCapabilities(device, caps): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetCapabilities") + return fn(device, caps) + +class c_nvmlPlatformInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('ibGuid', c_char * 16), + ('rackGuid', c_char * 16), + ('chassisPhysicalSlotNumber', c_char), + ('computeSlotIndex', c_char), + ('nodeIndex', c_char), + ('peerType', c_char), + ('moduleId', c_char) + ] + + def __init__(self): + super(c_nvmlPlatformInfo_v1_t, self).__init__(version=nvmlPlatformInfo_v1) + +nvmlPlatformInfo_v1 = 0x100002c +def nvmlDeviceGetPlatformInfo(device, platformInfo): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetPlatformInfo") + ret = fn(device, platformInfo) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +class c_nvmlMask255_t(_PrintableStructure): + _fields_ = [ + ('mask', c_uint * 8), + ] + +NVML_WORKLOAD_POWER_MAX_PROFILES = 255 +NVML_POWER_PROFILE_MAX_P = 0 +NVML_POWER_PROFILE_MAX_Q = 1 +NVML_POWER_PROFILE_COMPUTE = 2 +NVML_POWER_PROFILE_MEMORY_BOUND = 3 +NVML_POWER_PROFILE_NETWORK = 4 +NVML_POWER_PROFILE_BALANCED = 5 +NVML_POWER_PROFILE_LLM_INFERENCE = 6 +NVML_POWER_PROFILE_LLM_TRAINING = 7 +NVML_POWER_PROFILE_RBM = 8 +NVML_POWER_PROFILE_DCPCIE = 9 +NVML_POWER_PROFILE_HMMA_SPARSE = 10 +NVML_POWER_PROFILE_HMMA_DENSE = 11 +NVML_POWER_PROFILE_SYNC_BALANCED = 12 +NVML_POWER_PROFILE_HPC = 13 +NVML_POWER_PROFILE_MIG = 14 +NVML_POWER_PROFILE_MAX = 15 + +nvmlWorkloadPowerProfileInfo_v1 = 0x100002c +class c_nvmlWorkloadPowerProfileInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('profileId', c_uint), + ('priority', c_uint), + ('conflictingmask', c_nvmlMask255_t) + ] + + def __init__(self): + super(c_nvmlWorkloadPowerProfileInfo_v1_t, self).__init__(version=nvmlWorkloadPowerProfileInfo_v1) + +nvmlWorkloadPowerProfileProfilesInfo_v1 = 0x1002bf8 +class c_nvmlWorkloadPowerProfileProfilesInfo_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('perfProfilesMask', c_nvmlMask255_t), + ('perfProfile', c_nvmlWorkloadPowerProfileInfo_v1_t * NVML_WORKLOAD_POWER_MAX_PROFILES) + ] + + def __init__(self): + super(c_nvmlWorkloadPowerProfileProfilesInfo_v1_t, 
self).__init__(version=nvmlWorkloadPowerProfileProfilesInfo_v1) + +nvmlWorkloadPowerProfileCurrentProfiles_v1 = 0x1000064 +class c_nvmlWorkloadPowerProfileCurrentProfiles_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('perfProfilesMask', c_nvmlMask255_t), + ('requestedProfilesMask', c_nvmlMask255_t), + ('enforcedProfilesMask', c_nvmlMask255_t) + ] + + def __init__(self): + super(c_nvmlWorkloadPowerProfileCurrentProfiles_v1_t, self).__init__(version=nvmlWorkloadPowerProfileCurrentProfiles_v1) + +nvmlWorkloadPowerProfileRequestedProfiles_v1 = 0x1000024 +class c_nvmlWorkloadPowerProfileRequestedProfiles_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('requestedProfilesMask', c_nvmlMask255_t), + ] + + def __init__(self): + super(c_nvmlWorkloadPowerProfileRequestedProfiles_v1_t, self).__init__(version=nvmlWorkloadPowerProfileRequestedProfiles_v1) + +def nvmlDeviceWorkloadPowerProfileGetProfilesInfo(device, profilesInfo): + fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileGetProfilesInfo") + ret = fn(device, profilesInfo) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceWorkloadPowerProfileGetCurrentProfiles(device, currentProfiles): + fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileGetCurrentProfiles") + ret = fn(device, currentProfiles) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceWorkloadPowerProfileSetRequestedProfiles(device, requestedProfiles): + fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileSetRequestedProfiles") + ret = fn(device, requestedProfiles) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requestedProfiles): + fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileClearRequestedProfiles") + ret = fn(device, requestedProfiles) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetNvlinkSupportedBwModes(device, supportedBwModes): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvlinkSupportedBwModes") + ret = fn(device, supportedBwModes) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceGetNvlinkBwMode(device, getBwMode): + fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvlinkBwMode") + ret = fn(device, getBwMode) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +def nvmlDeviceSetNvlinkBwMode(device, setBwMode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetNvlinkBwMode") + ret = fn(device, setBwMode) + _nvmlCheckReturn(ret) + return NVML_SUCCESS + +nvmlDramEncryptionInfo_v1 = 0x01000008 + +class c_nvmlDramEncryptionInfo_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('encryptionState', _nvmlEnableState_t), + ] + + def __init__(self): + super(c_nvmlDramEncryptionInfo_t, self).__init__(version=nvmlDramEncryptionInfo_v1) + +def nvmlDeviceGetDramEncryptionMode(handle): + c_currState = c_nvmlDramEncryptionInfo_t() + c_pendingState = c_nvmlDramEncryptionInfo_t() + fn = _nvmlGetFunctionPointer("nvmlDeviceGetDramEncryptionMode") + ret = fn(handle, byref(c_currState), byref(c_pendingState)) + _nvmlCheckReturn(ret) + return [c_currState.encryptionState, c_pendingState.encryptionState] + +# added to API +def nvmlDeviceGetCurrentDramEncryptionMode(handle): + return nvmlDeviceGetDramEncryptionMode(handle)[0] + +# added to API +def nvmlDeviceGetPendingDramEncryptionMode(handle): + return nvmlDeviceGetDramEncryptionMode(handle)[1] + +def nvmlDeviceSetDramEncryptionMode(handle, mode): + fn = _nvmlGetFunctionPointer("nvmlDeviceSetDramEncryptionMode") + c_dramEncryptionMode = 
c_nvmlDramEncryptionInfo_t() + c_dramEncryptionMode.encryptionState = mode; + ret = fn(handle, byref(c_dramEncryptionMode)) + _nvmlCheckReturn(ret) + return None + +# Power Smoothing defines +NVML_POWER_SMOOTHING_MAX_NUM_PROFILES = 5 +NVML_POWER_SMOOTHING_ADMIN_OVERRIDE_NOT_SET = 0xFFFFFFFF +NVML_POWER_SMOOTHING_PROFILE_PARAM_PERCENT_TMP_FLOOR = 0 +NVML_POWER_SMOOTHING_PROFILE_PARAM_RAMP_UP_RATE = 1 +NVML_POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_RATE = 2 +NVML_POWER_SMOOTHING_PROFILE_PARAM_RAMP_DOWN_HYSTERESIS = 3 + +nvmlPowerSmoothingState_v1=0x1000008 +class c_nvmlPowerSmoothingState_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('state', c_uint), + ] + + def __init__(self): + super(c_nvmlPowerSmoothingState_v1_t, self).__init__(version=nvmlPowerSmoothingState_v1) + +nvmlPowerSmoothingProfile_v1=0x1000018 +class c_nvmlPowerSmoothingProfile_v1_t(_PrintableStructure): + _fields_ = [ + ('version', c_uint), + ('profileId', c_uint), + ('paramId', c_uint), + ('value', c_double), + ] + + def __init__(self): + super(c_nvmlPowerSmoothingProfile_v1_t, self).__init__(version=nvmlPowerSmoothingProfile_v1) + +def nvmlDevicePowerSmoothingActivatePresetProfile(device, profile): + fn = _nvmlGetFunctionPointer("nvmlDevicePowerSmoothingActivatePresetProfile") + ret = fn(device, profile) + _nvmlCheckReturn(ret) + +def nvmlDevicePowerSmoothingUpdatePresetProfileParam(device, profile): + fn = _nvmlGetFunctionPointer("nvmlDevicePowerSmoothingUpdatePresetProfileParam") + ret = fn(device, profile) + _nvmlCheckReturn(ret) + +def nvmlDevicePowerSmoothingSetState(device, state): + fn = _nvmlGetFunctionPointer("nvmlDevicePowerSmoothingSetState") + ret = fn(device, state) + _nvmlCheckReturn(ret) + diff --git a/parrot/lib/python3.10/site-packages/ruff/__init__.py b/parrot/lib/python3.10/site-packages/ruff/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/METADATA b/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b3223a6a1ebbe5af2fa1a416556b85eb0729a690 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/METADATA @@ -0,0 +1,536 @@ +Metadata-Version: 2.1 +Name: torch +Version: 2.4.1 +Summary: Tensors and Dynamic neural networks in Python with strong GPU acceleration +Home-page: https://pytorch.org/ +Author: PyTorch Team +Author-email: packages@pytorch.org +License: BSD-3 +Download-URL: https://github.com/pytorch/pytorch/tags +Keywords: pytorch,machine learning +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: 
Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: NOTICE +Requires-Dist: filelock +Requires-Dist: typing-extensions (>=4.8.0) +Requires-Dist: sympy +Requires-Dist: networkx +Requires-Dist: jinja2 +Requires-Dist: fsspec +Requires-Dist: nvidia-cuda-nvrtc-cu12 (==12.1.105) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-cuda-runtime-cu12 (==12.1.105) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-cuda-cupti-cu12 (==12.1.105) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-cudnn-cu12 (==9.1.0.70) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-cublas-cu12 (==12.1.3.1) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-cufft-cu12 (==11.0.2.54) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-curand-cu12 (==10.3.2.106) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-cusolver-cu12 (==11.4.5.107) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-cusparse-cu12 (==12.1.0.106) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-nccl-cu12 (==2.20.5) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: nvidia-nvtx-cu12 (==12.1.105) ; platform_system == "Linux" and platform_machine == "x86_64" +Requires-Dist: triton (==3.0.0) ; platform_system == "Linux" and platform_machine == "x86_64" and python_version < "3.13" +Provides-Extra: opt-einsum +Requires-Dist: opt-einsum (>=3.3) ; extra == 'opt-einsum' +Provides-Extra: optree +Requires-Dist: optree (>=0.11.0) ; extra == 'optree' + +![PyTorch Logo](https://github.com/pytorch/pytorch/raw/main/docs/source/_static/img/pytorch-logo-dark.png) + +-------------------------------------------------------------------------------- + +PyTorch is a Python package that provides two high-level features: +- Tensor computation (like NumPy) with strong GPU acceleration +- Deep neural networks built on a tape-based autograd system + +You can reuse your favorite Python packages such as NumPy, SciPy, and Cython to extend PyTorch when needed. + +Our trunk health (Continuous Integration signals) can be found at [hud.pytorch.org](https://hud.pytorch.org/ci/pytorch/pytorch/main). 
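The two bullet points above are easier to see in code than in prose. The sketch below is not part of the packaged METADATA; it is a minimal illustration, assuming only a working `torch` install, of tensor computation that moves to the GPU when one is visible and of the tape-based autograd that records operations as they execute.

```python
import torch

# Tensor computation (NumPy-like), accelerated on the GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.randn(3, 3, device=device)
y = x @ x.T + 1.0          # ordinary math ops dispatch to the selected device

# Tape-based autograd: operations on tensors with requires_grad=True are recorded.
w = torch.randn(3, requires_grad=True)
loss = (2.0 * w).sum()
loss.backward()            # replay the recorded tape backwards
print(w.grad)              # tensor([2., 2., 2.])
```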
+ +<!-- toc --> + +- [More About PyTorch](#more-about-pytorch) + - [A GPU-Ready Tensor Library](#a-gpu-ready-tensor-library) + - [Dynamic Neural Networks: Tape-Based Autograd](#dynamic-neural-networks-tape-based-autograd) + - [Python First](#python-first) + - [Imperative Experiences](#imperative-experiences) + - [Fast and Lean](#fast-and-lean) + - [Extensions Without Pain](#extensions-without-pain) +- [Installation](#installation) + - [Binaries](#binaries) + - [NVIDIA Jetson Platforms](#nvidia-jetson-platforms) + - [From Source](#from-source) + - [Prerequisites](#prerequisites) + - [NVIDIA CUDA Support](#nvidia-cuda-support) + - [AMD ROCm Support](#amd-rocm-support) + - [Intel GPU Support](#intel-gpu-support) + - [Install Dependencies](#install-dependencies) + - [Get the PyTorch Source](#get-the-pytorch-source) + - [Install PyTorch](#install-pytorch) + - [Adjust Build Options (Optional)](#adjust-build-options-optional) + - [Docker Image](#docker-image) + - [Using pre-built images](#using-pre-built-images) + - [Building the image yourself](#building-the-image-yourself) + - [Building the Documentation](#building-the-documentation) + - [Previous Versions](#previous-versions) +- [Getting Started](#getting-started) +- [Resources](#resources) +- [Communication](#communication) +- [Releases and Contributing](#releases-and-contributing) +- [The Team](#the-team) +- [License](#license) + +<!-- tocstop --> + +## More About PyTorch + +[Learn the basics of PyTorch](https://pytorch.org/tutorials/beginner/basics/intro.html) + +At a granular level, PyTorch is a library that consists of the following components: + +| Component | Description | +| ---- | --- | +| [**torch**](https://pytorch.org/docs/stable/torch.html) | A Tensor library like NumPy, with strong GPU support | +| [**torch.autograd**](https://pytorch.org/docs/stable/autograd.html) | A tape-based automatic differentiation library that supports all differentiable Tensor operations in torch | +| [**torch.jit**](https://pytorch.org/docs/stable/jit.html) | A compilation stack (TorchScript) to create serializable and optimizable models from PyTorch code | +| [**torch.nn**](https://pytorch.org/docs/stable/nn.html) | A neural networks library deeply integrated with autograd designed for maximum flexibility | +| [**torch.multiprocessing**](https://pytorch.org/docs/stable/multiprocessing.html) | Python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and Hogwild training | +| [**torch.utils**](https://pytorch.org/docs/stable/data.html) | DataLoader and other utility functions for convenience | + +Usually, PyTorch is used either as: + +- A replacement for NumPy to use the power of GPUs. +- A deep learning research platform that provides maximum flexibility and speed. + +Elaborating Further: + +### A GPU-Ready Tensor Library + +If you use NumPy, then you have used Tensors (a.k.a. ndarray). + +![Tensor illustration](./docs/source/_static/img/tensor_illustration.png) + +PyTorch provides Tensors that can live either on the CPU or the GPU and accelerates the +computation by a huge amount. + +We provide a wide variety of tensor routines to accelerate and fit your scientific computation needs +such as slicing, indexing, mathematical operations, linear algebra, reductions. +And they are fast! + +### Dynamic Neural Networks: Tape-Based Autograd + +PyTorch has a unique way of building neural networks: using and replaying a tape recorder. 
+ +Most frameworks such as TensorFlow, Theano, Caffe, and CNTK have a static view of the world. +One has to build a neural network and reuse the same structure again and again. +Changing the way the network behaves means that one has to start from scratch. + +With PyTorch, we use a technique called reverse-mode auto-differentiation, which allows you to +change the way your network behaves arbitrarily with zero lag or overhead. Our inspiration comes +from several research papers on this topic, as well as current and past work such as +[torch-autograd](https://github.com/twitter/torch-autograd), +[autograd](https://github.com/HIPS/autograd), +[Chainer](https://chainer.org), etc. + +While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date. +You get the best of speed and flexibility for your crazy research. + +![Dynamic graph](https://github.com/pytorch/pytorch/raw/main/docs/source/_static/img/dynamic_graph.gif) + +### Python First + +PyTorch is not a Python binding into a monolithic C++ framework. +It is built to be deeply integrated into Python. +You can use it naturally like you would use [NumPy](https://www.numpy.org/) / [SciPy](https://www.scipy.org/) / [scikit-learn](https://scikit-learn.org) etc. +You can write your new neural network layers in Python itself, using your favorite libraries +and use packages such as [Cython](https://cython.org/) and [Numba](http://numba.pydata.org/). +Our goal is to not reinvent the wheel where appropriate. + +### Imperative Experiences + +PyTorch is designed to be intuitive, linear in thought, and easy to use. +When you execute a line of code, it gets executed. There isn't an asynchronous view of the world. +When you drop into a debugger or receive error messages and stack traces, understanding them is straightforward. +The stack trace points to exactly where your code was defined. +We hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines. + +### Fast and Lean + +PyTorch has minimal framework overhead. We integrate acceleration libraries +such as [Intel MKL](https://software.intel.com/mkl) and NVIDIA ([cuDNN](https://developer.nvidia.com/cudnn), [NCCL](https://developer.nvidia.com/nccl)) to maximize speed. +At the core, its CPU and GPU Tensor and neural network backends +are mature and have been tested for years. + +Hence, PyTorch is quite fast — whether you run small or large neural networks. + +The memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives. +We've written custom memory allocators for the GPU to make sure that +your deep learning models are maximally memory efficient. +This enables you to train bigger deep learning models than before. + +### Extensions Without Pain + +Writing new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straightforward +and with minimal abstractions. + +You can write new neural network layers in Python using the torch API +[or your favorite NumPy-based libraries such as SciPy](https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html). + +If you want to write your layers in C/C++, we provide a convenient extension API that is efficient and with minimal boilerplate. +No wrapper code needs to be written. You can see [a tutorial here](https://pytorch.org/tutorials/advanced/cpp_extension.html) and [an example here](https://github.com/pytorch/extension-cpp). 
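To make the define-by-run behaviour described above concrete, here is a small sketch (not taken from the README; the module below is hypothetical). The point is only that ordinary Python control flow chooses a different graph on every forward pass, and `backward()` differentiates exactly the operations that actually ran.

```python
import torch
import torch.nn as nn

class DynamicNet(nn.Module):
    """Applies the same layer a caller-chosen number of times per forward pass."""
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)

    def forward(self, x, steps):
        for _ in range(steps):       # graph depth decided at run time
            x = torch.relu(self.linear(x))
        return x.sum()

net = DynamicNet()
for steps in (1, 3):                 # two calls, two different graphs
    loss = net(torch.randn(2, 4), steps)
    net.zero_grad()
    loss.backward()                  # autograd replays only the ops that ran
```

No recompilation or explicit graph-rebuild step is needed between the two calls; the tape is simply recreated each time.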
+ + +## Installation + +### Binaries +Commands to install binaries via Conda or pip wheels are on our website: [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/) + + +#### NVIDIA Jetson Platforms + +Python wheels for NVIDIA's Jetson Nano, Jetson TX1/TX2, Jetson Xavier NX/AGX, and Jetson AGX Orin are provided [here](https://forums.developer.nvidia.com/t/pytorch-for-jetson-version-1-10-now-available/72048) and the L4T container is published [here](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch) + +They require JetPack 4.2 and above, and [@dusty-nv](https://github.com/dusty-nv) and [@ptrblck](https://github.com/ptrblck) are maintaining them. + + +### From Source + +#### Prerequisites +If you are installing from source, you will need: +- Python 3.8 or later (for Linux, Python 3.8.1+ is needed) +- A compiler that fully supports C++17, such as clang or gcc (gcc 9.4.0 or newer is required) + +We highly recommend installing an [Anaconda](https://www.anaconda.com/download) environment. You will get a high-quality BLAS library (MKL) and you get controlled dependency versions regardless of your Linux distro. + +##### NVIDIA CUDA Support +If you want to compile with CUDA support, [select a supported version of CUDA from our support matrix](https://pytorch.org/get-started/locally/), then install the following: +- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) +- [NVIDIA cuDNN](https://developer.nvidia.com/cudnn) v8.5 or above +- [Compiler](https://gist.github.com/ax3l/9489132) compatible with CUDA + +Note: You could refer to the [cuDNN Support Matrix](https://docs.nvidia.com/deeplearning/cudnn/reference/support-matrix.html) for cuDNN versions with the various supported CUDA, CUDA driver and NVIDIA hardware + +If you want to disable CUDA support, export the environment variable `USE_CUDA=0`. +Other potentially useful environment variables may be found in `setup.py`. + +If you are building for NVIDIA's Jetson platforms (Jetson Nano, TX1, TX2, AGX Xavier), Instructions to install PyTorch for Jetson Nano are [available here](https://devtalk.nvidia.com/default/topic/1049071/jetson-nano/pytorch-for-jetson-nano/) + +##### AMD ROCm Support +If you want to compile with ROCm support, install +- [AMD ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html) 4.0 and above installation +- ROCm is currently supported only for Linux systems. + +If you want to disable ROCm support, export the environment variable `USE_ROCM=0`. +Other potentially useful environment variables may be found in `setup.py`. + +##### Intel GPU Support +If you want to compile with Intel GPU support, follow these +- [PyTorch Prerequisites for Intel GPUs](https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html) instructions. +- Intel GPU is supported for Linux and Windows. + +If you want to disable Intel GPU support, export the environment variable `USE_XPU=0`. +Other potentially useful environment variables may be found in `setup.py`. 
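As a follow-up to the `USE_CUDA` / `USE_ROCM` / `USE_XPU` switches above, once a source build finishes you can confirm which accelerator support was actually compiled in. This is a hedged convenience check, not part of the upstream instructions; `torch.xpu` in particular is assumed to exist only on builds with Intel GPU support, hence the guarded lookup.

```python
import torch

print("torch version :", torch.__version__)
print("CUDA built    :", torch.backends.cuda.is_built(),
      "| runtime available:", torch.cuda.is_available())
print("ROCm/HIP      :", torch.version.hip)   # None on non-ROCm builds
# Intel GPU (XPU) support is only present on some builds, so guard the attribute.
print("XPU available :", hasattr(torch, "xpu") and torch.xpu.is_available())
```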
+ +#### Install Dependencies + +**Common** + +```bash +conda install cmake ninja +# Run this command from the PyTorch directory after cloning the source code using the “Get the PyTorch Source“ section below +pip install -r requirements.txt +``` + +**On Linux** + +```bash +pip install mkl-static mkl-include +# CUDA only: Add LAPACK support for the GPU if needed +conda install -c pytorch magma-cuda121 # or the magma-cuda* that matches your CUDA version from https://anaconda.org/pytorch/repo + +# (optional) If using torch.compile with inductor/triton, install the matching version of triton +# Run from the pytorch directory after cloning +# For Intel GPU support, please explicitly `export USE_XPU=1` before running command. +make triton +``` + +**On MacOS** + +```bash +# Add this package on intel x86 processor machines only +pip install mkl-static mkl-include +# Add these packages if torch.distributed is needed +conda install pkg-config libuv +``` + +**On Windows** + +```bash +pip install mkl-static mkl-include +# Add these packages if torch.distributed is needed. +# Distributed package support on Windows is a prototype feature and is subject to changes. +conda install -c conda-forge libuv=1.39 +``` + +#### Get the PyTorch Source +```bash +git clone --recursive https://github.com/pytorch/pytorch +cd pytorch +# if you are updating an existing checkout +git submodule sync +git submodule update --init --recursive +``` + +#### Install PyTorch +**On Linux** + +If you would like to compile PyTorch with [new C++ ABI](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html) enabled, then first run this command: +```bash +export _GLIBCXX_USE_CXX11_ABI=1 +``` + +If you're compiling for AMD ROCm then first run this command: +```bash +# Only run this if you're compiling for ROCm +python tools/amd_build/build_amd.py +``` + +Install PyTorch +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +python setup.py develop +``` + +> _Aside:_ If you are using [Anaconda](https://www.anaconda.com/distribution/#download-section), you may experience an error caused by the linker: +> +> ```plaintext +> build/temp.linux-x86_64-3.7/torch/csrc/stub.o: file not recognized: file format not recognized +> collect2: error: ld returned 1 exit status +> error: command 'g++' failed with exit status 1 +> ``` +> +> This is caused by `ld` from the Conda environment shadowing the system `ld`. You should use a newer version of Python that fixes this issue. The recommended Python version is 3.8.1+. + +**On macOS** + +```bash +python3 setup.py develop +``` + +**On Windows** + +Choose Correct Visual Studio Version. + +PyTorch CI uses Visual C++ BuildTools, which come with Visual Studio Enterprise, +Professional, or Community Editions. You can also install the build tools from +https://visualstudio.microsoft.com/visual-cpp-build-tools/. The build tools *do not* +come with Visual Studio Code by default. + +If you want to build legacy python code, please refer to [Building on legacy code and CUDA](https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md#building-on-legacy-code-and-cuda) + +**CPU-only builds** + +In this mode PyTorch computations will run on your CPU, not your GPU + +```cmd +conda activate +python setup.py develop +``` + +Note on OpenMP: The desired OpenMP implementation is Intel OpenMP (iomp). In order to link against iomp, you'll need to manually download the library and set up the building environment by tweaking `CMAKE_INCLUDE_PATH` and `LIB`. 
The instruction [here](https://github.com/pytorch/pytorch/blob/main/docs/source/notes/windows.rst#building-from-source) is an example for setting up both MKL and Intel OpenMP. Without these configurations for CMake, Microsoft Visual C OpenMP runtime (vcomp) will be used. + +**CUDA based build** + +In this mode PyTorch computations will leverage your GPU via CUDA for faster number crunching + +[NVTX](https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm) is needed to build Pytorch with CUDA. +NVTX is a part of CUDA distributive, where it is called "Nsight Compute". To install it onto an already installed CUDA run CUDA installation once again and check the corresponding checkbox. +Make sure that CUDA with Nsight Compute is installed after Visual Studio. + +Currently, VS 2017 / 2019, and Ninja are supported as the generator of CMake. If `ninja.exe` is detected in `PATH`, then Ninja will be used as the default generator, otherwise, it will use VS 2017 / 2019. +<br/> If Ninja is selected as the generator, the latest MSVC will get selected as the underlying toolchain. + +Additional libraries such as +[Magma](https://developer.nvidia.com/magma), [oneDNN, a.k.a. MKLDNN or DNNL](https://github.com/oneapi-src/oneDNN), and [Sccache](https://github.com/mozilla/sccache) are often needed. Please refer to the [installation-helper](https://github.com/pytorch/pytorch/tree/main/.ci/pytorch/win-test-helpers/installation-helpers) to install them. + +You can refer to the [build_pytorch.bat](https://github.com/pytorch/pytorch/blob/main/.ci/pytorch/win-test-helpers/build_pytorch.bat) script for some other environment variables configurations + + +```cmd +cmd + +:: Set the environment variables after you have downloaded and unzipped the mkl package, +:: else CMake would throw an error as `Could NOT find OpenMP`. +set CMAKE_INCLUDE_PATH={Your directory}\mkl\include +set LIB={Your directory}\mkl\lib;%LIB% + +:: Read the content in the previous section carefully before you proceed. +:: [Optional] If you want to override the underlying toolset used by Ninja and Visual Studio with CUDA, please run the following script block. +:: "Visual Studio 2019 Developer Command Prompt" will be run automatically. +:: Make sure you have CMake >= 3.12 before you do this when you use the Visual Studio generator. +set CMAKE_GENERATOR_TOOLSET_VERSION=14.27 +set DISTUTILS_USE_SDK=1 +for /f "usebackq tokens=*" %i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -version [15^,17^) -products * -latest -property installationPath`) do call "%i\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%CMAKE_GENERATOR_TOOLSET_VERSION% + +:: [Optional] If you want to override the CUDA host compiler +set CUDAHOSTCXX=C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\HostX64\x64\cl.exe + +python setup.py develop + +``` + +##### Adjust Build Options (Optional) + +You can adjust the configuration of cmake variables optionally (without building first), by doing +the following. For example, adjusting the pre-detected directories for CuDNN or BLAS can be done +with such a step. 
+ +On Linux +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +python setup.py build --cmake-only +ccmake build # or cmake-gui build +``` + +On macOS +```bash +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} +MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build --cmake-only +ccmake build # or cmake-gui build +``` + +### Docker Image + +#### Using pre-built images + +You can also pull a pre-built docker image from Docker Hub and run with docker v19.03+ + +```bash +docker run --gpus all --rm -ti --ipc=host pytorch/pytorch:latest +``` + +Please note that PyTorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g. +for multithreaded data loaders) the default shared memory segment size that container runs with is not enough, and you +should increase shared memory size either with `--ipc=host` or `--shm-size` command line options to `nvidia-docker run`. + +#### Building the image yourself + +**NOTE:** Must be built with a docker version > 18.06 + +The `Dockerfile` is supplied to build images with CUDA 11.1 support and cuDNN v8. +You can pass `PYTHON_VERSION=x.y` make variable to specify which Python version is to be used by Miniconda, or leave it +unset to use the default. + +```bash +make -f docker.Makefile +# images are tagged as docker.io/${your_docker_username}/pytorch +``` + +You can also pass the `CMAKE_VARS="..."` environment variable to specify additional CMake variables to be passed to CMake during the build. +See [setup.py](./setup.py) for the list of available variables. + +```bash +make -f docker.Makefile +``` + +### Building the Documentation + +To build documentation in various formats, you will need [Sphinx](http://www.sphinx-doc.org) and the +readthedocs theme. + +```bash +cd docs/ +pip install -r requirements.txt +``` +You can then build the documentation by running `make <format>` from the +`docs/` folder. Run `make` to get a list of all available output formats. + +If you get a katex error run `npm install katex`. If it persists, try +`npm install -g katex` + +> Note: if you installed `nodejs` with a different package manager (e.g., +`conda`) then `npm` will probably install a version of `katex` that is not +compatible with your version of `nodejs` and doc builds will fail. +A combination of versions that is known to work is `node@6.13.1` and +`katex@0.13.18`. To install the latter with `npm` you can run +```npm install -g katex@0.13.18``` + +### Previous Versions + +Installation instructions and binaries for previous PyTorch versions may be found +on [our website](https://pytorch.org/previous-versions). 
+ + +## Getting Started + +Three-pointers to get you started: +- [Tutorials: get you started with understanding and using PyTorch](https://pytorch.org/tutorials/) +- [Examples: easy to understand PyTorch code across all domains](https://github.com/pytorch/examples) +- [The API Reference](https://pytorch.org/docs/) +- [Glossary](https://github.com/pytorch/pytorch/blob/main/GLOSSARY.md) + +## Resources + +* [PyTorch.org](https://pytorch.org/) +* [PyTorch Tutorials](https://pytorch.org/tutorials/) +* [PyTorch Examples](https://github.com/pytorch/examples) +* [PyTorch Models](https://pytorch.org/hub/) +* [Intro to Deep Learning with PyTorch from Udacity](https://www.udacity.com/course/deep-learning-pytorch--ud188) +* [Intro to Machine Learning with PyTorch from Udacity](https://www.udacity.com/course/intro-to-machine-learning-nanodegree--nd229) +* [Deep Neural Networks with PyTorch from Coursera](https://www.coursera.org/learn/deep-neural-networks-with-pytorch) +* [PyTorch Twitter](https://twitter.com/PyTorch) +* [PyTorch Blog](https://pytorch.org/blog/) +* [PyTorch YouTube](https://www.youtube.com/channel/UCWXI5YeOsh03QvJ59PMaXFw) + +## Communication +* Forums: Discuss implementations, research, etc. https://discuss.pytorch.org +* GitHub Issues: Bug reports, feature requests, install issues, RFCs, thoughts, etc. +* Slack: The [PyTorch Slack](https://pytorch.slack.com/) hosts a primary audience of moderate to experienced PyTorch users and developers for general chat, online discussions, collaboration, etc. If you are a beginner looking for help, the primary medium is [PyTorch Forums](https://discuss.pytorch.org). If you need a slack invite, please fill this form: https://goo.gl/forms/PP1AGvNHpSaJP8to1 +* Newsletter: No-noise, a one-way email newsletter with important announcements about PyTorch. You can sign-up here: https://eepurl.com/cbG0rv +* Facebook Page: Important announcements about PyTorch. https://www.facebook.com/pytorch +* For brand guidelines, please visit our website at [pytorch.org](https://pytorch.org/) + +## Releases and Contributing + +Typically, PyTorch has three minor releases a year. Please let us know if you encounter a bug by [filing an issue](https://github.com/pytorch/pytorch/issues). + +We appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion. + +If you plan to contribute new features, utility functions, or extensions to the core, please first open an issue and discuss the feature with us. +Sending a PR without discussion might end up resulting in a rejected PR because we might be taking the core in a different direction than you might be aware of. + +To learn more about making a contribution to Pytorch, please see our [Contribution page](CONTRIBUTING.md). For more information about PyTorch releases, see [Release page](RELEASE.md). + +## The Team + +PyTorch is a community-driven project with several skillful engineers and researchers contributing to it. + +PyTorch is currently maintained by [Soumith Chintala](http://soumith.ch), [Gregory Chanan](https://github.com/gchanan), [Dmytro Dzhulgakov](https://github.com/dzhulgakov), [Edward Yang](https://github.com/ezyang), and [Nikita Shulga](https://github.com/malfet) with major contributions coming from hundreds of talented individuals in various forms and means. 
+A non-exhaustive but growing list needs to mention: Trevor Killeen, Sasank Chilamkurthy, Sergey Zagoruyko, Adam Lerer, Francisco Massa, Alykhan Tejani, Luca Antiga, Alban Desmaison, Andreas Koepf, James Bradbury, Zeming Lin, Yuandong Tian, Guillaume Lample, Marat Dukhan, Natalia Gimelshein, Christian Sarofeen, Martin Raison, Edward Yang, Zachary Devito. + +Note: This project is unrelated to [hughperkins/pytorch](https://github.com/hughperkins/pytorch) with the same name. Hugh is a valuable contributor to the Torch community and has helped with many things Torch and PyTorch. + +## License + +PyTorch has a BSD-style license, as found in the [LICENSE](LICENSE) file. + + diff --git a/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d77a3981e5ea20c488afb479f81f1868349f78c3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch-2.4.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: false +Tag: cp310-cp310-linux_x86_64 + diff --git a/parrot/lib/python3.10/xml/etree/ElementPath.py b/parrot/lib/python3.10/xml/etree/ElementPath.py new file mode 100644 index 0000000000000000000000000000000000000000..cd3c354d0813ee4b55000431306736c653331d51 --- /dev/null +++ b/parrot/lib/python3.10/xml/etree/ElementPath.py @@ -0,0 +1,421 @@ +# +# ElementTree +# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $ +# +# limited xpath support for element trees +# +# history: +# 2003-05-23 fl created +# 2003-05-28 fl added support for // etc +# 2003-08-27 fl fixed parsing of periods in element names +# 2007-09-10 fl new selection engine +# 2007-09-12 fl fixed parent selector +# 2007-09-13 fl added iterfind; changed findall to return a list +# 2007-11-30 fl added namespaces support +# 2009-10-30 fl added child element value filter +# +# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved. +# +# fredrik@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2009 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# -------------------------------------------------------------------- + +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. + +## +# Implementation module for XPath support. There's usually no reason +# to import this module directly; the <b>ElementTree</b> does this for +# you, if needed. +## + +import re + +xpath_tokenizer_re = re.compile( + r"(" + r"'[^']*'|\"[^\"]*\"|" + r"::|" + r"//?|" + r"\.\.|" + r"\(\)|" + r"!=|" + r"[/.*:\[\]\(\)@=])|" + r"((?:\{[^}]+\})?[^/\[\]\(\)@!=\s]+)|" + r"\s+" + ) + +def xpath_tokenizer(pattern, namespaces=None): + default_namespace = namespaces.get('') if namespaces else None + parsing_attribute = False + for token in xpath_tokenizer_re.findall(pattern): + ttype, tag = token + if tag and tag[0] != "{": + if ":" in tag: + prefix, uri = tag.split(":", 1) + try: + if not namespaces: + raise KeyError + yield ttype, "{%s}%s" % (namespaces[prefix], uri) + except KeyError: + raise SyntaxError("prefix %r not found in prefix map" % prefix) from None + elif default_namespace and not parsing_attribute: + yield ttype, "{%s}%s" % (default_namespace, tag) + else: + yield token + parsing_attribute = False + else: + yield token + parsing_attribute = ttype == '@' + + +def get_parent_map(context): + parent_map = context.parent_map + if parent_map is None: + context.parent_map = parent_map = {} + for p in context.root.iter(): + for e in p: + parent_map[e] = p + return parent_map + + +def _is_wildcard_tag(tag): + return tag[:3] == '{*}' or tag[-2:] == '}*' + + +def _prepare_tag(tag): + _isinstance, _str = isinstance, str + if tag == '{*}*': + # Same as '*', but no comments or processing instructions. + # It can be a surprise that '*' includes those, but there is no + # justification for '{*}*' doing the same. + def select(context, result): + for elem in result: + if _isinstance(elem.tag, _str): + yield elem + elif tag == '{}*': + # Any tag that is not in a namespace. + def select(context, result): + for elem in result: + el_tag = elem.tag + if _isinstance(el_tag, _str) and el_tag[0] != '{': + yield elem + elif tag[:3] == '{*}': + # The tag in any (or no) namespace. + suffix = tag[2:] # '}name' + no_ns = slice(-len(suffix), None) + tag = tag[3:] + def select(context, result): + for elem in result: + el_tag = elem.tag + if el_tag == tag or _isinstance(el_tag, _str) and el_tag[no_ns] == suffix: + yield elem + elif tag[-2:] == '}*': + # Any tag in the given namespace. 
+ ns = tag[:-1] + ns_only = slice(None, len(ns)) + def select(context, result): + for elem in result: + el_tag = elem.tag + if _isinstance(el_tag, _str) and el_tag[ns_only] == ns: + yield elem + else: + raise RuntimeError(f"internal parser error, got {tag}") + return select + + +def prepare_child(next, token): + tag = token[1] + if _is_wildcard_tag(tag): + select_tag = _prepare_tag(tag) + def select(context, result): + def select_child(result): + for elem in result: + yield from elem + return select_tag(context, select_child(result)) + else: + if tag[:2] == '{}': + tag = tag[2:] # '{}tag' == 'tag' + def select(context, result): + for elem in result: + for e in elem: + if e.tag == tag: + yield e + return select + +def prepare_star(next, token): + def select(context, result): + for elem in result: + yield from elem + return select + +def prepare_self(next, token): + def select(context, result): + yield from result + return select + +def prepare_descendant(next, token): + try: + token = next() + except StopIteration: + return + if token[0] == "*": + tag = "*" + elif not token[0]: + tag = token[1] + else: + raise SyntaxError("invalid descendant") + + if _is_wildcard_tag(tag): + select_tag = _prepare_tag(tag) + def select(context, result): + def select_child(result): + for elem in result: + for e in elem.iter(): + if e is not elem: + yield e + return select_tag(context, select_child(result)) + else: + if tag[:2] == '{}': + tag = tag[2:] # '{}tag' == 'tag' + def select(context, result): + for elem in result: + for e in elem.iter(tag): + if e is not elem: + yield e + return select + +def prepare_parent(next, token): + def select(context, result): + # FIXME: raise error if .. is applied at toplevel? + parent_map = get_parent_map(context) + result_map = {} + for elem in result: + if elem in parent_map: + parent = parent_map[elem] + if parent not in result_map: + result_map[parent] = None + yield parent + return select + +def prepare_predicate(next, token): + # FIXME: replace with real parser!!! 
refs: + # http://javascript.crockford.com/tdop/tdop.html + signature = [] + predicate = [] + while 1: + try: + token = next() + except StopIteration: + return + if token[0] == "]": + break + if token == ('', ''): + # ignore whitespace + continue + if token[0] and token[0][:1] in "'\"": + token = "'", token[0][1:-1] + signature.append(token[0] or "-") + predicate.append(token[1]) + signature = "".join(signature) + # use signature to determine predicate type + if signature == "@-": + # [@attribute] predicate + key = predicate[1] + def select(context, result): + for elem in result: + if elem.get(key) is not None: + yield elem + return select + if signature == "@-='" or signature == "@-!='": + # [@attribute='value'] or [@attribute!='value'] + key = predicate[1] + value = predicate[-1] + def select(context, result): + for elem in result: + if elem.get(key) == value: + yield elem + def select_negated(context, result): + for elem in result: + if (attr_value := elem.get(key)) is not None and attr_value != value: + yield elem + return select_negated if '!=' in signature else select + if signature == "-" and not re.match(r"\-?\d+$", predicate[0]): + # [tag] + tag = predicate[0] + def select(context, result): + for elem in result: + if elem.find(tag) is not None: + yield elem + return select + if signature == ".='" or signature == ".!='" or ( + (signature == "-='" or signature == "-!='") + and not re.match(r"\-?\d+$", predicate[0])): + # [.='value'] or [tag='value'] or [.!='value'] or [tag!='value'] + tag = predicate[0] + value = predicate[-1] + if tag: + def select(context, result): + for elem in result: + for e in elem.findall(tag): + if "".join(e.itertext()) == value: + yield elem + break + def select_negated(context, result): + for elem in result: + for e in elem.iterfind(tag): + if "".join(e.itertext()) != value: + yield elem + break + else: + def select(context, result): + for elem in result: + if "".join(elem.itertext()) == value: + yield elem + def select_negated(context, result): + for elem in result: + if "".join(elem.itertext()) != value: + yield elem + return select_negated if '!=' in signature else select + if signature == "-" or signature == "-()" or signature == "-()-": + # [index] or [last()] or [last()-index] + if signature == "-": + # [index] + index = int(predicate[0]) - 1 + if index < 0: + raise SyntaxError("XPath position >= 1 expected") + else: + if predicate[0] != "last": + raise SyntaxError("unsupported function") + if signature == "-()-": + try: + index = int(predicate[2]) - 1 + except ValueError: + raise SyntaxError("unsupported expression") + if index > -2: + raise SyntaxError("XPath offset from last() must be negative") + else: + index = -1 + def select(context, result): + parent_map = get_parent_map(context) + for elem in result: + try: + parent = parent_map[elem] + # FIXME: what if the selector is "*" ? + elems = list(parent.findall(elem.tag)) + if elems[index] is elem: + yield elem + except (IndexError, KeyError): + pass + return select + raise SyntaxError("invalid predicate") + +ops = { + "": prepare_child, + "*": prepare_star, + ".": prepare_self, + "..": prepare_parent, + "//": prepare_descendant, + "[": prepare_predicate, + } + +_cache = {} + +class _SelectorContext: + parent_map = None + def __init__(self, root): + self.root = root + +# -------------------------------------------------------------------- + +## +# Generate all matching objects. 
+ +def iterfind(elem, path, namespaces=None): + # compile selector pattern + if path[-1:] == "/": + path = path + "*" # implicit all (FIXME: keep this?) + + cache_key = (path,) + if namespaces: + cache_key += tuple(sorted(namespaces.items())) + + try: + selector = _cache[cache_key] + except KeyError: + if len(_cache) > 100: + _cache.clear() + if path[:1] == "/": + raise SyntaxError("cannot use absolute path on element") + next = iter(xpath_tokenizer(path, namespaces)).__next__ + try: + token = next() + except StopIteration: + return + selector = [] + while 1: + try: + selector.append(ops[token[0]](next, token)) + except StopIteration: + raise SyntaxError("invalid path") from None + try: + token = next() + if token[0] == "/": + token = next() + except StopIteration: + break + _cache[cache_key] = selector + # execute selector pattern + result = [elem] + context = _SelectorContext(elem) + for select in selector: + result = select(context, result) + return result + +## +# Find first matching object. + +def find(elem, path, namespaces=None): + return next(iterfind(elem, path, namespaces), None) + +## +# Find all matching objects. + +def findall(elem, path, namespaces=None): + return list(iterfind(elem, path, namespaces)) + +## +# Find text for first matching object. + +def findtext(elem, path, default=None, namespaces=None): + try: + elem = next(iterfind(elem, path, namespaces)) + return elem.text or "" + except StopIteration: + return default diff --git a/parrot/lib/python3.10/xml/etree/ElementTree.py b/parrot/lib/python3.10/xml/etree/ElementTree.py new file mode 100644 index 0000000000000000000000000000000000000000..516656b69e3dfef076825ad47472d8265ca6312c --- /dev/null +++ b/parrot/lib/python3.10/xml/etree/ElementTree.py @@ -0,0 +1,2097 @@ +"""Lightweight XML support for Python. + + XML is an inherently hierarchical data format, and the most natural way to + represent it is with a tree. This module has two classes for this purpose: + + 1. ElementTree represents the whole XML document as a tree and + + 2. Element represents a single node in this tree. + + Interactions with the whole document (reading and writing to/from files) are + usually done on the ElementTree level. Interactions with a single XML element + and its sub-elements are done on the Element level. + + Element is a flexible container object designed to store hierarchical data + structures in memory. It can be described as a cross between a list and a + dictionary. Each Element has a number of properties associated with it: + + 'tag' - a string containing the element's name. + + 'attributes' - a Python dictionary storing the element's attributes. + + 'text' - a string containing the element's text content. + + 'tail' - an optional string containing text after the element's end tag. + + And a number of child elements stored in a Python sequence. + + To create an element instance, use the Element constructor, + or the SubElement factory function. + + You can also use the ElementTree class to wrap an element structure + and convert it to and from XML. + +""" + +#--------------------------------------------------------------------- +# Licensed to PSF under a Contributor Agreement. +# See https://www.python.org/psf/license for licensing details. +# +# ElementTree +# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. 
+# +# fredrik@pythonware.com +# http://www.pythonware.com +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2008 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +__all__ = [ + # public symbols + "Comment", + "dump", + "Element", "ElementTree", + "fromstring", "fromstringlist", + "indent", "iselement", "iterparse", + "parse", "ParseError", + "PI", "ProcessingInstruction", + "QName", + "SubElement", + "tostring", "tostringlist", + "TreeBuilder", + "VERSION", + "XML", "XMLID", + "XMLParser", "XMLPullParser", + "register_namespace", + "canonicalize", "C14NWriterTarget", + ] + +VERSION = "1.3.0" + +import sys +import re +import warnings +import io +import collections +import collections.abc +import contextlib + +from . import ElementPath + + +class ParseError(SyntaxError): + """An error when parsing an XML document. + + In addition to its exception value, a ParseError contains + two extra attributes: + 'code' - the specific exception code + 'position' - the line and column of the error + + """ + pass + +# -------------------------------------------------------------------- + + +def iselement(element): + """Return True if *element* appears to be an Element.""" + return hasattr(element, 'tag') + + +class Element: + """An XML element. + + This class is the reference implementation of the Element interface. + + An element's length is its number of subelements. That means if you + want to check if an element is truly empty, you should check BOTH + its length AND its text attribute. + + The element tag, attribute names, and attribute values can be either + bytes or strings. + + *tag* is the element name. *attrib* is an optional dictionary containing + element attributes. *extra* are additional element attributes given as + keyword arguments. + + Example form: + <tag attrib>text<child/>...</tag>tail + + """ + + tag = None + """The element's name.""" + + attrib = None + """Dictionary of the element's attributes.""" + + text = None + """ + Text before first subelement. This is either a string or the value None. + Note that if there is no text, this attribute may be either + None or the empty string, depending on the parser. 
+ + """ + + tail = None + """ + Text after this element's end tag, but before the next sibling element's + start tag. This is either a string or the value None. Note that if there + was no text, this attribute may be either None or an empty string, + depending on the parser. + + """ + + def __init__(self, tag, attrib={}, **extra): + if not isinstance(attrib, dict): + raise TypeError("attrib must be dict, not %s" % ( + attrib.__class__.__name__,)) + self.tag = tag + self.attrib = {**attrib, **extra} + self._children = [] + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self)) + + def makeelement(self, tag, attrib): + """Create a new element with the same type. + + *tag* is a string containing the element name. + *attrib* is a dictionary containing the element attributes. + + Do not call this method, use the SubElement factory function instead. + + """ + return self.__class__(tag, attrib) + + def copy(self): + """Return copy of current element. + + This creates a shallow copy. Subelements will be shared with the + original tree. + + """ + warnings.warn( + "elem.copy() is deprecated. Use copy.copy(elem) instead.", + DeprecationWarning + ) + return self.__copy__() + + def __copy__(self): + elem = self.makeelement(self.tag, self.attrib) + elem.text = self.text + elem.tail = self.tail + elem[:] = self + return elem + + def __len__(self): + return len(self._children) + + def __bool__(self): + warnings.warn( + "The behavior of this method will change in future versions. " + "Use specific 'len(elem)' or 'elem is not None' test instead.", + FutureWarning, stacklevel=2 + ) + return len(self._children) != 0 # emulate old behaviour, for now + + def __getitem__(self, index): + return self._children[index] + + def __setitem__(self, index, element): + if isinstance(index, slice): + for elt in element: + self._assert_is_element(elt) + else: + self._assert_is_element(element) + self._children[index] = element + + def __delitem__(self, index): + del self._children[index] + + def append(self, subelement): + """Add *subelement* to the end of this element. + + The new element will appear in document order after the last existing + subelement (or directly after the text, if it's the first subelement), + but before the end tag for this element. + + """ + self._assert_is_element(subelement) + self._children.append(subelement) + + def extend(self, elements): + """Append subelements from a sequence. + + *elements* is a sequence with zero or more elements. + + """ + for element in elements: + self._assert_is_element(element) + self._children.append(element) + + def insert(self, index, subelement): + """Insert *subelement* at position *index*.""" + self._assert_is_element(subelement) + self._children.insert(index, subelement) + + def _assert_is_element(self, e): + # Need to refer to the actual Python implementation, not the + # shadowing C implementation. + if not isinstance(e, _Element_Py): + raise TypeError('expected an Element, not %s' % type(e).__name__) + + def remove(self, subelement): + """Remove matching subelement. + + Unlike the find methods, this method compares elements based on + identity, NOT ON tag value or contents. To remove subelements by + other means, the easiest way is to use a list comprehension to + select what elements to keep, and then use slice assignment to update + the parent element. + + ValueError is raised if a matching element could not be found. 
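# --- Illustrative sketch (not part of the vendored module): building and
# --- editing a tree with the Element methods documented above.
import xml.etree.ElementTree as ET

root = ET.Element("root", attrib={"version": "1"})
a, b = ET.Element("a"), ET.Element("b")
root.append(a)                  # add to the end
root.extend([b])                # add several at once
root.insert(0, ET.Element("first"))
root.remove(b)                  # removal is by identity, not by tag or content
print([child.tag for child in root])   # ['first', 'a']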
+ + """ + # assert iselement(element) + self._children.remove(subelement) + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + return ElementPath.find(self, path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find text for first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *default* is the value to return if the element was not found, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return text content of first matching element, or default value if + none was found. Note that if an element is found having no text + content, the empty string is returned. + + """ + return ElementPath.findtext(self, path, default, namespaces) + + def findall(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Returns list containing all matching elements in document order. + + """ + return ElementPath.findall(self, path, namespaces) + + def iterfind(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + """ + return ElementPath.iterfind(self, path, namespaces) + + def clear(self): + """Reset element. + + This function removes all subelements, clears all attributes, and sets + the text and tail attributes to None. + + """ + self.attrib.clear() + self._children = [] + self.text = self.tail = None + + def get(self, key, default=None): + """Get element attribute. + + Equivalent to attrib.get, but some implementations may handle this a + bit more efficiently. *key* is what attribute to look for, and + *default* is what to return if the attribute was not found. + + Returns a string containing the attribute value, or the default if + attribute was not found. + + """ + return self.attrib.get(key, default) + + def set(self, key, value): + """Set element attribute. + + Equivalent to attrib[key] = value, but some implementations may handle + this a bit more efficiently. *key* is what attribute to set, and + *value* is the attribute value to set it to. + + """ + self.attrib[key] = value + + def keys(self): + """Get list of attribute names. + + Names are returned in an arbitrary order, just like an ordinary + Python dict. Equivalent to attrib.keys() + + """ + return self.attrib.keys() + + def items(self): + """Get element attributes as a sequence. + + The attributes are returned in arbitrary order. Equivalent to + attrib.items(). + + Return a list of (name, value) tuples. + + """ + return self.attrib.items() + + def iter(self, tag=None): + """Create tree iterator. + + The iterator loops over the element and all subelements in document + order, returning all elements with a matching tag. + + If the tree structure is modified during iteration, new or removed + elements may or may not be included. To get a stable set, use the + list() function on the iterator, and loop over the resulting list. 
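# --- Illustrative sketch (not part of the vendored module): the query and
# --- attribute helpers documented above; element names are made up.
import xml.etree.ElementTree as ET

root = ET.fromstring("<cfg><db host='x'/><db host='y'/><name>demo</name></cfg>")
first = root.find("db")                      # first match, or None
print(first.get("host"))                     # 'x'
first.set("port", "5432")                    # same as first.attrib['port'] = '5432'
print(root.findtext("name", default=""))     # 'demo'
print([e.get("host") for e in root.findall("db")])   # ['x', 'y']
print([e.tag for e in root.iter()])          # whole subtree in document order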
+ + *tag* is what tags to look for (default is to return all elements) + + Return an iterator containing all the matching elements. + + """ + if tag == "*": + tag = None + if tag is None or self.tag == tag: + yield self + for e in self._children: + yield from e.iter(tag) + + def itertext(self): + """Create text iterator. + + The iterator loops over the element and all subelements in document + order, returning all inner text. + + """ + tag = self.tag + if not isinstance(tag, str) and tag is not None: + return + t = self.text + if t: + yield t + for e in self: + yield from e.itertext() + t = e.tail + if t: + yield t + + +def SubElement(parent, tag, attrib={}, **extra): + """Subelement factory which creates an element instance, and appends it + to an existing parent. + + The element tag, attribute names, and attribute values can be either + bytes or Unicode strings. + + *parent* is the parent element, *tag* is the subelements name, *attrib* is + an optional directory containing element attributes, *extra* are + additional attributes given as keyword arguments. + + """ + attrib = {**attrib, **extra} + element = parent.makeelement(tag, attrib) + parent.append(element) + return element + + +def Comment(text=None): + """Comment element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *text* is a string containing the comment string. + + """ + element = Element(Comment) + element.text = text + return element + + +def ProcessingInstruction(target, text=None): + """Processing Instruction element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *target* is a string containing the processing instruction, *text* is a + string containing the processing instruction contents, if any. + + """ + element = Element(ProcessingInstruction) + element.text = target + if text: + element.text = element.text + " " + text + return element + +PI = ProcessingInstruction + + +class QName: + """Qualified name wrapper. + + This class can be used to wrap a QName attribute value in order to get + proper namespace handing on output. + + *text_or_uri* is a string containing the QName value either in the form + {uri}local, or if the tag argument is given, the URI part of a QName. + + *tag* is an optional argument which if given, will make the first + argument (text_or_uri) be interpreted as a URI, and this argument (tag) + be interpreted as a local name. + + """ + def __init__(self, text_or_uri, tag=None): + if tag: + text_or_uri = "{%s}%s" % (text_or_uri, tag) + self.text = text_or_uri + def __str__(self): + return self.text + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, self.text) + def __hash__(self): + return hash(self.text) + def __le__(self, other): + if isinstance(other, QName): + return self.text <= other.text + return self.text <= other + def __lt__(self, other): + if isinstance(other, QName): + return self.text < other.text + return self.text < other + def __ge__(self, other): + if isinstance(other, QName): + return self.text >= other.text + return self.text >= other + def __gt__(self, other): + if isinstance(other, QName): + return self.text > other.text + return self.text > other + def __eq__(self, other): + if isinstance(other, QName): + return self.text == other.text + return self.text == other + +# -------------------------------------------------------------------- + + +class ElementTree: + """An XML element hierarchy. 
+ + This class also provides support for serialization to and from + standard XML. + + *element* is an optional root element node, + *file* is an optional file handle or file name of an XML file whose + contents will be used to initialize the tree with. + + """ + def __init__(self, element=None, file=None): + # assert element is None or iselement(element) + self._root = element # first node + if file: + self.parse(file) + + def getroot(self): + """Return root element of this tree.""" + return self._root + + def _setroot(self, element): + """Replace root element of this tree. + + This will discard the current contents of the tree and replace it + with the given element. Use with care! + + """ + # assert iselement(element) + self._root = element + + def parse(self, source, parser=None): + """Load external XML document into element tree. + + *source* is a file name or file object, *parser* is an optional parser + instance that defaults to XMLParser. + + ParseError is raised if the parser fails to parse the document. + + Returns the root element of the given source document. + + """ + close_source = False + if not hasattr(source, "read"): + source = open(source, "rb") + close_source = True + try: + if parser is None: + # If no parser was specified, create a default XMLParser + parser = XMLParser() + if hasattr(parser, '_parse_whole'): + # The default XMLParser, when it comes from an accelerator, + # can define an internal _parse_whole API for efficiency. + # It can be used to parse the whole source without feeding + # it with chunks. + self._root = parser._parse_whole(source) + return self._root + while True: + data = source.read(65536) + if not data: + break + parser.feed(data) + self._root = parser.close() + return self._root + finally: + if close_source: + source.close() + + def iter(self, tag=None): + """Create and return tree iterator for the root element. + + The iterator loops over all elements in this tree, in document order. + + *tag* is a string with the tag name to iterate over + (default is to return all elements). + + """ + # assert self._root is not None + return self._root.iter(tag) + + def find(self, path, namespaces=None): + """Find first matching element by tag name or path. + + Same as getroot().find(path), which is Element.find() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.find(path, namespaces) + + def findtext(self, path, default=None, namespaces=None): + """Find first matching element by tag name or path. + + Same as getroot().findtext(path), which is Element.findtext() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. 
If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.findtext(path, default, namespaces) + + def findall(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + Same as getroot().findall(path), which is Element.findall(). + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return list containing all matching elements in document order. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.findall(path, namespaces) + + def iterfind(self, path, namespaces=None): + """Find all matching subelements by tag name or path. + + Same as getroot().iterfind(path), which is element.iterfind() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + """ + # assert self._root is not None + if path[:1] == "/": + path = "." + path + warnings.warn( + "This search is broken in 1.3 and earlier, and will be " + "fixed in a future version. If you rely on the current " + "behaviour, change it to %r" % path, + FutureWarning, stacklevel=2 + ) + return self._root.iterfind(path, namespaces) + + def write(self, file_or_filename, + encoding=None, + xml_declaration=None, + default_namespace=None, + method=None, *, + short_empty_elements=True): + """Write element tree to a file as XML. + + Arguments: + *file_or_filename* -- file name or a file object opened for writing + + *encoding* -- the output encoding (default: US-ASCII) + + *xml_declaration* -- bool indicating if an XML declaration should be + added to the output. If None, an XML declaration + is added if encoding IS NOT either of: + US-ASCII, UTF-8, or Unicode + + *default_namespace* -- sets the default XML namespace (for "xmlns") + + *method* -- either "xml" (default), "html, "text", or "c14n" + + *short_empty_elements* -- controls the formatting of elements + that contain no content. If True (default) + they are emitted as a single self-closed + tag, otherwise they are emitted as a pair + of start/end tags + + """ + if not method: + method = "xml" + elif method not in _serialize: + raise ValueError("unknown method %r" % method) + if not encoding: + if method == "c14n": + encoding = "utf-8" + else: + encoding = "us-ascii" + with _get_writer(file_or_filename, encoding) as (write, declared_encoding): + if method == "xml" and (xml_declaration or + (xml_declaration is None and + encoding.lower() != "unicode" and + declared_encoding.lower() not in ("utf-8", "us-ascii"))): + write("<?xml version='1.0' encoding='%s'?>\n" % ( + declared_encoding,)) + if method == "text": + _serialize_text(write, self._root) + else: + qnames, namespaces = _namespaces(self._root, default_namespace) + serialize = _serialize[method] + serialize(write, self._root, qnames, namespaces, + short_empty_elements=short_empty_elements) + + def write_c14n(self, file): + # lxml.etree compatibility. 
use output method instead + return self.write(file, method="c14n") + +# -------------------------------------------------------------------- +# serialization support + +@contextlib.contextmanager +def _get_writer(file_or_filename, encoding): + # returns text write method and release all resources after using + try: + write = file_or_filename.write + except AttributeError: + # file_or_filename is a file name + if encoding.lower() == "unicode": + encoding="utf-8" + with open(file_or_filename, "w", encoding=encoding, + errors="xmlcharrefreplace") as file: + yield file.write, encoding + else: + # file_or_filename is a file-like object + # encoding determines if it is a text or binary writer + if encoding.lower() == "unicode": + # use a text writer as is + yield write, getattr(file_or_filename, "encoding", None) or "utf-8" + else: + # wrap a binary writer with TextIOWrapper + with contextlib.ExitStack() as stack: + if isinstance(file_or_filename, io.BufferedIOBase): + file = file_or_filename + elif isinstance(file_or_filename, io.RawIOBase): + file = io.BufferedWriter(file_or_filename) + # Keep the original file open when the BufferedWriter is + # destroyed + stack.callback(file.detach) + else: + # This is to handle passed objects that aren't in the + # IOBase hierarchy, but just have a write method + file = io.BufferedIOBase() + file.writable = lambda: True + file.write = write + try: + # TextIOWrapper uses this methods to determine + # if BOM (for UTF-16, etc) should be added + file.seekable = file_or_filename.seekable + file.tell = file_or_filename.tell + except AttributeError: + pass + file = io.TextIOWrapper(file, + encoding=encoding, + errors="xmlcharrefreplace", + newline="\n") + # Keep the original file open when the TextIOWrapper is + # destroyed + stack.callback(file.detach) + yield file.write, encoding + +def _namespaces(elem, default_namespace=None): + # identify namespaces used in this tree + + # maps qnames to *encoded* prefix:local names + qnames = {None: None} + + # maps uri:s to prefixes + namespaces = {} + if default_namespace: + namespaces[default_namespace] = "" + + def add_qname(qname): + # calculate serialized qname representation + try: + if qname[:1] == "{": + uri, tag = qname[1:].rsplit("}", 1) + prefix = namespaces.get(uri) + if prefix is None: + prefix = _namespace_map.get(uri) + if prefix is None: + prefix = "ns%d" % len(namespaces) + if prefix != "xml": + namespaces[uri] = prefix + if prefix: + qnames[qname] = "%s:%s" % (prefix, tag) + else: + qnames[qname] = tag # default element + else: + if default_namespace: + # FIXME: can this be handled in XML 1.0? 
+ raise ValueError( + "cannot use non-qualified names with " + "default_namespace option" + ) + qnames[qname] = qname + except TypeError: + _raise_serialization_error(qname) + + # populate qname and namespaces table + for elem in elem.iter(): + tag = elem.tag + if isinstance(tag, QName): + if tag.text not in qnames: + add_qname(tag.text) + elif isinstance(tag, str): + if tag not in qnames: + add_qname(tag) + elif tag is not None and tag is not Comment and tag is not PI: + _raise_serialization_error(tag) + for key, value in elem.items(): + if isinstance(key, QName): + key = key.text + if key not in qnames: + add_qname(key) + if isinstance(value, QName) and value.text not in qnames: + add_qname(value.text) + text = elem.text + if isinstance(text, QName) and text.text not in qnames: + add_qname(text.text) + return qnames, namespaces + +def _serialize_xml(write, elem, qnames, namespaces, + short_empty_elements, **kwargs): + tag = elem.tag + text = elem.text + if tag is Comment: + write("<!--%s-->" % text) + elif tag is ProcessingInstruction: + write("<?%s?>" % text) + else: + tag = qnames[tag] + if tag is None: + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_xml(write, e, qnames, None, + short_empty_elements=short_empty_elements) + else: + write("<" + tag) + items = list(elem.items()) + if items or namespaces: + if namespaces: + for v, k in sorted(namespaces.items(), + key=lambda x: x[1]): # sort on prefix + if k: + k = ":" + k + write(" xmlns%s=\"%s\"" % ( + k, + _escape_attrib(v) + )) + for k, v in items: + if isinstance(k, QName): + k = k.text + if isinstance(v, QName): + v = qnames[v.text] + else: + v = _escape_attrib(v) + write(" %s=\"%s\"" % (qnames[k], v)) + if text or len(elem) or not short_empty_elements: + write(">") + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_xml(write, e, qnames, None, + short_empty_elements=short_empty_elements) + write("</" + tag + ">") + else: + write(" />") + if elem.tail: + write(_escape_cdata(elem.tail)) + +HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr", + "img", "input", "isindex", "link", "meta", "param") + +try: + HTML_EMPTY = set(HTML_EMPTY) +except NameError: + pass + +def _serialize_html(write, elem, qnames, namespaces, **kwargs): + tag = elem.tag + text = elem.text + if tag is Comment: + write("<!--%s-->" % _escape_cdata(text)) + elif tag is ProcessingInstruction: + write("<?%s?>" % _escape_cdata(text)) + else: + tag = qnames[tag] + if tag is None: + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_html(write, e, qnames, None) + else: + write("<" + tag) + items = list(elem.items()) + if items or namespaces: + if namespaces: + for v, k in sorted(namespaces.items(), + key=lambda x: x[1]): # sort on prefix + if k: + k = ":" + k + write(" xmlns%s=\"%s\"" % ( + k, + _escape_attrib(v) + )) + for k, v in items: + if isinstance(k, QName): + k = k.text + if isinstance(v, QName): + v = qnames[v.text] + else: + v = _escape_attrib_html(v) + # FIXME: handle boolean attributes + write(" %s=\"%s\"" % (qnames[k], v)) + write(">") + ltag = tag.lower() + if text: + if ltag == "script" or ltag == "style": + write(text) + else: + write(_escape_cdata(text)) + for e in elem: + _serialize_html(write, e, qnames, None) + if ltag not in HTML_EMPTY: + write("</" + tag + ">") + if elem.tail: + write(_escape_cdata(elem.tail)) + +def _serialize_text(write, elem): + for part in elem.itertext(): + write(part) + if elem.tail: + write(elem.tail) + +_serialize = { + "xml": _serialize_xml, + "html": 
_serialize_html,
+    "text": _serialize_text,
+# this optional method is imported at the end of the module
+#     "c14n": _serialize_c14n,
+}
+
+
+def register_namespace(prefix, uri):
+    """Register a namespace prefix.
+
+    The registry is global, and any existing mapping for either the
+    given prefix or the namespace URI will be removed.
+
+    *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
+    attributes in this namespace will be serialized with prefix if possible.
+
+    ValueError is raised if prefix is reserved or is invalid.
+
+    """
+    if re.match(r"ns\d+$", prefix):
+        raise ValueError("Prefix format reserved for internal use")
+    for k, v in list(_namespace_map.items()):
+        if k == uri or v == prefix:
+            del _namespace_map[k]
+    _namespace_map[uri] = prefix
+
+_namespace_map = {
+    # "well-known" namespace prefixes
+    "http://www.w3.org/XML/1998/namespace": "xml",
+    "http://www.w3.org/1999/xhtml": "html",
+    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+    # xml schema
+    "http://www.w3.org/2001/XMLSchema": "xs",
+    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
+    # dublin core
+    "http://purl.org/dc/elements/1.1/": "dc",
+}
+# For tests and troubleshooting
+register_namespace._namespace_map = _namespace_map
+
+def _raise_serialization_error(text):
+    raise TypeError(
+        "cannot serialize %r (type %s)" % (text, type(text).__name__)
+        )
+
+def _escape_cdata(text):
+    # escape character data
+    try:
+        # it's worth avoiding do-nothing calls for strings that are
+        # shorter than 500 characters, or so. assume that's, by far,
+        # the most common case in most applications.
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_attrib(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if "<" in text:
+            text = text.replace("<", "&lt;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        # Although section 2.11 of the XML specification states that CR or
+        # CR LN should be replaced with just LN, it applies only to EOLNs
+        # which take part of organizing file into lines. Within attributes,
+        # we are replacing these with entity numbers, so they do not count.
+        # http://www.w3.org/TR/REC-xml/#sec-line-ends
+        # The current solution, contained in following six lines, was
+        # discussed in issue 17582 and 39011.
+        if "\r" in text:
+            text = text.replace("\r", "&#13;")
+        if "\n" in text:
+            text = text.replace("\n", "&#10;")
+        if "\t" in text:
+            text = text.replace("\t", "&#09;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+def _escape_attrib_html(text):
+    # escape attribute value
+    try:
+        if "&" in text:
+            text = text.replace("&", "&amp;")
+        if ">" in text:
+            text = text.replace(">", "&gt;")
+        if "\"" in text:
+            text = text.replace("\"", "&quot;")
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+# --------------------------------------------------------------------
+
+def tostring(element, encoding=None, method=None, *,
+             xml_declaration=None, default_namespace=None,
+             short_empty_elements=True):
+    """Generate string representation of XML element.
+
+    All subelements are included. If encoding is "unicode", a string
+    is returned. Otherwise a bytestring is returned.
+ + *element* is an Element instance, *encoding* is an optional output + encoding defaulting to US-ASCII, *method* is an optional output which can + be one of "xml" (default), "html", "text" or "c14n", *default_namespace* + sets the default XML namespace (for "xmlns"). + + Returns an (optionally) encoded string containing the XML data. + + """ + stream = io.StringIO() if encoding == 'unicode' else io.BytesIO() + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return stream.getvalue() + +class _ListDataStream(io.BufferedIOBase): + """An auxiliary stream accumulating into a list reference.""" + def __init__(self, lst): + self.lst = lst + + def writable(self): + return True + + def seekable(self): + return True + + def write(self, b): + self.lst.append(b) + + def tell(self): + return len(self.lst) + +def tostringlist(element, encoding=None, method=None, *, + xml_declaration=None, default_namespace=None, + short_empty_elements=True): + lst = [] + stream = _ListDataStream(lst) + ElementTree(element).write(stream, encoding, + xml_declaration=xml_declaration, + default_namespace=default_namespace, + method=method, + short_empty_elements=short_empty_elements) + return lst + + +def dump(elem): + """Write element tree or element structure to sys.stdout. + + This function should be used for debugging only. + + *elem* is either an ElementTree, or a single Element. The exact output + format is implementation dependent. In this version, it's written as an + ordinary XML file. + + """ + # debugging + if not isinstance(elem, ElementTree): + elem = ElementTree(elem) + elem.write(sys.stdout, encoding="unicode") + tail = elem.getroot().tail + if not tail or tail[-1] != "\n": + sys.stdout.write("\n") + + +def indent(tree, space=" ", level=0): + """Indent an XML document by inserting newlines and indentation space + after elements. + + *tree* is the ElementTree or Element to modify. The (root) element + itself will not be changed, but the tail text of all elements in its + subtree will be adapted. + + *space* is the whitespace to insert for each indentation level, two + space characters by default. + + *level* is the initial indentation level. Setting this to a higher + value than 0 can be used for indenting subtrees that are more deeply + nested inside of a document. + """ + if isinstance(tree, ElementTree): + tree = tree.getroot() + if level < 0: + raise ValueError(f"Initial indentation level must be >= 0, got {level}") + if not len(tree): + return + + # Reduce the memory consumption by reusing indentation strings. + indentations = ["\n" + level * space] + + def _indent_children(elem, level): + # Start a new indentation level for the first child. + child_level = level + 1 + try: + child_indentation = indentations[child_level] + except IndexError: + child_indentation = indentations[level] + space + indentations.append(child_indentation) + + if not elem.text or not elem.text.strip(): + elem.text = child_indentation + + for child in elem: + if len(child): + _indent_children(child, child_level) + if not child.tail or not child.tail.strip(): + child.tail = child_indentation + + # Dedent after the last child by overwriting the previous indentation. 
+ if not child.tail.strip(): + child.tail = indentations[level] + + _indent_children(tree, 0) + + +# -------------------------------------------------------------------- +# parsing + + +def parse(source, parser=None): + """Parse XML document into element tree. + + *source* is a filename or file object containing XML data, + *parser* is an optional parser instance defaulting to XMLParser. + + Return an ElementTree instance. + + """ + tree = ElementTree() + tree.parse(source, parser) + return tree + + +def iterparse(source, events=None, parser=None): + """Incrementally parse XML document into ElementTree. + + This class also reports what's going on to the user based on the + *events* it is initialized with. The supported events are the strings + "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get + detailed namespace information). If *events* is omitted, only + "end" events are reported. + + *source* is a filename or file object containing XML data, *events* is + a list of events to report back, *parser* is an optional parser instance. + + Returns an iterator providing (event, elem) pairs. + + """ + # Use the internal, undocumented _parser argument for now; When the + # parser argument of iterparse is removed, this can be killed. + pullparser = XMLPullParser(events=events, _parser=parser) + + def iterator(source): + close_source = False + try: + if not hasattr(source, "read"): + source = open(source, "rb") + close_source = True + yield None + while True: + yield from pullparser.read_events() + # load event buffer + data = source.read(16 * 1024) + if not data: + break + pullparser.feed(data) + root = pullparser._close_and_return_root() + yield from pullparser.read_events() + it.root = root + finally: + if close_source: + source.close() + + class IterParseIterator(collections.abc.Iterator): + __next__ = iterator(source).__next__ + it = IterParseIterator() + it.root = None + del iterator, IterParseIterator + + next(it) + return it + + +class XMLPullParser: + + def __init__(self, events=None, *, _parser=None): + # The _parser argument is for internal use only and must not be relied + # upon in user code. It will be removed in a future release. + # See https://bugs.python.org/issue17741 for more details. + + self._events_queue = collections.deque() + self._parser = _parser or XMLParser(target=TreeBuilder()) + # wire up the parser for event reporting + if events is None: + events = ("end",) + self._parser._setevents(self._events_queue, events) + + def feed(self, data): + """Feed encoded data to parser.""" + if self._parser is None: + raise ValueError("feed() called after end of stream") + if data: + try: + self._parser.feed(data) + except SyntaxError as exc: + self._events_queue.append(exc) + + def _close_and_return_root(self): + # iterparse needs this to set its root attribute properly :( + root = self._parser.close() + self._parser = None + return root + + def close(self): + """Finish feeding data to parser. + + Unlike XMLParser, does not return the root element. Use + read_events() to consume elements from XMLPullParser. + """ + self._close_and_return_root() + + def read_events(self): + """Return an iterator over currently available (event, elem) pairs. + + Events are consumed from the internal event queue as they are + retrieved from the iterator. 
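# --- Illustrative sketch (not part of the vendored module): incremental
# --- parsing with XMLPullParser, as described in the docstrings above.
import xml.etree.ElementTree as ET

parser = ET.XMLPullParser(events=("start", "end"))
for chunk in ("<root><item>", "hello</item>", "</root>"):   # e.g. data arriving from a socket
    parser.feed(chunk)
    for event, elem in parser.read_events():
        print(event, elem.tag)
parser.close()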
+ """ + events = self._events_queue + while events: + event = events.popleft() + if isinstance(event, Exception): + raise event + else: + yield event + + def flush(self): + if self._parser is None: + raise ValueError("flush() called after end of stream") + self._parser.flush() + + +def XML(text, parser=None): + """Parse XML document from string constant. + + This function can be used to embed "XML Literals" in Python code. + + *text* is a string containing XML data, *parser* is an + optional parser instance, defaulting to the standard XMLParser. + + Returns an Element instance. + + """ + if not parser: + parser = XMLParser(target=TreeBuilder()) + parser.feed(text) + return parser.close() + + +def XMLID(text, parser=None): + """Parse XML document from string constant for its IDs. + + *text* is a string containing XML data, *parser* is an + optional parser instance, defaulting to the standard XMLParser. + + Returns an (Element, dict) tuple, in which the + dict maps element id:s to elements. + + """ + if not parser: + parser = XMLParser(target=TreeBuilder()) + parser.feed(text) + tree = parser.close() + ids = {} + for elem in tree.iter(): + id = elem.get("id") + if id: + ids[id] = elem + return tree, ids + +# Parse XML document from string constant. Alias for XML(). +fromstring = XML + +def fromstringlist(sequence, parser=None): + """Parse XML document from sequence of string fragments. + + *sequence* is a list of other sequence, *parser* is an optional parser + instance, defaulting to the standard XMLParser. + + Returns an Element instance. + + """ + if not parser: + parser = XMLParser(target=TreeBuilder()) + for text in sequence: + parser.feed(text) + return parser.close() + +# -------------------------------------------------------------------- + + +class TreeBuilder: + """Generic element structure builder. + + This builder converts a sequence of start, data, and end method + calls to a well-formed element structure. + + You can use this class to build an element structure using a custom XML + parser, or a parser for some other XML-like format. + + *element_factory* is an optional element factory which is called + to create new Element instances, as necessary. + + *comment_factory* is a factory to create comments to be used instead of + the standard factory. If *insert_comments* is false (the default), + comments will not be inserted into the tree. + + *pi_factory* is a factory to create processing instructions to be used + instead of the standard factory. If *insert_pis* is false (the default), + processing instructions will not be inserted into the tree. 
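# --- Illustrative sketch (not part of the vendored module): keeping comments
# --- and processing instructions by handing a configured TreeBuilder to XMLParser.
import xml.etree.ElementTree as ET

builder = ET.TreeBuilder(insert_comments=True, insert_pis=True)
parser = ET.XMLParser(target=builder)
root = ET.fromstring("<root><!-- note --><?proc data?><x/></root>", parser=parser)
# comment/PI nodes carry the corresponding factory as their tag
print([child.tag for child in root])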
+ """ + def __init__(self, element_factory=None, *, + comment_factory=None, pi_factory=None, + insert_comments=False, insert_pis=False): + self._data = [] # data collector + self._elem = [] # element stack + self._last = None # last element + self._root = None # root element + self._tail = None # true if we're after an end tag + if comment_factory is None: + comment_factory = Comment + self._comment_factory = comment_factory + self.insert_comments = insert_comments + if pi_factory is None: + pi_factory = ProcessingInstruction + self._pi_factory = pi_factory + self.insert_pis = insert_pis + if element_factory is None: + element_factory = Element + self._factory = element_factory + + def close(self): + """Flush builder buffers and return toplevel document Element.""" + assert len(self._elem) == 0, "missing end tags" + assert self._root is not None, "missing toplevel element" + return self._root + + def _flush(self): + if self._data: + if self._last is not None: + text = "".join(self._data) + if self._tail: + assert self._last.tail is None, "internal error (tail)" + self._last.tail = text + else: + assert self._last.text is None, "internal error (text)" + self._last.text = text + self._data = [] + + def data(self, data): + """Add text to current element.""" + self._data.append(data) + + def start(self, tag, attrs): + """Open new element and return it. + + *tag* is the element name, *attrs* is a dict containing element + attributes. + + """ + self._flush() + self._last = elem = self._factory(tag, attrs) + if self._elem: + self._elem[-1].append(elem) + elif self._root is None: + self._root = elem + self._elem.append(elem) + self._tail = 0 + return elem + + def end(self, tag): + """Close and return current Element. + + *tag* is the element name. + + """ + self._flush() + self._last = self._elem.pop() + assert self._last.tag == tag,\ + "end tag mismatch (expected %s, got %s)" % ( + self._last.tag, tag) + self._tail = 1 + return self._last + + def comment(self, text): + """Create a comment using the comment_factory. + + *text* is the text of the comment. + """ + return self._handle_single( + self._comment_factory, self.insert_comments, text) + + def pi(self, target, text=None): + """Create a processing instruction using the pi_factory. + + *target* is the target name of the processing instruction. + *text* is the data of the processing instruction, or ''. + """ + return self._handle_single( + self._pi_factory, self.insert_pis, target, text) + + def _handle_single(self, factory, insert, *args): + elem = factory(*args) + if insert: + self._flush() + self._last = elem + if self._elem: + self._elem[-1].append(elem) + self._tail = 1 + return elem + + +# also see ElementTree and TreeBuilder +class XMLParser: + """Element structure builder for XML source data based on the expat parser. 
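# --- Illustrative sketch (not part of the vendored module): driving the
# --- TreeBuilder start/data/end/close protocol by hand, e.g. from a custom parser.
import xml.etree.ElementTree as ET

tb = ET.TreeBuilder()
tb.start("root", {})
tb.start("item", {"id": "1"})
tb.data("hello")
tb.end("item")
tb.end("root")
root = tb.close()
print(ET.tostring(root))   # b'<root><item id="1">hello</item></root>'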
+ + *target* is an optional target object which defaults to an instance of the + standard TreeBuilder class, *encoding* is an optional encoding string + which if given, overrides the encoding specified in the XML file: + http://www.iana.org/assignments/character-sets + + """ + + def __init__(self, *, target=None, encoding=None): + try: + from xml.parsers import expat + except ImportError: + try: + import pyexpat as expat + except ImportError: + raise ImportError( + "No module named expat; use SimpleXMLTreeBuilder instead" + ) + parser = expat.ParserCreate(encoding, "}") + if target is None: + target = TreeBuilder() + # underscored names are provided for compatibility only + self.parser = self._parser = parser + self.target = self._target = target + self._error = expat.error + self._names = {} # name memo cache + # main callbacks + parser.DefaultHandlerExpand = self._default + if hasattr(target, 'start'): + parser.StartElementHandler = self._start + if hasattr(target, 'end'): + parser.EndElementHandler = self._end + if hasattr(target, 'start_ns'): + parser.StartNamespaceDeclHandler = self._start_ns + if hasattr(target, 'end_ns'): + parser.EndNamespaceDeclHandler = self._end_ns + if hasattr(target, 'data'): + parser.CharacterDataHandler = target.data + # miscellaneous callbacks + if hasattr(target, 'comment'): + parser.CommentHandler = target.comment + if hasattr(target, 'pi'): + parser.ProcessingInstructionHandler = target.pi + # Configure pyexpat: buffering, new-style attribute handling. + parser.buffer_text = 1 + parser.ordered_attributes = 1 + self._doctype = None + self.entity = {} + try: + self.version = "Expat %d.%d.%d" % expat.version_info + except AttributeError: + pass # unknown + + def _setevents(self, events_queue, events_to_report): + # Internal API for XMLPullParser + # events_to_report: a list of events to report during parsing (same as + # the *events* of XMLPullParser's constructor. + # events_queue: a list of actual parsing events that will be populated + # by the underlying parser. 
+ # + parser = self._parser + append = events_queue.append + for event_name in events_to_report: + if event_name == "start": + parser.ordered_attributes = 1 + def handler(tag, attrib_in, event=event_name, append=append, + start=self._start): + append((event, start(tag, attrib_in))) + parser.StartElementHandler = handler + elif event_name == "end": + def handler(tag, event=event_name, append=append, + end=self._end): + append((event, end(tag))) + parser.EndElementHandler = handler + elif event_name == "start-ns": + # TreeBuilder does not implement .start_ns() + if hasattr(self.target, "start_ns"): + def handler(prefix, uri, event=event_name, append=append, + start_ns=self._start_ns): + append((event, start_ns(prefix, uri))) + else: + def handler(prefix, uri, event=event_name, append=append): + append((event, (prefix or '', uri or ''))) + parser.StartNamespaceDeclHandler = handler + elif event_name == "end-ns": + # TreeBuilder does not implement .end_ns() + if hasattr(self.target, "end_ns"): + def handler(prefix, event=event_name, append=append, + end_ns=self._end_ns): + append((event, end_ns(prefix))) + else: + def handler(prefix, event=event_name, append=append): + append((event, None)) + parser.EndNamespaceDeclHandler = handler + elif event_name == 'comment': + def handler(text, event=event_name, append=append, self=self): + append((event, self.target.comment(text))) + parser.CommentHandler = handler + elif event_name == 'pi': + def handler(pi_target, data, event=event_name, append=append, + self=self): + append((event, self.target.pi(pi_target, data))) + parser.ProcessingInstructionHandler = handler + else: + raise ValueError("unknown event %r" % event_name) + + def _raiseerror(self, value): + err = ParseError(value) + err.code = value.code + err.position = value.lineno, value.offset + raise err + + def _fixname(self, key): + # expand qname, and convert name string to ascii, if possible + try: + name = self._names[key] + except KeyError: + name = key + if "}" in name: + name = "{" + name + self._names[key] = name + return name + + def _start_ns(self, prefix, uri): + return self.target.start_ns(prefix or '', uri or '') + + def _end_ns(self, prefix): + return self.target.end_ns(prefix or '') + + def _start(self, tag, attr_list): + # Handler for expat's StartElementHandler. Since ordered_attributes + # is set, the attributes are reported as a list of alternating + # attribute name,value. 
+ fixname = self._fixname + tag = fixname(tag) + attrib = {} + if attr_list: + for i in range(0, len(attr_list), 2): + attrib[fixname(attr_list[i])] = attr_list[i+1] + return self.target.start(tag, attrib) + + def _end(self, tag): + return self.target.end(self._fixname(tag)) + + def _default(self, text): + prefix = text[:1] + if prefix == "&": + # deal with undefined entities + try: + data_handler = self.target.data + except AttributeError: + return + try: + data_handler(self.entity[text[1:-1]]) + except KeyError: + from xml.parsers import expat + err = expat.error( + "undefined entity %s: line %d, column %d" % + (text, self.parser.ErrorLineNumber, + self.parser.ErrorColumnNumber) + ) + err.code = 11 # XML_ERROR_UNDEFINED_ENTITY + err.lineno = self.parser.ErrorLineNumber + err.offset = self.parser.ErrorColumnNumber + raise err + elif prefix == "<" and text[:9] == "<!DOCTYPE": + self._doctype = [] # inside a doctype declaration + elif self._doctype is not None: + # parse doctype contents + if prefix == ">": + self._doctype = None + return + text = text.strip() + if not text: + return + self._doctype.append(text) + n = len(self._doctype) + if n > 2: + type = self._doctype[1] + if type == "PUBLIC" and n == 4: + name, type, pubid, system = self._doctype + if pubid: + pubid = pubid[1:-1] + elif type == "SYSTEM" and n == 3: + name, type, system = self._doctype + pubid = None + else: + return + if hasattr(self.target, "doctype"): + self.target.doctype(name, pubid, system[1:-1]) + elif hasattr(self, "doctype"): + warnings.warn( + "The doctype() method of XMLParser is ignored. " + "Define doctype() method on the TreeBuilder target.", + RuntimeWarning) + + self._doctype = None + + def feed(self, data): + """Feed encoded data to parser.""" + try: + self.parser.Parse(data, False) + except self._error as v: + self._raiseerror(v) + + def close(self): + """Finish feeding data to parser and return element structure.""" + try: + self.parser.Parse(b"", True) # end of data + except self._error as v: + self._raiseerror(v) + try: + close_handler = self.target.close + except AttributeError: + pass + else: + return close_handler() + finally: + # get rid of circular references + del self.parser, self._parser + del self.target, self._target + + def flush(self): + was_enabled = self.parser.GetReparseDeferralEnabled() + try: + self.parser.SetReparseDeferralEnabled(False) + self.parser.Parse(b"", False) + except self._error as v: + self._raiseerror(v) + finally: + self.parser.SetReparseDeferralEnabled(was_enabled) + +# -------------------------------------------------------------------- +# C14N 2.0 + +def canonicalize(xml_data=None, *, out=None, from_file=None, **options): + """Convert XML to its C14N 2.0 serialised form. + + If *out* is provided, it must be a file or file-like object that receives + the serialised canonical XML output (text, not bytes) through its ``.write()`` + method. To write to a file, open it in text mode with encoding "utf-8". + If *out* is not provided, this function returns the output as text string. + + Either *xml_data* (an XML string) or *from_file* (a file path or + file-like object) must be provided as input. + + The configuration options are the same as for the ``C14NWriterTarget``. 
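# --- Illustrative sketch (not part of the vendored module): C14N 2.0
# --- serialisation via canonicalize(), using the options described above.
import io
import xml.etree.ElementTree as ET

xml_data = "<root xmlns:x='urn:demo'>  <x:a>  text  </x:a>  </root>"
print(ET.canonicalize(xml_data, strip_text=True))    # returns canonical XML as text

buf = io.StringIO()                                  # or write straight to a text stream
ET.canonicalize(xml_data, out=buf, with_comments=True)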
+ """ + if xml_data is None and from_file is None: + raise ValueError("Either 'xml_data' or 'from_file' must be provided as input") + sio = None + if out is None: + sio = out = io.StringIO() + + parser = XMLParser(target=C14NWriterTarget(out.write, **options)) + + if xml_data is not None: + parser.feed(xml_data) + parser.close() + elif from_file is not None: + parse(from_file, parser=parser) + + return sio.getvalue() if sio is not None else None + + +_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match + + +class C14NWriterTarget: + """ + Canonicalization writer target for the XMLParser. + + Serialises parse events to XML C14N 2.0. + + The *write* function is used for writing out the resulting data stream + as text (not bytes). To write to a file, open it in text mode with encoding + "utf-8" and pass its ``.write`` method. + + Configuration options: + + - *with_comments*: set to true to include comments + - *strip_text*: set to true to strip whitespace before and after text content + - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}" + - *qname_aware_tags*: a set of qname aware tag names in which prefixes + should be replaced in text content + - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes + should be replaced in text content + - *exclude_attrs*: a set of attribute names that should not be serialised + - *exclude_tags*: a set of tag names that should not be serialised + """ + def __init__(self, write, *, + with_comments=False, strip_text=False, rewrite_prefixes=False, + qname_aware_tags=None, qname_aware_attrs=None, + exclude_attrs=None, exclude_tags=None): + self._write = write + self._data = [] + self._with_comments = with_comments + self._strip_text = strip_text + self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None + self._exclude_tags = set(exclude_tags) if exclude_tags else None + + self._rewrite_prefixes = rewrite_prefixes + if qname_aware_tags: + self._qname_aware_tags = set(qname_aware_tags) + else: + self._qname_aware_tags = None + if qname_aware_attrs: + self._find_qname_aware_attrs = set(qname_aware_attrs).intersection + else: + self._find_qname_aware_attrs = None + + # Stack with globally and newly declared namespaces as (uri, prefix) pairs. + self._declared_ns_stack = [[ + ("http://www.w3.org/XML/1998/namespace", "xml"), + ]] + # Stack with user declared namespace prefixes as (uri, prefix) pairs. 
+ self._ns_stack = [] + if not rewrite_prefixes: + self._ns_stack.append(list(_namespace_map.items())) + self._ns_stack.append([]) + self._prefix_map = {} + self._preserve_space = [False] + self._pending_start = None + self._root_seen = False + self._root_done = False + self._ignored_depth = 0 + + def _iter_namespaces(self, ns_stack, _reversed=reversed): + for namespaces in _reversed(ns_stack): + if namespaces: # almost no element declares new namespaces + yield from namespaces + + def _resolve_prefix_name(self, prefixed_name): + prefix, name = prefixed_name.split(':', 1) + for uri, p in self._iter_namespaces(self._ns_stack): + if p == prefix: + return f'{{{uri}}}{name}' + raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope') + + def _qname(self, qname, uri=None): + if uri is None: + uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname) + else: + tag = qname + + prefixes_seen = set() + for u, prefix in self._iter_namespaces(self._declared_ns_stack): + if u == uri and prefix not in prefixes_seen: + return f'{prefix}:{tag}' if prefix else tag, tag, uri + prefixes_seen.add(prefix) + + # Not declared yet => add new declaration. + if self._rewrite_prefixes: + if uri in self._prefix_map: + prefix = self._prefix_map[uri] + else: + prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}' + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}', tag, uri + + if not uri and '' not in prefixes_seen: + # No default namespace declared => no prefix needed. + return tag, tag, uri + + for u, prefix in self._iter_namespaces(self._ns_stack): + if u == uri: + self._declared_ns_stack[-1].append((uri, prefix)) + return f'{prefix}:{tag}' if prefix else tag, tag, uri + + if not uri: + # As soon as a default namespace is defined, + # anything that has no namespace (and thus, no prefix) goes there. + return tag, tag, uri + + raise ValueError(f'Namespace "{uri}" is not declared in scope') + + def data(self, data): + if not self._ignored_depth: + self._data.append(data) + + def _flush(self, _join_text=''.join): + data = _join_text(self._data) + del self._data[:] + if self._strip_text and not self._preserve_space[-1]: + data = data.strip() + if self._pending_start is not None: + args, self._pending_start = self._pending_start, None + qname_text = data if data and _looks_like_prefix_name(data) else None + self._start(*args, qname_text) + if qname_text is not None: + return + if data and self._root_seen: + self._write(_escape_cdata_c14n(data)) + + def start_ns(self, prefix, uri): + if self._ignored_depth: + return + # we may have to resolve qnames in text content + if self._data: + self._flush() + self._ns_stack[-1].append((uri, prefix)) + + def start(self, tag, attrs): + if self._exclude_tags is not None and ( + self._ignored_depth or tag in self._exclude_tags): + self._ignored_depth += 1 + return + if self._data: + self._flush() + + new_namespaces = [] + self._declared_ns_stack.append(new_namespaces) + + if self._qname_aware_tags is not None and tag in self._qname_aware_tags: + # Need to parse text first to see if it requires a prefix declaration. + self._pending_start = (tag, attrs, new_namespaces) + return + self._start(tag, attrs, new_namespaces) + + def _start(self, tag, attrs, new_namespaces, qname_text=None): + if self._exclude_attrs is not None and attrs: + attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs} + + qnames = {tag, *attrs} + resolved_names = {} + + # Resolve prefixes in attribute and tag text. 
+ if qname_text is not None: + qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text) + qnames.add(qname) + if self._find_qname_aware_attrs is not None and attrs: + qattrs = self._find_qname_aware_attrs(attrs) + if qattrs: + for attr_name in qattrs: + value = attrs[attr_name] + if _looks_like_prefix_name(value): + qname = resolved_names[value] = self._resolve_prefix_name(value) + qnames.add(qname) + else: + qattrs = None + else: + qattrs = None + + # Assign prefixes in lexicographical order of used URIs. + parse_qname = self._qname + parsed_qnames = {n: parse_qname(n) for n in sorted( + qnames, key=lambda n: n.split('}', 1))} + + # Write namespace declarations in prefix order ... + if new_namespaces: + attr_list = [ + ('xmlns:' + prefix if prefix else 'xmlns', uri) + for uri, prefix in new_namespaces + ] + attr_list.sort() + else: + # almost always empty + attr_list = [] + + # ... followed by attributes in URI+name order + if attrs: + for k, v in sorted(attrs.items()): + if qattrs is not None and k in qattrs and v in resolved_names: + v = parsed_qnames[resolved_names[v]][0] + attr_qname, attr_name, uri = parsed_qnames[k] + # No prefix for attributes in default ('') namespace. + attr_list.append((attr_qname if uri else attr_name, v)) + + # Honour xml:space attributes. + space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space') + self._preserve_space.append( + space_behaviour == 'preserve' if space_behaviour + else self._preserve_space[-1]) + + # Write the tag. + write = self._write + write('<' + parsed_qnames[tag][0]) + if attr_list: + write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list])) + write('>') + + # Write the resolved qname text content. + if qname_text is not None: + write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0])) + + self._root_seen = True + self._ns_stack.append([]) + + def end(self, tag): + if self._ignored_depth: + self._ignored_depth -= 1 + return + if self._data: + self._flush() + self._write(f'</{self._qname(tag)[0]}>') + self._preserve_space.pop() + self._root_done = len(self._preserve_space) == 1 + self._declared_ns_stack.pop() + self._ns_stack.pop() + + def comment(self, text): + if not self._with_comments: + return + if self._ignored_depth: + return + if self._root_done: + self._write('\n') + elif self._root_seen and self._data: + self._flush() + self._write(f'<!--{_escape_cdata_c14n(text)}-->') + if not self._root_seen: + self._write('\n') + + def pi(self, target, data): + if self._ignored_depth: + return + if self._root_done: + self._write('\n') + elif self._root_seen and self._data: + self._flush() + self._write( + f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>') + if not self._root_seen: + self._write('\n') + + +def _escape_cdata_c14n(text): + # escape character data + try: + # it's worth avoiding do-nothing calls for strings that are + # shorter than 500 character, or so. assume that's, by far, + # the most common case in most applications. 
+        if '&' in text:
+            text = text.replace('&', '&amp;')
+        if '<' in text:
+            text = text.replace('<', '&lt;')
+        if '>' in text:
+            text = text.replace('>', '&gt;')
+        if '\r' in text:
+            text = text.replace('\r', '&#xD;')
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+def _escape_attrib_c14n(text):
+    # escape attribute value
+    try:
+        if '&' in text:
+            text = text.replace('&', '&amp;')
+        if '<' in text:
+            text = text.replace('<', '&lt;')
+        if '"' in text:
+            text = text.replace('"', '&quot;')
+        if '\t' in text:
+            text = text.replace('\t', '&#x9;')
+        if '\n' in text:
+            text = text.replace('\n', '&#xA;')
+        if '\r' in text:
+            text = text.replace('\r', '&#xD;')
+        return text
+    except (TypeError, AttributeError):
+        _raise_serialization_error(text)
+
+
+# --------------------------------------------------------------------
+
+# Import the C accelerators
+try:
+    # Element is going to be shadowed by the C implementation. We need to keep
+    # the Python version of it accessible for some "creative" uses by external code
+    # (see tests)
+    _Element_Py = Element
+
+    # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
+    from _elementtree import *
+    from _elementtree import _set_factories
+except ImportError:
+    pass
+else:
+    _set_factories(Comment, ProcessingInstruction)
diff --git a/parrot/lib/python3.10/xml/etree/__init__.py b/parrot/lib/python3.10/xml/etree/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2ec53421d3f70267ddc34436eec6a30ee9f1855
--- /dev/null
+++ b/parrot/lib/python3.10/xml/etree/__init__.py
@@ -0,0 +1,33 @@
+# $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
+# elementtree package
+
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2008 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+# Licensed to PSF under a Contributor Agreement.
+# See https://www.python.org/psf/license for licensing details.
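Note: the escaping helpers and the C14NWriterTarget above back the module-level canonicalize() helper of this vendored xml.etree.ElementTree. A minimal sketch of how they surface through that public API, assuming the vendored copy behaves like the upstream 3.10 stdlib (the sample XML is purely illustrative):

    import xml.etree.ElementTree as ET

    # Illustrative input only; '&' in character data is written back out as
    # '&amp;' by _escape_cdata_c14n, and whitespace-only text runs are dropped
    # when strip_text is enabled.
    xml_data = '<x:doc xmlns:x="http://example.com/ns">  <x:item>a &amp; b</x:item>  </x:doc>'

    canonical = ET.canonicalize(xml_data, strip_text=True, with_comments=False)
    print(canonical)
    # Roughly: <x:doc xmlns:x="http://example.com/ns"><x:item>a &amp; b</x:item></x:doc>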
diff --git a/parrot/lib/python3.10/xml/etree/__pycache__/ElementInclude.cpython-310.pyc b/parrot/lib/python3.10/xml/etree/__pycache__/ElementInclude.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c186ec03c0d317638c8539e3b712769676ba7478 Binary files /dev/null and b/parrot/lib/python3.10/xml/etree/__pycache__/ElementInclude.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/etree/__pycache__/ElementPath.cpython-310.pyc b/parrot/lib/python3.10/xml/etree/__pycache__/ElementPath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17031261737ba7d399cd58dd70c546b9bd4b3d74 Binary files /dev/null and b/parrot/lib/python3.10/xml/etree/__pycache__/ElementPath.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/etree/__pycache__/ElementTree.cpython-310.pyc b/parrot/lib/python3.10/xml/etree/__pycache__/ElementTree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e89bb6cb20ce4ca25d6079af160d4e6a1bbd411 Binary files /dev/null and b/parrot/lib/python3.10/xml/etree/__pycache__/ElementTree.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/etree/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/xml/etree/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa5a7010e8cdccfa7f99aa7c171f98abc2d4ba92 Binary files /dev/null and b/parrot/lib/python3.10/xml/etree/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/sax/__init__.py b/parrot/lib/python3.10/xml/sax/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..17b75879ebaafad86c812bee53b4b24e01c18802 --- /dev/null +++ b/parrot/lib/python3.10/xml/sax/__init__.py @@ -0,0 +1,107 @@ +"""Simple API for XML (SAX) implementation for Python. + +This module provides an implementation of the SAX 2 interface; +information about the Java version of the interface can be found at +http://www.megginson.com/SAX/. The Python version of the interface is +documented at <...>. + +This package contains the following modules: + +handler -- Base classes and constants which define the SAX 2 API for + the 'client-side' of SAX for Python. + +saxutils -- Implementation of the convenience classes commonly used to + work with SAX. + +xmlreader -- Base classes and constants which define the SAX 2 API for + the parsers used with SAX for Python. + +expatreader -- Driver that allows use of the Expat parser with SAX. 
+""" + +from .xmlreader import InputSource +from .handler import ContentHandler, ErrorHandler +from ._exceptions import SAXException, SAXNotRecognizedException, \ + SAXParseException, SAXNotSupportedException, \ + SAXReaderNotAvailable + + +def parse(source, handler, errorHandler=ErrorHandler()): + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + parser.parse(source) + +def parseString(string, handler, errorHandler=ErrorHandler()): + import io + if errorHandler is None: + errorHandler = ErrorHandler() + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + + inpsrc = InputSource() + if isinstance(string, str): + inpsrc.setCharacterStream(io.StringIO(string)) + else: + inpsrc.setByteStream(io.BytesIO(string)) + parser.parse(inpsrc) + +# this is the parser list used by the make_parser function if no +# alternatives are given as parameters to the function + +default_parser_list = ["xml.sax.expatreader"] + +# tell modulefinder that importing sax potentially imports expatreader +_false = 0 +if _false: + import xml.sax.expatreader + +import os, sys +if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ: + default_parser_list = os.environ["PY_SAX_PARSER"].split(",") +del os + +_key = "python.xml.sax.parser" +if sys.platform[:4] == "java" and sys.registry.containsKey(_key): + default_parser_list = sys.registry.getProperty(_key).split(",") + + +def make_parser(parser_list=()): + """Creates and returns a SAX parser. + + Creates the first parser it is able to instantiate of the ones + given in the iterable created by chaining parser_list and + default_parser_list. The iterables must contain the names of Python + modules containing both a SAX parser and a create_parser function.""" + + for parser_name in list(parser_list) + default_parser_list: + try: + return _create_parser(parser_name) + except ImportError: + import sys + if parser_name in sys.modules: + # The parser module was found, but importing it + # failed unexpectedly, pass this exception through + raise + except SAXReaderNotAvailable: + # The parser module detected that it won't work properly, + # so try the next one + pass + + raise SAXReaderNotAvailable("No parsers found", None) + +# --- Internal utility methods used by make_parser + +if sys.platform[ : 4] == "java": + def _create_parser(parser_name): + from org.python.core import imp + drv_module = imp.importName(parser_name, 0, globals()) + return drv_module.create_parser() + +else: + def _create_parser(parser_name): + drv_module = __import__(parser_name,{},{},['create_parser']) + return drv_module.create_parser() + +del sys